{
"source": "johnmartinsson/adversarial-representation-learning",
"score": 3
}
#### File: johnmartinsson/adversarial-representation-learning/sanity_check.py
```python
from datasets import celeba
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
def main():
celeba_sample = celeba.CelebADataset(
split='train',
in_memory=True,
input_shape=(64, 64),
utility_attr='Male',
secret_attr='Smiling')
sample = celeba_sample[0]
image = sample['image']
utility = sample['utility']
secret = sample['secret']
print("plotting images ... ")
fig, axarr = plt.subplots(3, 4, figsize=(4*2, 3*2))
for i in range(3):
for j in range(4):
idx = j + (i*4)
sample = celeba_sample[idx]
image = sample['image']
assert(np.min(image) >= 0)
assert(np.max(image) <=1)
utility = sample['utility']
secret = sample['secret']
axarr[i,j].imshow(sample['image'])
axarr[i,j].set_title("{}, {}".format(secret, utility))
axarr[i,j].axis('off')
fig.suptitle("[smiling, male]")
plt.savefig("sanity_check.png")
if __name__ == '__main__':
main()
```
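The `DataLoader` import above is unused; a minimal sketch of how the same dataset could be batched with it (constructor arguments copied from the script, batch size and worker count are arbitrary choices):

```python
# Sketch only: batching the CelebA training split with torch's DataLoader.
# Constructor arguments mirror sanity_check.py; batch_size/num_workers are illustrative.
from datasets import celeba
from torch.utils.data import DataLoader

dataset = celeba.CelebADataset(
    split='train',
    in_memory=True,
    input_shape=(64, 64),
    utility_attr='Male',
    secret_attr='Smiling')
loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=2)

batch = next(iter(loader))
print(batch['image'].shape, batch['utility'].shape, batch['secret'].shape)
```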
#### File: adversarial-representation-learning/vis/create_attributes_experiment_plot.py
```python
import os
import pickle
import json
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
def scatterplot(attribute_idx, table, epsilons, label, ax):
ys = []
xs = []
for eps in epsilons:
row = table[eps]
y = row[attribute_idx]
ys.append(y)
xs.append(float(eps))
ax.scatter(x=xs, y=ys, label=label)
def main():
artifacts_dir = 'artifacts/attributes_experiment/'
epsilons = ['0.001', '0.005', '0.01', '0.05']
attributes = ['Smiling', 'Male', 'Wearing_Lipstick', 'Young']
mean_table = {}
std_table = {}
for eps in epsilons:
mean_table[eps] = []
std_table[eps] = []
for attr in attributes:
gen_secret_accs = []
for i in range(5):
results_path = os.path.join(artifacts_dir,
'{}_eps_{}'.format(attr, eps), str(i), 'results.json')
with open(results_path, 'r') as f:
res = json.load(f)
gen_secret_accs.append(res['gen_secret_acc'] * 100)
mean_table[eps].append(np.mean(gen_secret_accs))
std_table[eps].append(np.std(gen_secret_accs))
plt.rcParams["mathtext.fontset"] = "cm"
fig, ax = plt.subplots()
for i, attr in enumerate(attributes):
scatterplot(i, mean_table, epsilons, label=attr, ax=ax)
plt.ylabel("Fool fixed classifier [%]")
plt.xlabel("$\epsilon$")
plt.legend(loc="lower right")
plt.savefig("fool_fixed_classifier.pdf")
if __name__ == '__main__':
main()
```
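The script only reads the `gen_secret_acc` field from each `results.json` under `artifacts/attributes_experiment/<attr>_eps_<eps>/<run>/`; a hypothetical file consistent with that access pattern (the accuracy value is made up):

```python
# Illustrative only: writing the minimal results.json this plotting script expects.
# The directory layout follows the os.path.join calls above; 0.87 is a made-up value.
import json, os

path = os.path.join('artifacts/attributes_experiment', 'Smiling_eps_0.01', '0')
os.makedirs(path, exist_ok=True)
with open(os.path.join(path, 'results.json'), 'w') as f:
    json.dump({'gen_secret_acc': 0.87}, f)
```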
#### File: adversarial-representation-learning/vis/privacy_vs_utility_plot.py
```python
import os
import pickle
import json
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
def scatterplot(data, label, point_labels, ax, color, style='-o'):
ax.plot([x[0] for x in data], [x[1] for x in data], style, label=label,
color=color)
for i, txt in enumerate(point_labels):
xy = (data[i][0]-0.9, data[i][1]+0.5)
ax.annotate(r'$\epsilon = {}$'.format(txt), xy, fontsize=8)
def load_results(epsilons, ours_dir, baseline_dir, nb_runs, attr):
adv_ours = []
adv_bline = []
for eps in epsilons:
bline_adv_secret_acc = 0.0
ours_adv_secret_acc = 0.0
bline_mean_utility_acc = 0.0
ours_mean_utility_acc = 0.0
for i in range(nb_runs):
ours_experiment_dir = os.path.join(ours_dir, '{}_eps_{}'.format(attr, eps), str(i))
bline_experiment_dir = os.path.join(baseline_dir, '{}_eps_{}'.format(attr, eps), str(i))
ours_results_path = os.path.join(ours_experiment_dir, 'results.json')
bline_results_path = os.path.join(bline_experiment_dir, 'results.json')
with open(ours_results_path, 'r') as f:
ours_res = json.load(f)
with open(bline_results_path, 'r') as f:
bline_res = json.load(f)
bline_adv_secret_acc += bline_res['secret_adv_acc']*100
ours_adv_secret_acc += ours_res['secret_adv_acc']*100
bline_mean_utility_acc += np.load(os.path.join(ours_experiment_dir, 'bline_mean_utility_acc.npy'))*100
ours_mean_utility_acc += np.load(os.path.join(ours_experiment_dir, 'ours_mean_utility_acc.npy'))*100
adv_bline.append((bline_adv_secret_acc/nb_runs, bline_mean_utility_acc/nb_runs))
adv_ours.append((ours_adv_secret_acc/nb_runs, ours_mean_utility_acc/nb_runs))
return adv_ours, adv_bline
def main():
artifacts_dir = 'artifacts/attributes_experiment/'
baseline_dir = 'artifacts/attributes_baseline_experiment/'
epsilons = ['0.001', '0.005', '0.01', '0.02', '0.03']
use_filter = 'True'
for attr in ['Smiling', 'Male', 'Wearing_Lipstick', 'Young']:
if attr in ['Smiling', 'Male']: # entropy loss only run for two attributes
adv_ours_ent, adv_bline_ent = load_results(epsilons,
'artifacts/attributes_entropy_experiment',
'artifacts/attributes_entropy_baseline_experiment', 2, attr)
adv_ours, adv_bline = load_results(epsilons,
'artifacts/attributes_experiment',
'artifacts/attributes_baseline_experiment', 5, attr)
plt.rcParams["mathtext.fontset"] = "cm"
fig, ax = plt.subplots()
scatterplot(adv_ours, label='ours (log-likelihood)', point_labels=epsilons, ax=ax,
color='green')
scatterplot(adv_bline, label='baseline (log-likelihood)', point_labels=epsilons, ax=ax,
color='red')
if attr in ['Smiling', 'Male']: # ablation only run for two attributes
scatterplot(adv_ours_ent, label='ours (entropy)', point_labels=epsilons,
ax=ax, color='green', style='--x')
scatterplot(adv_bline_ent, label='baseline (entropy)',
point_labels=epsilons, ax=ax, color='red', style='--x')
if attr == 'Wearing_Lipstick':
plt.title('Lipstick')
elif attr == 'Young':
plt.title('Age')
else:
plt.title(attr)
plt.ylabel("Utility score")
plt.xlabel("Privacy loss".format(attr.lower()))
plt.legend(loc='upper left')
plt.savefig('{}_privacy_utility_tradeoff.pdf'.format(attr))
if __name__ == '__main__':
main()
```
{
"source": "JohnMasapantaPozo/Litho-Machine-Leraning-Web-App",
"score": 3
}
#### File: JohnMasapantaPozo/Litho-Machine-Leraning-Web-App/main.py
```python
from operator import index
from pandas.core.arrays import categorical
import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.express as px
import pickle
from tensorflow import keras
import os
from utilities import fixed_clustering_plot, litho_confusion_matrix, scatter_plot, predictions_plot
st.set_option('deprecation.showPyplotGlobalUse', False)
def _max_width_():
max_width_str = f"max-width: 1200px;"
st.markdown(
f"""
<style>
.reportview-container .main .block-container{{
{max_width_str}
}}
</style>
""",
unsafe_allow_html=True,
)
_max_width_()
st.title("MACHINE AND DEEP LEARNING FOR LITHOLOGY PREDICTION IN THE NORTH SEA - FORCE DATASET")
st.markdown("<h3 style='text-align: right; color: black;'> Author: <NAME></h3>", unsafe_allow_html=True)
st.sidebar.title("VISUALIZATION SELECTION")
st.sidebar.markdown("""<img src="https://allaboutintelligence.files.wordpress.com/2020/09/deep-learning-methodologies-and-applications.gif?w=775" style="width: 100%; cursor: default;" >""", unsafe_allow_html=True)
#st.sidebar.markdown("Select the well you want to visualize:")
#--------------------------------------
## 1. SELECTING DATASET
st.sidebar.markdown("-------")
st.sidebar.subheader("SELECTING DATASET")
selected_dataset = st.sidebar.radio("Select the dataset you want to predict and visualize:", options=['Open dataset', 'Hidden dataset'], index=1)
#loading raw data
@st.cache(allow_output_mutation=True, max_entries=10, ttl=3600)
def load_raw():
#main_directory = 'C:/Users/Alexandra/Desktop/GEOSCIENCES MASTERS PROGRAM/THESIS PROJECT/Thesis/App_thesis/raw_data/'
lithology_numbers = {30000: 0, 65030: 1, 65000: 2, 80000: 3, 74000: 4, 70000: 5,
70032: 6, 88000: 7, 86000: 8, 99000: 9, 90000: 10, 93000: 11
}
#raw_hidden = pd.read_csv(main_directory + "\hidden_test.csv", sep=";")
raw_hidden = pd.read_csv(r"./raw_data/hidden_test.csv", sep=";")
raw_hidden = raw_hidden.rename(columns={'FORCE_2020_LITHOFACIES_LITHOLOGY':'LITHO'})
raw_hidden = raw_hidden.drop(['FORCE_2020_LITHOFACIES_CONFIDENCE'], axis=1)
raw_hidden['LITHO'] = raw_hidden["LITHO"].map(lithology_numbers)
test_data = pd.read_csv("./raw_data/open_test.csv", sep=';')
test_labels = pd.read_csv("./raw_data/open_target.csv", sep=';')
raw_open = pd.merge(test_data, test_labels, on=['WELL', 'DEPTH_MD'])
raw_open = raw_open.rename(columns={'FORCE_2020_LITHOFACIES_LITHOLOGY':'LITHO'})
raw_open['LITHO'] = raw_open["LITHO"].map(lithology_numbers)
return raw_open, raw_hidden
raw_open, raw_hidden = load_raw()
#loading treated data
#@st.cache(allow_output_mutation=True, max_entries=10, ttl=3600)
def load_treated(file_name):
if os.path.exists(os.path.join(r"./real_time_predictions", file_name + ".csv")):
loaded_df = pd.read_csv(r"./real_time_predictions/"+ file_name + ".csv")
else:
#df_hidden_treated.to_csv(os.path.join(r"./real_time_predictions/" , file_name + ".csv"))
loaded_df = pd.read_csv(r"./treated_data/"+ file_name + ".csv")
return loaded_df
data_dict = {'Open dataset': 'open_data', 'Hidden dataset': 'hidden_data'}
if selected_dataset == 'Open dataset':
raw_data = raw_open
# file_name = data_dict[selected_dataset]
# selected_data = load_treated(file_name)
else:
raw_data = raw_hidden
# file_name = data_dict[selected_dataset]
# selected_data = load_treated(file_name)
file_name = data_dict[selected_dataset]
selected_data = load_treated(file_name)
#---------------------------------
#2. RAW DATA VISUALIZATION
st.sidebar.markdown("-------")
st.sidebar.subheader("RAW DATA VISUALIZATION")
#selecting well
raw_well = st.sidebar.selectbox("Select the well to be visualized", raw_data.WELL.unique(), index=0)
raw_well_df = raw_data[raw_data.WELL == raw_well].set_index("DEPTH_MD")
#selecting logs
raw_logs = raw_well_df.columns.drop(["WELL", "X_LOC", "Y_LOC", "Z_LOC", "GROUP",
"FORMATION", 'LITHO'])
raw_continuous_logs = st.sidebar.multiselect("Select the well logs to display", raw_logs, default='GR', key='raw_logs')
raw_facies_logs = ['LITHO']
raw_cols = [*raw_continuous_logs, *raw_facies_logs]
raw_well_df = raw_well_df[raw_cols]
if st.sidebar.button('Plot raw logs', key='raw_logs_plot'):
#if st.sidebar.checkbox("PLOT PREDICTIONS", True, key='1'):
st.markdown("### RAW LOGS AND ACTUAL FACIES")
st.markdown("<h3 style='text-align: center; color: black;'> WELL {} </h3>".format(raw_well), unsafe_allow_html=True)
pred_plot = fixed_clustering_plot(raw_well_df, raw_cols, raw_facies_logs)
st.pyplot(fig=pred_plot, use_container_width=True)
#---------------------------------------
## 2. MAKING PREDICTIONS REAL TIME
st.sidebar.markdown("-------")
st.sidebar.subheader("REAL TIME PREDICTIONS")
# uploading pretrained models
import sklearn
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
import xgboost
import lightgbm
import catboost
@st.cache
def pretrained_models():
desicion_tree = pickle.load(open(r"./models/decision_tree.pkl", 'rb'))
k_nn = pickle.load(open(r"./models/k_nearest_neight.pkl", 'rb'))
logistic_regression = pickle.load(open(r"./models/logistic_regression.pkl", 'rb'))
#extreme_gb = pickle.load(open('models\extreme_gb.sav', 'rb'))
categorical_gb = pickle.load(open(r"./models/categorical_gb.pkl", 'rb'))
light_gb = pickle.load(open(r"./models/light_gb.pkl", 'rb'))
neural_network = keras.models.load_model(r"./models/neural_network.h5")
return desicion_tree, k_nn, logistic_regression, light_gb, categorical_gb, neural_network
dt_model, knn_model, lr_model, light_model, cat_model, nn_model = pretrained_models()
models = [lr_model, dt_model, knn_model, nn_model, light_model, cat_model]
models_names = ['Logistic Regression', 'Decision Tree', 'K-NN', 'Neural Network', 'LGBMBOOST', 'CATBOOST']
models_dict = dict(zip(models_names, models))
def features_selection(model_name):
if model_name == 'Logistic Regression':
selected_features = ['DTS_COMB', 'G', 'P_I', 'GR','NPHI_COMB',
'DTC', 'RHOB', 'DT_R', 'Z_LOC', 'S_I','K']
elif model_name == 'Decision Tree':
selected_features = ['Cluster', 'DEPTH_MD', 'X_LOC', 'Y_LOC', 'Z_LOC', 'CALI',
'RSHA', 'RMED', 'RDEP', 'RHOB', 'GR', 'NPHI', 'PEF', 'DTC', 'SP', 'BS',
'ROP', 'DTS', 'DCAL', 'DRHO', 'RMIC', 'GROUP_encoded',
'FORMATION_encoded', 'WELL_encoded', 'DTS_pred', 'DTS_COMB',
'NPHI_pred', 'NPHI_COMB', 'RHOB_pred', 'RHOB_COMB', 'DTC_pred',
'DTC_COMB', 'S_I', 'P_I', 'DT_R', 'G', 'K', 'MD_TVD']
elif model_name == 'K-NN':
selected_features = ['GR', 'FORMATION_encoded', 'GROUP_encoded', 'NPHI_COMB', 'RHOB',
'X_LOC', 'BS', 'CALI', 'SP', 'WELL_encoded', 'Z_LOC', 'DT_R', 'DEPTH_MD', 'DTC', 'Cluster']
elif model_name == 'Neural Network':
selected_features = ['GROUP_encoded', 'GR', 'NPHI_COMB', 'Y_LOC', 'RHOB',
'DEPTH_MD', 'FORMATION_encoded', 'Z_LOC', 'WELL_encoded', 'X_LOC',
'RMED', 'CALI', 'DTC', 'MD_TVD', 'DT_R',
'PEF', 'RDEP', 'DTS_COMB', 'G', 'SP',
'Cluster', 'K', 'P_I', 'DRHO', 'DCAL']
elif model_name == 'LGBMBOOST':
selected_features = ['RDEP', 'GR', 'NPHI_COMB', 'G', 'DTC', 'DTS_COMB', 'RSHA', 'DT_R',
'RHOB', 'K', 'DCAL', 'Y_LOC', 'GROUP_encoded', 'WELL_encoded',
'DEPTH_MD', 'Z_LOC', 'CALI', 'X_LOC', 'RMED', 'PEF', 'SP', 'MD_TVD',
'ROP', 'DRHO']
elif model_name == 'CATBOOST':
selected_features = ['GR', 'NPHI_COMB', 'DTC', 'DTS_COMB','RHOB',
'Y_LOC', 'GROUP_encoded', 'WELL_encoded',
'FORMATION_encoded', 'DEPTH_MD', 'Z_LOC', 'CALI',
'X_LOC', 'RMED', 'SP', 'MD_TVD']
return selected_features
#Model selection
selected_models = st.sidebar.multiselect("Select model(s) to use during the predictions:", ['Logistic Regression', 'Decision Tree', 'K-NN', 'LGBMBOOST', 'CATBOOST', 'Neural Network'], default='LGBMBOOST')
models_header = {'Logistic Regression': 'LR_PRED',
'Decision Tree': 'DT_PRED',
'K-NN': 'KNN_PRED',
'LGBMBOOST': 'LGBM_PRED',
'CATBOOST': 'CAT_PRED',
'Neural Network' : 'NN_PRED'
}
#Predictions
results_data = raw_data.copy()
if st.sidebar.checkbox('Store results in cache:', True, key='updating_predictions'):
if st.sidebar.button('RUN', key='predict'):
for model_name in selected_models:
if model_name != 'Neural Network':
if not models_header[model_name] in selected_data.columns:
model_i = models_dict[model_name]
selected_features = features_selection(model_name)
y_hat = model_i.predict(selected_data[selected_features])
selected_data[models_header[model_name]] = y_hat
#storing results
selected_data.to_csv(r'./real_time_predictions/'+ file_name + '.csv')
results_data[models_header[model_name]] = selected_data[models_header[model_name]] #results frame
else:
results_data[models_header[model_name]] = selected_data[models_header[model_name]] #results frame
#results_data.to_csv(os.path.join(r"./predictions_visual/" , file_name + '.csv'))
else:
if not models_header[model_name] in selected_data.columns:
model_i = models_dict[model_name]
selected_features = features_selection(model_name)
y_hat_prob = model_i.predict(selected_data[selected_features])
y_hat = np.array(pd.DataFrame(y_hat_prob).idxmax(axis=1))
selected_data[models_header[model_name]] = y_hat
#storing results
selected_data.to_csv(r'./real_time_predictions/'+ file_name + '.csv')
results_data[models_header[model_name]] = selected_data[models_header[model_name]] #results frame
#selected_data.to_csv(r'./real_time_predictions/'+ file_name + '.csv')
else:
results_data[models_header[model_name]] = selected_data[models_header[model_name]] #results frame
#results_data.to_csv(r'./real_time_predictions/'+ 'hidden_res_saving' + '.csv')
results_data.to_csv(os.path.join(r"./predictions_visual/" , file_name + '.csv'))
else:
if st.sidebar.button('RUN', key='predict'):
for model_name in selected_models:
if model_name != 'Neural Network':
if not models_header[model_name] in selected_data.columns:
model_i = models_dict[model_name]
selected_features = features_selection(model_name)
y_hat = model_i.predict(selected_data[selected_features])
selected_data[models_header[model_name]] = y_hat
results_data[models_header[model_name]] = selected_data[models_header[model_name]] #results frame
#selected_data.to_csv(r'./real_time_predictions/'+ file_name + '.csv')
else:
results_data[models_header[model_name]] = selected_data[models_header[model_name]] #results frame
#results_data.to_csv(r'./real_time_predictions/'+ 'hidden_res_saving' + '.csv')
#results_data.to_csv(os.path.join(r"./predictions_visual/" , file_name + '.csv'))
else:
if not models_header[model_name] in selected_data.columns:
model_i = models_dict[model_name]
selected_features = features_selection(model_name)
y_hat_prob = model_i.predict(selected_data[selected_features])
y_hat = np.array(pd.DataFrame(y_hat_prob).idxmax(axis=1))
selected_data[models_header[model_name]] = y_hat
results_data[models_header[model_name]] = selected_data[models_header[model_name]] #results frame
#selected_data.to_csv(r'./real_time_predictions/'+ file_name + '.csv')
else:
results_data[models_header[model_name]] = selected_data[models_header[model_name]] #results frame
#results_data.to_csv(r'./real_time_predictions/'+ 'hidden_res_saving' + '.csv')
results_data.to_csv(os.path.join(r"./predictions_visual/" , file_name + '.csv'))
st.sidebar.markdown('----------------')
st.sidebar.subheader("RESULTS VISUALIZATION")
import sklearn
from sklearn.metrics import accuracy_score, recall_score, precision_score, classification_report
#if st.sidebar.checkbox('Show predition results:', False, key='showing_predictions'):
if not os.path.exists(os.path.join(r"./predictions_visual/" , file_name + '.csv')):
st.text('NO STORED RESULTS ON LOCAL DISK. PLEASE RUN THE PREDICTIONS SECTION FIRST...')
else:
results_data = pd.read_csv(os.path.join(r"./predictions_visual/" , file_name + '.csv'))
if all(elem in results_data.columns for elem in [models_header[model_name] for model_name in selected_models]):
selected_well = st.sidebar.selectbox("Select the well to be visualized", results_data.WELL.unique(), index=0, key='predicted_well')
selected_well_df = results_data[results_data.WELL == selected_well].set_index("DEPTH_MD")
facies_logs_names = [models_header[model_name] for model_name in selected_models if models_header[model_name] in results_data.columns] + ['LITHO']
logs_drop = ["WELL", "X_LOC", "Y_LOC", "Z_LOC", "GROUP", "FORMATION", 'LITHO'] + facies_logs_names
log_names = selected_well_df.columns.drop(logs_drop)
# continuous_logs = st.sidebar.multiselect("Select the well logs to display:", log_names, default='GR', key='no_raw_logs')
# facies_logs = st.sidebar.multiselect("Select facies logs to visualize:", facies_logs_names, default='LITHO')
with st.container():
#selecting logs
continuous_logs = st.sidebar.multiselect("Select the well logs to display:", log_names, default='GR', key='no_raw_logs')
facies_logs = st.sidebar.multiselect("Select facies logs to visualize:", facies_logs_names, default='LITHO')
#final dataframe
cols = [*continuous_logs, *facies_logs]
DF = selected_well_df[cols]
#Showing scatter plots
if st.sidebar.checkbox('Show scatter plots and classification reports:', False, key='showing_scatter'):
col1, col2 = st.columns(2)
with col1:
model1 = st.sidebar.selectbox('Model 1 to display:', facies_logs, index=0, key='model1')
scat1 = st.sidebar.multiselect('First scatter plot:', log_names, default=['GR', 'RHOB'])
#color1 = st.sidebar.selectbox('Color coded by:', facies_logs, index=0, key='color_label1')
with col2:
model2 = st.sidebar.selectbox('Model 2 to display:', facies_logs, index=0, key='model2')
scat2 = st.sidebar.multiselect('Second scatter plot:', log_names, default=['NPHI', 'DTC'])
if st.sidebar.button('Plot predictions', key='predictions'):
# if st.sidebar.checkbox('Show scatter plots', True, key='2'):
st.markdown("## **PREDICTED FACIES**")
st.markdown("<h3 style='text-align: center; color: black;'> WELL {} </h3>".format(selected_well), unsafe_allow_html=True)
pred_plot = predictions_plot(DF, cols, facies_logs)
st.pyplot(fig=pred_plot, use_container_width=True)
st.markdown('------------')
st.markdown("## **STATISTICS VISUALIZATION**")
col1, col2 = st.columns(2)
with col1:
#color1 = st.sidebar.selectbox('Color coded by:', facies_logs, index=0, key='color_label1')
st.markdown("<h3 style='text-align: center; color: black;'> {} PREDICTED LITHOLOGY</h3>".format(model1), unsafe_allow_html=True)
st.plotly_chart(scatter_plot(selected_well_df, scat1[0], scat1[1], model1))
st.plotly_chart(scatter_plot(selected_well_df, scat2[0], scat2[1], model1))
with col2:
st.markdown("<h3 style='text-align: center; color: black;'> {} PREDICTED LITHOLOGY</h3>".format(model2), unsafe_allow_html=True)
st.plotly_chart(scatter_plot(selected_well_df, scat1[0], scat1[1], model2))
st.plotly_chart(scatter_plot(selected_well_df, scat2[0], scat2[1], model2))
# conf_matrix = litho_confusion_matrix(DF[actual_litho], DF[model_litho])
# st.pyplot(fig=conf_matrix)
st.markdown('------------')
st.markdown("## **CLASSIFICATION REPORTS**")
#showing classification reports
# cols_names = ['col'+ str(i) for i in range(len(selected_models))]
# cols_names = st.columns(len(cols_names))
col_1, col_2 = st.columns(2)
with col_1:
st.markdown("<h3 style='text-align: center; color: black;'> {} CLASSIFICATION REPORT</h3>".format(model1), unsafe_allow_html=True)
st.dataframe(pd.DataFrame(classification_report(results_data.LITHO, results_data[model1], output_dict=True)).T)
with col_2:
st.markdown("<h3 style='text-align: center; color: black;'> {} CLASSIFICATION REPORT</h3>".format(model2), unsafe_allow_html=True)
st.dataframe(pd.DataFrame(classification_report(results_data.LITHO, results_data[model2], output_dict=True)).T)
else:
if st.sidebar.button('Plot predictions', key='predictions'):
st.markdown("### PREDICTED FACIES")
st.markdown("<h3 style='text-align: center; color: black;'> WELL {} </h3>".format(selected_well), unsafe_allow_html=True)
pred_plot = predictions_plot(DF, cols, facies_logs)
st.pyplot(fig=pred_plot, use_container_width=True)
else:
st.text('SOME MODELS YOU WANT TO VISUALIZE ARE MISSING FROM THE STORED RESULTS. PLEASE RUN THE PREDICTIONS SECTION FIRST...')
```
{
"source": "johnmason27/ai-jarvis",
"score": 3
}
#### File: jarvis/services/file_service.py
```python
import json
class FileService:
'''
Contains methods responsible with handling files.
'''
def __init__(self, filepath):
'''
Configure the filepath for use in methods.
'''
self.filepath = filepath
def read_appsettings(self):
'''
Reads the appsettings for use in the program.
Returns:
appsettings: dictionary, containing the appsettings.
'''
appsettings = None
try:
with open(self.filepath, "r") as file_content:
# Turn Json into dictionary.
appsettings = json.load(file_content)
except FileNotFoundError:
print("Failed, file not found.")
except PermissionError:
print("Failed, file permissions error.")
return appsettings
```
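A minimal usage sketch; the import path is inferred from the folder layout, and the settings filename and key are placeholders rather than values from the repo:

```python
# Sketch: loading settings through FileService (filename and key are hypothetical).
from services.file_service import FileService  # path assumed from jarvis/services/

file_service = FileService("appsettings.json")
appsettings = file_service.read_appsettings()
if appsettings is not None:
    print(appsettings.get("assistant_name"))
```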
#### File: jarvis/services/internet_service.py
```python
import webbrowser
import time
import wikipedia
from assistant_logic.assistant import Assistant
class InternetService:
'''
Houses all the logic responsible for the internet.
'''
def __init__(self):
'''
Setting up the dependencies.
'''
self.assistant = Assistant()
def open_tab(self, url, website_name):
'''
Open a new tab for the user.
Args:
url: string, url search string.
website_name: string, website name.
'''
webbrowser.open_new_tab(url)
self.assistant.speak(f"{website_name} is open now")
print(f"{website_name} is open now")
time.sleep(1)
def search_web(self, statement):
'''
Open a new browser tab based on the statement given by the user.
Args:
statement: string, user command.
'''
statement = statement.replace("search", "")
search_query = ""
statement = statement.split()
# Create the query string.
for item in range(len(statement)):
if item == 0:
search_query += statement[item]
else:
search_query += "+" + statement[item]
webbrowser.open_new_tab(f"https://www.bing.com/search?q={search_query}")
self.assistant.speak("Website is open")
print("Website is open")
time.sleep(1)
def search_wikipedia(self, statement):
'''
Search Wikipedia based on the user statement.
Args:
statement: string, user command.
'''
self.assistant.speak("Searching Wikipedia...")
print("Searching Wikipedia...")
# Fetch wikipedia data.
try:
# Get rid of the word wikipedia in the statement.
statement = statement.replace("wikipedia", "")
results = wikipedia.summary(statement, sentences = 3)
self.assistant.speak("According to Wikipedia")
print(results)
self.assistant.speak(results)
except Exception:
self.assistant.speak("That Wikipedia page couldn't be found")
print("That Wikipedia page couldn't be found")
time.sleep(1)
```
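For clarity, `search_web` simply strips the word "search" and joins the remaining words with `+`; a standalone sketch of that string handling (without the browser or speech calls):

```python
# Sketch of the query construction used in InternetService.search_web.
statement = "search cute cat videos"
words = statement.replace("search", "").split()
search_query = ""
for i, word in enumerate(words):
    search_query += word if i == 0 else "+" + word
print("https://www.bing.com/search?q=" + search_query)
# -> https://www.bing.com/search?q=cute+cat+videos
```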
#### File: jarvis/services/quiz_service.py
```python
import time
import random
from assistant_logic.assistant import Assistant
class QuizService:
'''
Houses the quiz service logic.
'''
def __init__(self):
'''
Setup the quiz service dependencies.
'''
self.assistant = Assistant()
self.questions = [("What is the biggest animal in the world?", "blue whale"), ("Which country did brie cheese originate from?", "france"),
("What year was Heniz established?", "1869"), ("What is a baby rabbit called?", "kit"),
("As of 2020 who is manager of the england football team?", "gareth southgate"), ("What does He stand for on the periodic table?", "helium"),
("What is the capital of Australia?", "canberra"), ("Which bird can fly backwards?", "hummingbird"),
("When did the Vietnam War end?", "1975"), ("Which hit video game series has released games called World At War and Black Ops?", "call of duty"),
("What building did I'm a Celebrity 2020 taking place in?", "castle"), ("What type of nut is in the middle of a Ferrero Rocher?", "hazelnut"),
("What is a baby kangaroo called?", "joey"), ("What's the national flower of Japan?", "cherry blossom"),
("Which football team is known as The Red Devils?", "manchester united"), ("Which football team is known as The Baggies?", "west bromwich albion"),
("How many ghosts visit Scrooge in A Christmas Carol?", "4"), ("Which TV series has an alternate universe called The Upside Down?", "stranger things"),
("In Texas, it’s illegal to swear in front of what?", "corpse"), ("What was <NAME>’s natural hair color?", "red"),
("What do you call a group of unicorns?", "blessing"), ("What is banned in public places in Florida after 6 pm on a Thursday?", "farting"),
("What animal cannot stick out its tongue?", "crocodile"), ("With how many bricks is the Empire State Building made of?", "10 million"),
("According to Russian law, a homeless person must be where after 10 pm?", "at home"), ("How many years old the oldest piece of chewing gum?", "9000 years"),
("On Sunday, what is illegal to sell in Columbus, Ohio?", "cornflake"), ("What is illegal to eat with a cherry pie in Kansas?", "ice cream"),
("On average, what is the thing that Americans do 22 times in a day?", "open the fridge"), ("A crossbreed between a donkey and the zebra is known as?", "zonkey"),
("What was the first fruit that was eaten on the moon?", "peach"), ("How do you tell the age of a horse?", "it's teeth"),
("What sport has been played on the moon?", "golf"), ("How many noses does a slug have?", "four"),
("What were clocks missing before 1577?", "minute hands")]
def play_quiz(self):
'''
Play the quiz game.
You are asked 5 questions and given 10 seconds to think of an answer.
Then the assistant will ask for your answer and store whether you
got it right or wrong.
At the end you will get given a score out of 5.
'''
print("Welcome to the Quiz! Here's how this is going to work. Your going to be asked 5 questions total and then given a score out of 5 at the end. " +
"Each round you get asked a question and then given 10 seconds to think of an answer. After the 10 seconds are up you will asked by Jarvis for your answer. Until you " +
"have answered 5 questions. Once you have completed the quiz you'll get your score.")
self.assistant.speak("Welcome to the Quiz! Here's how this is going to work. Your going to be asked 5 questions total and then given a score out of 5 at the end. " +
"Each round you get asked a question and then given 10 seconds to think of an answer. After the 10 seconds are up you will be asked by Jarvis for your answer. Until you " +
"have answered 5 questions. Once you have completed the quiz you'll get your score.")
previous_questions = []
score = 0
# Perform 5 rounds.
for round in range(5):
self.assistant.speak(f"Round {round + 1}")
print(f"Round {round + 1}")
running = True
exists = True
# Get the question from the list and get another if it already exists.
while running:
random_question = random.choices(self.questions)
if random_question in previous_questions:
random_question = random.choices(self.questions)
else:
running = False
previous_questions.append(random_question)
self.assistant.speak(random_question[0][0])
print(random_question[0][0])
time.sleep(10)
# Get the user's answer.
while True:
self.assistant.speak("What is your answer?")
print("What is your answer?")
statement = self.assistant.take_command()
if statement != "None":
break
# Check the answer is correct or not.
if statement.lower() in random_question[0][1]:
print("Correct")
self.assistant.speak("Correct")
score += 1
else:
print("Incorrect")
self.assistant.speak("Incorrect")
time.sleep(1)
# Final score and exit speech.
print("Quiz Complete!")
self.assistant.speak("Quiz Complete!")
print("Drum role please...")
self.assistant.speak("Drum role please...")
print(f"Your final score is, {score} points!")
self.assistant.speak(f"Your final score is, {score} points!")
print("Well done and thanks for playing!")
self.assistant.speak("Well done thanks for playing!")
time.sleep(1)
```
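For reference, the answer check in `play_quiz` counts a response as correct when the spoken answer appears as a substring of the stored answer; a tiny worked sketch using one of the question tuples above:

```python
# Sketch of the answer check used in QuizService.play_quiz.
question, answer = ("What is a baby kangaroo called?", "joey")
statement = "Joey"                 # what the speech recognizer might return
print(statement.lower() in answer)  # True -> counted as correct
```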
#### File: jarvis/services/simon_says_service.py
```python
import time
import random
from assistant_logic.assistant import Assistant
class SimonSaysService:
'''
Houses the simon says services.
'''
def __init__(self):
'''
Setup for the service dependencies.
'''
self.assistant = Assistant()
self.game_commands = ["Touch your head", "Touch your toes", "Spin around", "Touch your ears", "Touch your ears", "Touch your eyes", "Jump!", "Touch your hair",
"Do 5 jumping jacks", "Do 5 press ups", "Do 5 situps", "Do 5 Burpees", "Wink with your left eye", "Wink with your right eye", "Walk like a penguin",
"Act like a monkey", "Sit down", "Stand on one foot", "Touch your nose", "Pretend to ride a horse", "Pretend to swim", "Turn around",
"Walk on the spot", "Pretend to sit on a chair", "Don't move", "Pretend to lift a very heavy box", "Pretend to drink water", "Walk in a straight line",
"Pretend to be cold", "Moo like a cow", "Whispher your name", "Do a silly dance", "Move in slow motion"]
self.simon_commands = ["Simon says", ""]
def mirror_me(self):
'''
Repeat whatever the user says.
'''
# Get the user command to mirror and repeat if they don't say anything.
while True:
self.assistant.speak("What does Simon say?")
print("What does Simon say?")
command = self.assistant.take_command()
if command != "None":
break
# Speak what they say.
self.assistant.speak("Simon says, " + command)
print("Simon says, " + command)
time.sleep(1)
def simon_says_game(self):
'''
Simon says game. Tasks are given at random over 10 rounds.
If the command doesn't have "Simon says" at the front you shouldn't
complete the task, and vice versa.
'''
# Perform 10 rounds.
for round in range(10):
# Get a random game command and random start of command.
simon_command = random.choices(self.simon_commands)
command = random.choices(self.game_commands)
# Print and speak new command.
if simon_command[0] != "":
self.assistant.speak(simon_command[0] + " " + command[0])
print(simon_command[0] + " " + command[0])
else:
self.assistant.speak(command[0])
print(command[0])
time.sleep(5)
self.assistant.speak("Game over! Thanks for playing!")
print("\nGame over! Thanks for playing!")
time.sleep(1)
```
{
"source": "JohnMasoner/MedicalZoo",
"score": 2
}
#### File: MedicalZoo/data/common.py
```python
import monai
import torch
import re
from data import Dataloader2d, Dataloader3d
from transforms import Transforms
from transforms.common import transform
def dataset(config, set_type):
'''
Choose a dataset to train or test
Args:
config (configparser object): the parsed configuration
set_type: The type of dataset to train or test
Return:
dataset_loader
'''
dim = config['Data']['Dimension']
assert dim in ['2','3'], 'Please check you config file, make sure dimensions is 2 or 3'
assert set_type in ['train','test'], 'Please check your dataset type, make sure set_type is train or test'
data_type = config['Data']['DataType']
data_type = data_type if len(list(re.sub('[!@#$%^&*]', '', data_type).split(','))) == 1 else list(re.sub('[!@#$%^&*]', '', data_type).split(','))
if dim == '2':
if isinstance(data_type, str): # a single string means mono-modal data; a list would mean multi-modal
if set_type == 'train':
adjacent_layer =None if config['Data']['AdjacentLayer'].lower() == 'none' or not config['Data']['AdjacentLayer'].isdigit() else int(config['Data']['AdjacentLayer'])
train_dataset = Dataloader2d.MonoMedDataSets2D(config['Paths']['file_dir'], file_mode='NPY_train', data_type=data_type, adjacent_layer=adjacent_layer, transform = transform(config))
train_dataload = torch.utils.data.DataLoader(train_dataset, batch_size= int(config['Data']['BatchSize']), num_workers= int(config['Data']['NumWorkers']), shuffle = True)
return train_dataload
elif set_type == 'test':
validate_dataset = Dataloader2d.MonoMedDataSets2DTest(config['Paths']['file_dir'],file_mode='NPY_val', data_type=data_type)
validate_load = torch.utils.data.DataLoader(validate_dataset, batch_size= 1)
return validate_load
else:
raise ValueError('Error Set Type')
elif isinstance(data_type, list): # a list of modalities means multi-modal data
if set_type == 'train':
adjacent_layer =None if config['Data']['AdjacentLayer'].lower() == 'none' or not config['Data']['AdjacentLayer'].isdigit() else int(config['Data']['AdjacentLayer'])
train_dataset = Dataloader2d.MultiMedDatasets2D(config['Paths']['file_dir'], file_mode='NPY_train', data_type=data_type, adjacent_layer=adjacent_layer, transform = transform(config))
train_dataload = torch.utils.data.DataLoader(train_dataset, batch_size=int(config['Data']['BatchSize']), num_workers = int(config['Data']['NumWorkers']), shuffle=True)
return train_dataload
elif set_type == 'test':
adjacent_layer =None if config['Data']['AdjacentLayer'].lower() == 'none' or not config['Data']['AdjacentLayer'].isdigit() else int(config['Data']['AdjacentLayer'])
validate_dataset = Dataloader2d.MultiMedDatasets2DTest(config['Paths']['file_dir'], file_mode='NPY_val', data_type=data_type, adjacent_layer=adjacent_layer)
validate_load = torch.utils.data.DataLoader(validate_dataset, batch_size= 1)
return validate_load
else:
raise ValueError('Error Set Type')
else:
raise Exception('Error Check the Code')
elif dim == '3':
if set_type == 'train':
train_dataset = Dataloader3d.MedDataSets3D(config['Paths']['file_dir'], file_mode='NPY_train', data_type=data_type)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=int(config['Data']['BatchSize']), num_workers=int(config['Data']['NumWorkers']), shuffle=True)
return train_loader
elif set_type == 'test':
validate_dataset = Dataloader3d.MedDataSets3D(config['Paths']['file_dir'], file_mode='NPY_val', data_type=data_type)
validate_load = torch.utils.data.DataLoader(validate_dataset , batch_size=int(config['Data']['BatchSize']),num_workers=int(config['Data']['NumWorkers']), shuffle=True)
return validate_load
else:
raise ValueError('Error Set Type')
else:
raise ValueError('Error Data Dimension, Please check your config file')
def MultiLoader(data_keys, data, types = 'train'):
sample = {}
data_keys = list(data.keys())
data_keys.remove('label')
for i in data_keys:
sample[i] = data[i].type(torch.FloatTensor).cuda(non_blocking=True)
if types == 'train':
data = torch.cat([sample[i] for i in list(sample.keys())], axis=1)
else:
if len(sample.get(next(iter(sample))).shape) == 4:
data = torch.cat([sample[i] for i in list(sample.keys())], axis=2)
elif len(sample.get(next(iter(sample))).shape) == 5:
data = torch.cat([sample[i] for i in list(sample.keys())], axis=2)
else:
raise ValueError('Please check the dimensions')
return data
```
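`dataset()` only touches a handful of configuration keys; a minimal sketch of a config consistent with those lookups (the section values, modality names and path below are placeholders, not values from the repo):

```python
# Sketch: the config keys read by data/common.py, filled with placeholder values.
import configparser

config = configparser.ConfigParser()
config.read_string("""
[Data]
Dimension = 2
DataType = t1,t2
AdjacentLayer = none
BatchSize = 8
NumWorkers = 4

[Paths]
file_dir = /path/to/dataset
""")
# dataset(config, 'train') would then build a 2D multi-modal training loader.
```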
#### File: MedicalZoo/data/Dataloader3d.py
```python
import os
import numpy as np
import glob
import torch
import random
import monai
from transforms import Transforms
from data import Dataloader2d
class MedDataSets3D(Dataloader2d.MultiMedDatasets2DTest):
def __init__(self, file_dir:str=None, file_mode = None, data_type = None):
self.file_dir = os.path.join(file_dir, file_mode)
self.label = sorted(glob.glob(os.path.join(self.file_dir,'*/label/')))
self.adjacent_layer = None
# self.data_type = [i for i in data_type.replace(' ','').split(',')]
self.data_type = data_type
def __getitem__(self, idx):
sample = {}
sample['label'] = self.Read3DData(self.label[idx])
for i in self.data_type:
sample[f'{i}'] = self.Read3DData(self.label[idx].replace('label', i))
sample = self.RandCropLayer(sample)
return sample
def Read3DData(self, modal_path):
data_list = [os.path.join(modal_path,f'{str(i)}.npy') for i in range(len(glob.glob(os.path.join(modal_path,'*'))))]
data = []
for idx, i in enumerate(data_list):
data.append(self.ReadData(i)[np.newaxis,:])
data = self.Normalization(np.vstack(data), 4)
return data
def RandCropLayer(self, sample):
d_shape = sample['label'].shape[1]
start_layer = random.randint(0,d_shape-32-1)
for i in sample.keys():
sample[i] = sample[i][:,start_layer:start_layer+32,:]
return sample
```
#### File: MedicalZoo/net/unet_sptial.py
```python
from typing import Optional, Sequence, Union
import torch
import torch.nn as nn
from monai.networks.blocks import Convolution, UpSample
from monai.networks.layers.factories import Conv, Pool
from monai.utils import deprecated_arg, ensure_tuple_rep
class AttentionModule(nn.Module):
'''Attention module: combines a depth-wise convolution, a depth-wise dilation convolution and a point-wise (1x1) convolution
Args:
dim(int): the number of channels of the input data
Return:
u * attn(torch.Tensor): the input modulated by the attention map
'''
def __init__(self, dim):
super().__init__()
# depth-wise convolution
self.conv0 = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
# depth-wise dilation convolution
self.conv_spatial = nn.Conv2d(dim, dim, 7, stride=1, padding=9, groups=dim, dilation=3)
# channel convolution (1×1 convolution)
self.conv1 = nn.Conv2d(dim, dim, 1)
def forward(self, x):
u = x.clone()
attn = self.conv0(x)
attn = self.conv_spatial(attn)
attn = self.conv1(attn)
return u * attn
class TwoConv(nn.Sequential):
"""two convolutions."""
@deprecated_arg(name="dim", new_name="spatial_dims", since="0.6", msg_suffix="Please use `spatial_dims` instead.")
def __init__(
self,
spatial_dims: int,
in_chns: int,
out_chns: int,
act: Union[str, tuple],
norm: Union[str, tuple],
bias: bool,
dropout: Union[float, tuple] = 0.0,
dim: Optional[int] = None,
):
"""
Args:
spatial_dims: number of spatial dimensions.
in_chns: number of input channels.
out_chns: number of output channels.
act: activation type and arguments.
norm: feature normalization type and arguments.
bias: whether to have a bias term in convolution blocks.
dropout: dropout ratio. Defaults to no dropout.
.. deprecated:: 0.6.0
``dim`` is deprecated, use ``spatial_dims`` instead.
"""
super().__init__()
if dim is not None:
spatial_dims = dim
conv_0 = Convolution(spatial_dims, in_chns, out_chns, act=act, norm=norm, dropout=dropout, bias=bias, padding=1)
att = AttentionModule(out_chns)
conv_1 = Convolution(
spatial_dims, out_chns, out_chns, act=act, norm=norm, dropout=dropout, bias=bias, padding=1
)
self.add_module("conv_0", conv_0)
self.add_module("att", att)
self.add_module("conv_1", conv_1)
class Down(nn.Sequential):
"""maxpooling downsampling and two convolutions."""
@deprecated_arg(name="dim", new_name="spatial_dims", since="0.6", msg_suffix="Please use `spatial_dims` instead.")
def __init__(
self,
spatial_dims: int,
in_chns: int,
out_chns: int,
act: Union[str, tuple],
norm: Union[str, tuple],
bias: bool,
dropout: Union[float, tuple] = 0.0,
dim: Optional[int] = None,
):
"""
Args:
spatial_dims: number of spatial dimensions.
in_chns: number of input channels.
out_chns: number of output channels.
act: activation type and arguments.
norm: feature normalization type and arguments.
bias: whether to have a bias term in convolution blocks.
dropout: dropout ratio. Defaults to no dropout.
.. deprecated:: 0.6.0
``dim`` is deprecated, use ``spatial_dims`` instead.
"""
super().__init__()
if dim is not None:
spatial_dims = dim
max_pooling = Pool["MAX", spatial_dims](kernel_size=2)
convs = TwoConv(spatial_dims, in_chns, out_chns, act, norm, bias, dropout)
self.add_module("max_pooling", max_pooling)
self.add_module("convs", convs)
class UpCat(nn.Module):
"""upsampling, concatenation with the encoder feature map, two convolutions"""
@deprecated_arg(name="dim", new_name="spatial_dims", since="0.6", msg_suffix="Please use `spatial_dims` instead.")
def __init__(
self,
spatial_dims: int,
in_chns: int,
cat_chns: int,
out_chns: int,
act: Union[str, tuple],
norm: Union[str, tuple],
bias: bool,
dropout: Union[float, tuple] = 0.0,
upsample: str = "deconv",
pre_conv: Optional[Union[nn.Module, str]] = "default",
interp_mode: str = "linear",
align_corners: Optional[bool] = True,
halves: bool = True,
dim: Optional[int] = None,
):
super().__init__()
if dim is not None:
spatial_dims = dim
if upsample == "nontrainable" and pre_conv is None:
up_chns = in_chns
else:
up_chns = in_chns // 2 if halves else in_chns
self.upsample = UpSample(
spatial_dims,
in_chns,
up_chns,
2,
mode=upsample,
pre_conv=pre_conv,
interp_mode=interp_mode,
align_corners=align_corners,
)
self.convs = TwoConv(spatial_dims, cat_chns + up_chns, out_chns, act, norm, bias, dropout)
def forward(self, x: torch.Tensor, x_e: Optional[torch.Tensor]):
"""
Args:
x: features to be upsampled.
x_e: features from the encoder.
"""
x_0 = self.upsample(x)
if x_e is not None:
# handling spatial shapes due to the 2x maxpooling with odd edge lengths.
dimensions = len(x.shape) - 2
sp = [0] * (dimensions * 2)
for i in range(dimensions):
if x_e.shape[-i - 1] != x_0.shape[-i - 1]:
sp[i * 2 + 1] = 1
x_0 = torch.nn.functional.pad(x_0, sp, "replicate")
x = self.convs(torch.cat([x_e, x_0], dim=1)) # input channels: (cat_chns + up_chns)
else:
x = self.convs(x_0)
return x
class BasicUNet(nn.Module):
@deprecated_arg(
name="dimensions", new_name="spatial_dims", since="0.6", msg_suffix="Please use `spatial_dims` instead."
)
def __init__(
self,
spatial_dims: int = 2,
in_channels: int = 1,
out_channels: int = 2,
features: Sequence[int] = (32, 32, 64, 128, 256, 32),
act: Union[str, tuple] = ("LeakyReLU", {"negative_slope": 0.1, "inplace": True}),
norm: Union[str, tuple] = ("instance", {"affine": True}),
bias: bool = True,
dropout: Union[float, tuple] = 0.0,
upsample: str = "deconv",
dimensions: Optional[int] = None,
):
super().__init__()
if dimensions is not None:
spatial_dims = dimensions
fea = ensure_tuple_rep(features, 6)
print(f"BasicUNet features: {fea}.")
self.conv_0 = TwoConv(spatial_dims, in_channels, features[0], act, norm, bias, dropout)
self.down_1 = Down(spatial_dims, fea[0], fea[1], act, norm, bias, dropout)
self.down_2 = Down(spatial_dims, fea[1], fea[2], act, norm, bias, dropout)
self.down_3 = Down(spatial_dims, fea[2], fea[3], act, norm, bias, dropout)
self.down_4 = Down(spatial_dims, fea[3], fea[4], act, norm, bias, dropout)
self.upcat_4 = UpCat(spatial_dims, fea[4], fea[3], fea[3], act, norm, bias, dropout, upsample)
self.upcat_3 = UpCat(spatial_dims, fea[3], fea[2], fea[2], act, norm, bias, dropout, upsample)
self.upcat_2 = UpCat(spatial_dims, fea[2], fea[1], fea[1], act, norm, bias, dropout, upsample)
self.upcat_1 = UpCat(spatial_dims, fea[1], fea[0], fea[5], act, norm, bias, dropout, upsample, halves=False)
self.final_conv = Conv["conv", spatial_dims](fea[5], out_channels, kernel_size=1)
def forward(self, x: torch.Tensor):
"""
Args:
x: input should have spatially N dimensions
``(Batch, in_channels, dim_0[, dim_1, ..., dim_N])``, N is defined by `dimensions`.
It is recommended to have ``dim_n % 16 == 0`` to ensure all maxpooling inputs have
even edge lengths.
Returns:
A torch Tensor of "raw" predictions in shape
``(Batch, out_channels, dim_0[, dim_1, ..., dim_N])``.
"""
x0 = self.conv_0(x)
x1 = self.down_1(x0)
x2 = self.down_2(x1)
x3 = self.down_3(x2)
x4 = self.down_4(x3)
u4 = self.upcat_4(x4, x3)
u3 = self.upcat_3(u4, x2)
u2 = self.upcat_2(u3, x1)
u1 = self.upcat_1(u2, x0)
logits = self.final_conv(u1)
return logits
```
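A quick smoke test of the attention-augmented BasicUNet; the import path is inferred from the file header, and the input size is chosen as a multiple of 16 as suggested by the `forward()` docstring:

```python
# Sketch: dummy forward pass through the attention-augmented BasicUNet.
import torch
from net.unet_sptial import BasicUNet  # module path assumed from the file header

model = BasicUNet(spatial_dims=2, in_channels=1, out_channels=2)
x = torch.randn(1, 1, 64, 64)      # (Batch, in_channels, H, W), H and W divisible by 16
with torch.no_grad():
    logits = model(x)
print(logits.shape)                # expected: torch.Size([1, 2, 64, 64])
```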
#### File: MedicalZoo/transforms/Transforms.py
```python
import monai
import torch
import random
class RandCropData:
def __init__(self, roi_size:int):
self.roi_size = roi_size
def random_size(self, img_size):
# left = random.randint(0,img_size[1] - self.roi_size - 1)
# bottom = random.randint(0, img_size[1] - self.roi_size - 1)
left = random.randint(0,max(img_size) - self.roi_size - 1)
bottom = random.randint(0, max(img_size) - self.roi_size - 1)
return (left, bottom)
def crop_data(self, img, left_top_point, types):
if types == 'train':
return img[:,left_top_point[0]:left_top_point[0]+self.roi_size,left_top_point[1]:left_top_point[1]+self.roi_size]
elif types == 'test':
return img[:,:,left_top_point[0]:left_top_point[0]+self.roi_size,left_top_point[1]:left_top_point[1]+self.roi_size]
def __call__(self, sample, types='train'):
left_top_point = self.random_size(sample['label'].shape)
for i in list(sample.keys()):
sample[i] = self.crop_data(sample[i], left_top_point, types)
return sample
class LabelCrop:
def __init__(self, roi_size:int):
self.roi_size = roi_size
def read_label_point(self, label):
label_data = torch.where(label==1)
if len(label_data[0]) == 0:
return (0,256,256)
rnd_point = random.randint(0, len(label_data[0])-1)
return (0, label_data[1][rnd_point], label_data[2][rnd_point])
def get_crop_point(self, data):
roi_size = self.roi_size // 2
left_point = data[-2] - roi_size
if left_point <0:
return (0, self.roi_size)
top_point = data[-1] - roi_size
if top_point < 0:
return (self.roi_size, 0)
return (left_point, top_point)
def crop_data(self, img, left_top_point, types):
if types == 'train':
return img[:,left_top_point[0]:left_top_point[0]+self.roi_size,left_top_point[1]:left_top_point[1]+self.roi_size]
elif types == 'test':
return img[:,:,left_top_point[0]:left_top_point[0]+self.roi_size,left_top_point[1]:left_top_point[1]+self.roi_size]
def __call__(self, sample, types='train'):
if types =='test':
return sample
label_point = self.read_label_point(sample['label'])
left_top_point = self.get_crop_point(label_point)
for i in list(sample.keys()):
sample[i] = self.crop_data(sample[i], left_top_point, types)
return sample
class GenerateMask:
def __init__(self, num_patches, prob:float=0.3):
self.num_patches = num_patches
self.prob = prob
def mask(self, data):
if float(data.max().item()) < 0:
return data
else:
if random.random() <= self.prob:
return data.quantile(0.5, keepdim=True)
else:
return data
def scan_data(self, data):
shape = data.shape[-1]
for i in range(shape // self.num_patches):
for j in range(shape // self.num_patches):
data[:, i*self.num_patches:(i+1)*self.num_patches, j*self.num_patches:(j+1)*self.num_patches] = self.mask(data[:, i*self.num_patches:(i+1)*self.num_patches, j*self.num_patches:(j+1)*self.num_patches])
return data
def __call__(self, sample):
for i in list(sample.keys()):
if i == 'label':
sample[i] = sample[i]
else:
sample[i] = self.scan_data(sample[i])
return sample
```
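A short sketch of `RandCropData` applied to the sample dictionaries used by the loaders; the modality key and tensor sizes are assumptions for illustration:

```python
# Sketch: random 256x256 crop of a dummy (C, H, W) sample dictionary.
# 'ct' is a placeholder modality name; real keys come from the config's DataType.
import torch
from transforms.Transforms import RandCropData  # import path assumed from the file header

sample = {
    'label': torch.zeros(1, 512, 512),
    'ct': torch.randn(1, 512, 512),
}
crop = RandCropData(roi_size=256)
sample = crop(sample, types='train')
print(sample['ct'].shape, sample['label'].shape)   # both torch.Size([1, 256, 256])
```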
#### File: MedicalZoo/utils/logger.py
```python
import torch
import numpy as np
import random
from visdom import Visdom
def tensor2img(x: torch.Tensor) -> np.ndarray:
image = x[0].cpu().float().numpy()
image = (image - image.min())/(image.max() - image.min()) *255 if image.max() > 0 else image
image = image if len(image.shape) == 3 else image[:,random.randint(0,32-1),:]
assert len(image.shape) == 3, "Image should have 3 dimensions"
return image.astype(np.uint8)
class Logger(object):
def __init__(
self,
n_epochs:int,
batch_epochs:int,
port:int=8889,
env:str='main'
):
self.viz = Visdom(port=port, use_incoming_socket=False, env=env)
self.n_epochs = n_epochs
self.batch_epochs = batch_epochs
self.batch = 1
self.epoch = 1
self.loss_window = {}
self.losses = {}
self.images_window = {}
def log(self, losses: dict=None, images: dict=None):
for i, loss_name in enumerate(losses.keys()):
if loss_name not in self.losses:
# print(losses[loss_name].item(), type(losses[loss_name]))
self.losses[loss_name] = losses[loss_name].item()
# self.losses[loss_name] = losses[loss_name].data[0]
else:
self.losses[loss_name] += losses[loss_name].item()
# self.losses[loss_name] += losses[loss_name].date[0]
# losses value
# if (self.batch % self.batch_epochs) == 0:
if (self.batch % self.batch_epochs) == 0:
for loss_name, loss in self.losses.items():
if loss_name not in self.loss_window:
self.loss_window[loss_name] = self.viz.line(X = np.array([self.epoch]), Y=np.array([loss/self.batch]), opts={'xlabel':'step', 'ylabel': loss_name, 'title': loss_name})
else:
self.viz.line(X = np.array([self.epoch]), Y=np.array([loss/self.batch]), win=self.loss_window[loss_name], update='append',opts={'xlabel':'step', 'ylabel': loss_name, 'title': loss_name})
self.losses[loss_name] = 0
self.epoch += 1
self.batch = 1
else:
self.batch += 1
# plot images
for image_name, image in images.items():
# print(image_name, type(image), image.shape, image.data)
if image_name not in self.images_window:
self.images_window[image_name] = self.viz.image(tensor2img(image.data), opts={'title':image_name})
else:
self.viz.image(tensor2img(image.data), win=self.images_window[image_name],opts={'title':image_name})
class MetricTracker(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
```
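`MetricTracker` is a plain running-average helper; a short usage sketch with made-up batch losses:

```python
# Sketch: weighted running average of per-batch losses with MetricTracker.
from utils.logger import MetricTracker  # import path assumed from the file header

tracker = MetricTracker()
for batch_loss, batch_size in [(0.9, 8), (0.7, 8), (0.5, 4)]:
    tracker.update(batch_loss, n=batch_size)
print(tracker.avg)   # average weighted by the number of samples per batch
```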
{
"source": "johnmathews/quant1",
"score": 3
}
#### File: lectures/correlation/Momentum_with_Correlation_Reduction_Algorithm.py
```python
import numpy as np
import scipy.stats
import pandas as pd
def initialize(context):
context.lookback = 300
context.return_window = 50
context.longleverage = 0.5
context.shortleverage = -0.5
context.reduce_correlation = True
# There's bad data for this security so I ignore it
context.ignores = [sid(7143)]
schedule_function(trade,
date_rule=date_rules.month_start(),
time_rule=time_rules.market_open(minutes=20))
def handle_data(context, data):
leverage=context.account.leverage
exposure=context.account.net_leverage
record(leverage=leverage, exposure=exposure)
def trade(context, data):
prices = np.log(history(context.lookback, '1d', 'price').dropna(axis=1))
R = (prices / prices.shift(context.return_window)).dropna()
# Subtract the cross-sectional average out of each data point on each day.
ranks = (R.T - R.T.mean()).T.mean()
# Take the top and bottom percentiles for the long and short baskets
lower, upper = ranks.quantile([.05, .95])
shorts = ranks[ranks <= lower]
longs = ranks[ranks >= upper]
# Get weights that reduce the correlation within each basket
if context.reduce_correlation:
daily_R = prices.pct_change().dropna()
longs = get_reduced_correlation_weights(daily_R[longs.index])
shorts = get_reduced_correlation_weights(daily_R[shorts.index])
else:
# Use even weights
longs = longs.abs()
longs /= longs.sum()
shorts = shorts.abs()
shorts /= shorts.sum()
for stock in data:
if stock in context.ignores:
continue
try:
if stock in shorts.index:
order_target_percent(stock,
context.shortleverage * shorts[stock])
elif stock in longs.index:
order_target_percent(stock,
context.longleverage * longs[stock])
else:
order_target(stock, 0)
except:
log.warn("[Failed Order] stock = %s"%stock.symbol)
def get_reduced_correlation_weights(returns, risk_adjusted=True):
"""
Implementation of minimum correlation algorithm.
ref: http://cssanalytics.com/doc/MCA%20Paper.pdf
:Params:
:returns <Pandas DataFrame>:Timeseries of asset returns
:risk_adjusted <boolean>: If True, asset weights are scaled
by their standard deviations
"""
correlations = returns.corr()
adj_correlations = get_adjusted_cor_matrix(correlations)
initial_weights = adj_correlations.T.mean()
ranks = initial_weights.rank()
ranks /= ranks.sum()
weights = adj_correlations.dot(ranks)
weights /= weights.sum()
if risk_adjusted:
weights = weights / returns.std()
weights /= weights.sum()
return weights
def get_adjusted_cor_matrix(cor):
values = cor.values.flatten()
mu = np.mean(values)
sigma = np.std(values)
distribution = scipy.stats.norm(mu, sigma)
return 1 - cor.apply(lambda x: distribution.cdf(x))
def before_trading_start(context):
num_stocks = 500
fundamental_df = get_fundamentals(
query(
# To add a metric. Start by typing "fundamentals."
fundamentals.valuation.market_cap,
)
.filter(fundamentals.valuation.market_cap > 1e8)
.order_by(fundamentals.valuation.market_cap.desc())
.limit(num_stocks)
)
update_universe(fundamental_df)
```
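The two weighting helpers are plain pandas/NumPy/SciPy and can be exercised outside the Quantopian runtime; a sketch on synthetic returns (the tickers and sample sizes are made up, and the functions above are assumed to be in scope):

```python
# Sketch: minimum-correlation weights on synthetic daily returns.
# Assumes get_reduced_correlation_weights from the algorithm above is in scope.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
returns = pd.DataFrame(rng.normal(0.0, 0.01, size=(250, 4)),
                       columns=['AAA', 'BBB', 'CCC', 'DDD'])

weights = get_reduced_correlation_weights(returns)
print(weights)        # one weight per asset
print(weights.sum())  # weights are normalized to sum to 1
```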
{
"source": "johnmbaughman/suplemon",
"score": 3
}
#### File: suplemon/suplemon/editor.py
```python
from . import helpers
from .line import Line
from .cursor import Cursor
from .viewer import Viewer
class State:
"""Store editor state for undo/redo."""
def __init__(self, editor=None):
self.cursors = [Cursor()]
self.lines = [Line()]
self.y_scroll = 0
self.x_scroll = 0
self.last_find = ""
if editor is not None:
self.store(editor)
def store(self, editor):
"""Store the state of editor instance."""
self.cursors = [cursor.tuple() for cursor in editor.cursors]
self.lines = [line.data for line in editor.lines]
self.y_scroll = editor.y_scroll
self.x_scroll = editor.x_scroll
self.last_find = editor.last_find
def restore(self, editor):
"""Restore stored state into the editor instance."""
editor.cursors = [Cursor(cursor) for cursor in self.cursors]
editor.lines = [Line(line) for line in self.lines]
editor.y_scroll = self.y_scroll
editor.x_scroll = self.x_scroll
editor.last_find = self.last_find
class Editor(Viewer):
"""Extends Viewer with editing capabilities."""
def __init__(self, app, window):
"""Initialize the editor.
Args:
app: The Suplemon main instance.
window: A window object to use for the ui.
"""
Viewer.__init__(self, app, window)
# History of editor states for undo/redo
self.history = [State()]
# Current state index of the editor
self.current_state = 0
# Last editor action that was used (for undo/redo)
self.last_action = None
def init(self):
Viewer.init(self)
operations = {
"backspace": self.backspace, # Backspace
"delete": self.delete, # Delete
"insert": self.insert, # Insert
"enter": self.enter, # Enter
"tab": self.tab, # Tab
"untab": self.untab, # Shift + Tab
"escape": self.escape, # Escape
"single_selection": self.single_selection, # Escape
"clear_last_find": self.clear_last_find, # Escape
"new_cursor_up": self.new_cursor_up, # Alt + Up
"new_cursor_down": self.new_cursor_down, # Alt + Down
"new_cursor_left": self.new_cursor_left, # Alt + Left
"new_cursor_right": self.new_cursor_right, # Alt + Right
"page_up": self.page_up, # Page Up
"page_down": self.page_down, # Page Down
"push_up": self.push_up, # Alt + Page Up
"push_down": self.push_down, # Alt + Page Down
"undo": self.undo, # F5
"redo": self.redo, # F6
"toggle_line_nums": self.toggle_line_nums, # F9
"toggle_line_ends": self.toggle_line_ends, # F10
"toggle_highlight": self.toggle_highlight, # F11
"copy": self.copy, # Ctrl + C
"cut": self.cut, # Ctrl + X
"duplicate_line": self.duplicate_line, # Ctrl + W
}
for key in operations.keys():
self.operations[key] = operations[key]
def set_buffer(self, buffer):
"""Sets local or global buffer depending on config."""
if self.app.config["editor"]["use_global_buffer"]:
self.app.global_buffer = buffer
else:
self.buffer = buffer
def set_data(self, data):
"""Set the editor text contents."""
Viewer.set_data(self, data)
buffer = self.get_buffer() # TODO: check this
if len(buffer) > 1:
self.store_state()
else:
state = State()
state.store(self)
self.history[0] = state
def store_action_state(self, action, state=None):
"""Store the editor state if a new action is taken."""
if self.last_action != action:
self.last_action = action
self.store_state(state)
else:
# FIXME: This if is here just for safety.
# FIXME: current_state might be wrong ;.<
if self.current_state < len(self.history)-1:
self.history[self.current_state].store(self)
def store_state(self, state=None, action=None):
"""Store the current editor state for undo/redo."""
if state is None:
state = State()
state.store(self)
if len(self.history) > 1:
if self.current_state < len(self.history)-1:
self.history = self.history[:self.current_state]
self.history.append(state)
self.current_state = len(self.history)-1
if len(self.history) > self.config["max_history"]:
self.history.pop(0)
def restore_state(self, index=None):
"""Restore an editor state."""
if len(self.history) <= 1:
return False
if index is None:
index = self.current_state-1
if index < 0 or index >= len(self.history):
return False
# if self.current_state < len(self.history):
# self.current_state = self.current_state-1
state = self.history[index]
state.restore(self)
self.current_state = index
def handle_input(self, event):
done = Viewer.handle_input(self, event)
if not done:
if event.is_typeable:
if isinstance(event.key_code, str):
self.type(event.key_code)
elif event.key_name:
self.type(event.key_name)
return True
return False
def undo(self):
"""Undo the last command or change."""
self.last_action = "undo"
self.restore_state()
def redo(self):
"""Redo the last command or change."""
self.last_action = "redo"
if self.current_state == len(self.history)-1:
return False
index = self.current_state+1
self.restore_state(index)
#
# Cursor operations
#
def new_cursor_up(self):
"""Add a new cursor one line up."""
x = self.get_cursor().x
cursor = self.get_first_cursor()
if cursor.y == 0:
return
new = Cursor(x, cursor.y-1)
self.cursors.append(new)
self.move_cursors()
self.scroll_up()
def new_cursor_down(self):
"""Add a new cursor one line down."""
x = self.get_cursor().x
cursor = self.get_last_cursor()
if cursor.y == len(self.lines)-1:
return
new = Cursor(x, cursor.y+1)
self.cursors.append(new)
self.move_cursors()
self.scroll_down()
def new_cursor_left(self):
"""Add a new cursor one character left."""
new = []
for cursor in self.cursors:
if cursor.x == 0:
continue
new.append(Cursor(cursor.x-1, cursor.y))
for c in new:
self.cursors.append(c)
self.move_cursors()
self.scroll_up()
def new_cursor_right(self):
"""Add a new cursor one character right."""
new = []
for cursor in self.cursors:
if cursor.x+1 > len(self.lines[cursor.y]):
continue
new.append(Cursor(cursor.x+1, cursor.y))
for c in new:
self.cursors.append(c)
self.move_cursors()
self.scroll_down()
def escape(self):
"""Handle escape key.
Wrapper for clear_last_find and single_selection."""
self.clear_last_find()
self.single_selection()
def clear_last_find(self):
"""Removes last_find so a new auto-find can be initiated."""
self.last_find = ""
def single_selection(self):
"""Removes all cursors except primary cursor."""
self.cursors = [self.cursors[0]]
self.move_cursors()
#
# Text editing operations
#
def replace_all(self, what, replacement):
"""Replaces what with replacement on each line."""
for line in self.lines:
data = line.get_data()
new = data.replace(what, replacement)
line.set_data(new)
self.move_cursors()
def delete(self):
"""Delete the next character."""
for cursor in self.cursors:
if len(self.lines)-1 < cursor.y:
# If we've run out of lines
break
line = self.lines[cursor.y]
# if we have more than 1 line
# and we're at the end of the current line
# and we're not on the last line
if len(self.lines) > 1 and cursor.x == len(line) and cursor.y != len(self.lines) - 1:
data = self.lines[cursor.y].get_data()
self.lines.pop(cursor.y)
self.lines[cursor.y].set_data(data+self.lines[cursor.y])
# Reposition cursors from line below into correct positions on current line
line_cursors = self.get_cursors_on_line(cursor.y+1)
for c in line_cursors:
c.move_right(len(data))
c.move_up()
self.move_y_cursors(cursor.y, -1)
else:
start = line[:cursor.x]
end = line[cursor.x+1:]
self.lines[cursor.y].set_data(start+end)
self.move_x_cursors(cursor.y, cursor.x, -1)
self.move_cursors()
# Add a restore point if previous action != delete
self.store_action_state("delete")
def backspace(self):
"""Delete the previous character."""
curs = reversed(sorted(self.cursors, key=lambda c: (c[1], c[0])))
# Iterate through all cursors from bottom to top
for cursor in curs:
line_no = cursor.y
# If we're at the beginning of file don't do anything
if cursor.x == 0 and cursor.y == 0:
continue
# If we're operating at the beginning of a line
if cursor.x == 0 and cursor.y != 0:
curr_line = self.lines.pop(line_no)
prev_line = self.lines[line_no-1]
length = len(prev_line) # Get the length of previous line
# Add the current line to the previous one
new_data = self.lines[cursor.y-1] + curr_line
self.lines[cursor.y-1].set_data(new_data)
# Get all cursors on current line
line_cursors = self.get_cursors_on_line(line_no)
for line_cursor in line_cursors: # Move the cursors
line_cursor.move_up()
# Add the length of previous line to each x coordinate
# so that their relative positions are preserved
line_cursor.move_right(length)
# Move all cursors below up one line
# (since a line was removed above them)
self.move_y_cursors(cursor.y, -1)
# Handle all other cases
else:
curr_line = self.lines[line_no]
# Remove one character by default
del_n_chars = 1
# Check if we should unindent
if self.config["backspace_unindent"]:
# Check if we can unindent, and that it's actually whitespace
# We don't do this for hard tabs since they're just a single character
if not self.config["hard_tabs"]:
indent = self.config["tab_width"]
if cursor.x >= indent:
if curr_line[cursor.x-indent:cursor.x] == indent*" ":
# Remove an indent's worth of whitespace
del_n_chars = indent
# Slice characters out of the line
start = curr_line[:cursor.x-del_n_chars]
end = curr_line[cursor.x:]
# Store the new line
self.lines[line_no].set_data(start+end)
# Move the operating cursor back by the deleted amount
cursor.move_left(del_n_chars)
# Do the same to the rest
self.move_x_cursors(line_no, cursor.x, -1*del_n_chars)
# Ensure we keep the view scrolled
self.move_cursors()
self.scroll_up()
# Add a restore point if previous action != backspace
self.store_action_state("backspace")
def enter(self):
"""Insert a new line at each cursor."""
# We sort the cursors, and loop through them from last to first
# That way we avoid messing with
# the relative positions of the higher cursors
curs = sorted(self.cursors, key=lambda c: (c[1], c[0]))
curs = reversed(curs)
for cursor in curs:
# The current line this cursor is on
line = self.lines[cursor.y]
# Start of the line
start = line[:cursor.x]
# End of the line
end = line[cursor.x:]
# Leave the beginning of the line
self.lines[cursor.y].set_data(start)
wspace = ""
if self.config["auto_indent_newline"]:
wspace = helpers.whitespace(self.lines[cursor.y])*" "
self.lines.insert(cursor.y+1, Line(wspace+end))
self.move_y_cursors(cursor.y, 1)
cursor.set_x(len(wspace))
cursor.move_down()
self.move_cursors()
self.scroll_down()
# Add a restore point if previous action != enter
self.store_action_state("enter")
def insert(self):
"""Insert buffer data at cursor(s)."""
cur = self.get_cursor()
buffer = list(self.get_buffer())
# If we have more than one cursor
# Or one cursor and one line
if len(self.cursors) > 1 or len(buffer) == 1:
# If the cursor count is more than the buffer length extend
# the buffer until it's at least as long as the cursor count
while len(buffer) < len(self.cursors):
buffer.extend(buffer)
curs = sorted(self.cursors, key=lambda c: (c[1], c[0]))
for cursor in curs:
line = self.lines[cursor.y]
buf = buffer[0]
line = line[:cursor.x] + buf + line[cursor.x:]
self.lines[cursor.y].set_data(line)
buffer.pop(0)
self.move_x_cursors(cursor.y, cursor.x-1, len(buf))
# If we have one cursor and multiple lines
else:
for buf in buffer:
y = cur[1]
if y < 0:
y = 0
self.lines.insert(y, Line(buf))
self.move_y_cursors(cur[1]-1, 1)
self.move_cursors()
self.scroll_down()
# Add a restore point if previous action != insert
self.store_action_state("insert")
def insert_lines_at(self, lines, at):
rev_lines = reversed(lines)
for line in rev_lines:
self.lines.insert(at, Line(line))
self.move_y_cursors(at, len(lines))
def push_up(self):
"""Move current lines up by one line."""
used_y = []
curs = sorted(self.cursors, key=lambda c: (c[1], c[0]))
for cursor in curs:
if cursor.y in used_y:
continue
used_y.append(cursor.y)
if cursor.y == 0:
break
old = self.lines[cursor.y-1]
self.lines[cursor.y-1] = self.lines[cursor.y]
self.lines[cursor.y] = old
self.move_cursors((0, -1))
self.scroll_up()
# Add a restore point if previous action != push_up
self.store_action_state("push_up")
def push_down(self):
"""Move current lines down by one line."""
used_y = []
curs = reversed(sorted(self.cursors, key=lambda c: (c[1], c[0])))
for cursor in curs:
if cursor.y in used_y:
continue
if cursor.y >= len(self.lines)-1:
break
used_y.append(cursor.y)
old = self.lines[cursor.y+1]
self.lines[cursor.y+1] = self.lines[cursor.y]
self.lines[cursor.y] = old
self.move_cursors((0, 1))
self.scroll_down()
# Add a restore point if previous action != push_down
self.store_action_state("push_down")
def tab(self):
"""Indent lines."""
# Add a restore point if previous action != tab
self.store_action_state("tab")
if not self.config["hard_tabs"]:
self.type(" "*self.config["tab_width"])
else:
self.type("\t")
def untab(self):
"""Unindent lines."""
linenums = []
# String to compare tabs to
tab = " "*self.config["tab_width"]
if self.config["hard_tabs"]:
tab = "\t"
width = len(tab)
for cursor in self.cursors:
line = self.lines[cursor.y]
if cursor.y in linenums:
cursor.x = helpers.whitespace(line)
continue
elif line[:width] == tab:
line = Line(line[width:])
self.lines[cursor.y] = line
cursor.x = helpers.whitespace(line)
linenums.append(cursor.y)
# Add a restore point if previous action != untab
self.store_action_state("untab")
def copy(self):
"""Copy lines to buffer."""
# Store cut lines in buffer
copy_buffer = []
# Get all lines with cursors on them
line_nums = self.get_lines_with_cursors()
for i in range(len(line_nums)):
# Get the line
line = self.lines[line_nums[i]]
# Put it in our temporary buffer
copy_buffer.append(line.get_data())
self.set_buffer(copy_buffer)
self.store_action_state("copy")
def cut(self):
"""Cut lines to buffer."""
# Store cut lines in buffer
cut_buffer = []
# Get all lines with cursors on them
line_nums = self.get_lines_with_cursors()
# Sort from last to first (invert order)
line_nums = line_nums[::-1]
for i in range(len(line_nums)): # Iterate from last to first
# Make sure we don't completely remove the last line
if len(self.lines) == 1:
cut_buffer.append(self.lines[0])
self.lines[0] = Line()
break
# Get the current line
line_no = line_nums[i]
# Get and remove the line
line = self.lines.pop(line_no)
# Put it in our temporary buffer
cut_buffer.append(line)
# Move all cursors below the current line up
self.move_y_cursors(line_no, -1)
self.move_cursors() # Make sure cursors are in valid places
# Reverse the buffer to get correct order and store it
self.set_buffer(cut_buffer[::-1])
self.store_action_state("cut")
def type(self, data):
"""Insert data at each cursor position."""
for cursor in self.cursors:
self.type_at_cursor(cursor, data)
self.move_cursors()
# Add a restore point if previous action != type
self.store_action_state("type")
def type_at_cursor(self, cursor, data):
"""Insert data at specified cursor."""
line = self.lines[cursor.y]
start = line[:cursor.x]
end = line[cursor.x:]
self.lines[cursor.y].set_data(start + data + end)
self.move_x_cursors(cursor.y, cursor.x, len(data))
cursor.move_right(len(data))
def go_to_pos(self, line_no, col=0):
"""Move primary cursor to line_no, col=0."""
if line_no < 0:
line_no = len(self.lines)-1
else:
line_no = line_no-1
self.store_state()
cur = self.get_cursor()
if col is not None:
cur.x = col
cur.y = line_no
if cur.y >= len(self.lines):
cur.y = len(self.lines)-1
self.scroll_to_line(cur.y)
self.move_cursors()
def duplicate_line(self):
"""Copy current line and add it below as a new line."""
curs = sorted(self.cursors, key=lambda c: (c.y, c.x))
for cursor in curs:
line = Line(self.lines[cursor.y])
self.lines.insert(cursor.y+1, line)
self.move_y_cursors(cursor.y, 1)
self.move_cursors()
self.store_action_state("duplicate_line")
```
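The undo/redo machinery above hinges on `store_action_state`: consecutive actions of the same kind reuse a single history slot, so a run of typed characters undoes as one step. A minimal standalone sketch of that coalescing idea (illustrative names only, not Suplemon code):
```python
# Minimal sketch of action-coalescing undo history; not part of the file above.
class History:
    def __init__(self, max_states=50):
        self.states = []          # stored snapshots
        self.last_action = None   # last action name seen
        self.max_states = max_states

    def store(self, action, snapshot):
        # Only start a new undo step when the action type changes.
        if action != self.last_action:
            self.last_action = action
            self.states.append(snapshot)
            if len(self.states) > self.max_states:
                self.states.pop(0)
        else:
            # Same action repeated: overwrite the latest snapshot.
            self.states[-1] = snapshot

    def undo(self):
        return self.states.pop() if self.states else None


if __name__ == "__main__":
    h = History()
    for ch in "abc":
        h.store("type", "text:" + ch)   # coalesced into one undo step
    h.store("enter", "text:abc\n")
    print(len(h.states))  # 2 steps, not 4
```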
#### File: suplemon/suplemon/helpers.py
```python
import os
import re
import sys
import time
import traceback
def curr_time():
"""Current time in %H:%M"""
return time.strftime("%H:%M")
def curr_time_sec():
"""Current time in %H:%M:%S"""
return time.strftime("%H:%M:%S")
def multisplit(data, delimiters):
pattern = "|".join(map(re.escape, delimiters))
return re.split(pattern, data)
def get_error_info():
"""Return info about last error."""
msg = "{0}\n{1}".format(str(traceback.format_exc()), str(sys.exc_info()))
return msg
def get_string_between(start, stop, s):
"""Search string for a substring between two delimeters. False if not found."""
i1 = s.find(start)
if i1 == -1:
return False
s = s[i1 + len(start):]
i2 = s.find(stop)
if i2 == -1:
return False
s = s[:i2]
return s
def whitespace(line):
"""Return index of first non whitespace character on a line."""
i = 0
for char in line:
if char != " ":
break
i += 1
return i
def parse_path(path):
"""Parse a relative path and return full directory and filename as a tuple."""
if path[:2] == "~" + os.sep:
p = os.path.expanduser("~")
path = os.path.join(p+os.sep, path[2:])
ab = os.path.abspath(path)
parts = os.path.split(ab)
return parts
```
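A quick interactive check of the helpers above; this assumes the `suplemon` package is importable, and the expected outputs follow directly from the definitions:
```python
from suplemon import helpers

# Split on several delimiters at once.
print(helpers.multisplit("a,b;c", [",", ";"]))            # ['a', 'b', 'c']

# Index of the first non-whitespace character (used for auto-indent).
print(helpers.whitespace("    x = 1"))                     # 4

# Substring between delimiters, or False when missing.
print(helpers.get_string_between("<", ">", "a<b>c"))       # 'b'
print(helpers.get_string_between("[", "]", "no match"))    # False
```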
#### File: suplemon/suplemon/lexer.py
```python
import pygments
import pygments.token
import pygments.lexers
class Lexer:
def __init__(self, app):
self.app = app
self.token_map = {
pygments.token.Comment: "comment",
pygments.token.Comment.Single: "comment",
pygments.token.Operator: "keyword",
pygments.token.Name.Function: "entity.name.function",
pygments.token.Name.Class: "entity.name.class",
pygments.token.Name.Tag: "entity.name.tag",
pygments.token.Name.Attribute: "entity.other.attribute-name",
pygments.token.Name.Variable: "variable",
pygments.token.Name.Builtin.Pseudo: "constant.language",
pygments.token.Literal.String: "string",
pygments.token.Literal.String.Doc: "string",
pygments.token.Punctuation: "punctuation",
pygments.token.Literal.Number: "constant.numeric",
pygments.token.Name: "entity.name",
pygments.token.Keyword: "keyword",
pygments.token.Generic.Deleted: "invalid",
}
def lex(self, code, lex):
"""Return tokenified code.
Return a list of tuples (scope, word) where word is the word to be
printed and scope the scope name representing the context.
:param str code: Code to tokenify.
:param lex: Lexer to use.
:return:
"""
if lex is None:
if not isinstance(code, str):
# if no suitable lexer is found, return the decoded code
code = code.decode("utf-8")
return (("global", code),)
words = pygments.lex(code, lex)
scopes = []
for word in words:
token = word[0]
scope = "global"
if token in self.token_map.keys():
scope = self.token_map[token]
scopes.append((scope, word[1]))
return scopes
```
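A small usage sketch for the lexer wrapper; it assumes `pygments` is installed and that the class above is importable as `suplemon.lexer.Lexer` (the `app` argument is unused by `lex`, so `None` is passed):
```python
import pygments.lexers
from suplemon.lexer import Lexer

lexer = Lexer(app=None)
py_lex = pygments.lexers.get_lexer_by_name("python")

# Each token becomes a (scope, text) pair; unknown tokens fall back to "global".
for scope, text in lexer.lex("def foo(): pass", py_lex):
    if text.strip():
        print(scope, repr(text))
```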
#### File: suplemon/linelight/diff.py
```python
from suplemon.linelight.color_map import color_map
class Syntax:
def get_comment(self):
return ("/*", "*/")
def get_color(self, raw_line):
color = color_map["white"]
line = str(raw_line)
if line.startswith("+"):
color = color_map["green"]
elif line.startswith("-"):
color = color_map["red"]
elif line.startswith("@@"):
color = color_map["blue"]
return color
```
#### File: suplemon/modules/autocomplete.py
```python
import re
from suplemon import helpers
from suplemon.suplemon_module import Module
class AutoComplete(Module):
"""
A simple autocompletion module.
This adds autocomplete support for the tab key. It uses a word
list scanned from all open files for completions. By default it suggests
the shortest possible match. If there are no matches, the tab action is
run normally.
"""
def init(self):
self.word_list = []
self.bind_event("tab", self.auto_complete)
self.bind_event_after("app_loaded", self.build_word_list)
self.bind_event_after("save_file", self.build_word_list)
self.bind_event_after("save_file_as", self.build_word_list)
def get_separators(self):
"""Return list of word separators obtained from app config.
:return: String with all separators.
:rtype: str
"""
separators = self.app.config["editor"]["punctuation"]
# Support words with underscores
separators = separators.replace("_", "")
return separators
def build_word_list(self, *args):
"""Build the word list based on contents of open files."""
word_list = []
for file in self.app.files:
data = file.get_editor().get_data()
words = helpers.multisplit(data, self.get_separators())
for word in words:
# Discard undesired whitespace
word = word.strip()
# Must be longer than 1 and not yet in word_list
if len(word) > 1 and word not in word_list:
word_list.append(word)
self.word_list = word_list
return False
def get_match(self, word):
"""Find a completable match for word.
:param word: The partial word to complete
:return: The completion to add to the partial word
:rtype: str
"""
if not word:
return False
# Build list of suitable matches
candidates = []
for candidate in self.word_list:
if candidate.startswith(word) and len(candidate) > len(word):
candidates.append(candidate)
# Find the shortest match
# TODO: implement cycling through matches
shortest = ""
for candidate in candidates:
if not shortest:
shortest = candidate
continue
if len(candidate) < len(shortest):
shortest = candidate
if shortest:
return shortest[len(word):]
return False
def run(self, app, editor, args):
"""Run the autocompletion."""
self.auto_complete()
def auto_complete(self, event):
"""Attempt to autocomplete at each cursor position.
This callback runs before the tab action and tries to autocomplete
the current word. If a match is found the tab action is inhibited.
:param event: The event object.
:return: True if a match is found.
"""
editor = self.app.get_editor()
pattern = "|".join(map(re.escape, self.get_separators()))
matched = False
for cursor in editor.cursors:
line = editor.lines[cursor.y][:cursor.x]
words = re.split(pattern, line)
last_word = words[-1]
match = self.get_match(last_word)
if match:
matched = True
editor.type_at_cursor(cursor, match)
return matched
module = {
"class": AutoComplete,
"name": "autocomplete",
}
```
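The completion policy in `get_match` is simply "shortest candidate that extends the word"; the same idea in isolation, with no Suplemon dependency:
```python
def shortest_completion(word, word_list):
    """Return the suffix completing `word` using the shortest matching candidate."""
    candidates = [w for w in word_list if w.startswith(word) and len(w) > len(word)]
    if not candidates:
        return None
    return min(candidates, key=len)[len(word):]

print(shortest_completion("im", ["import", "imports", "imported"]))  # -> 'port'
```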
#### File: suplemon/modules/bulk_delete.py
```python
from suplemon.suplemon_module import Module
class BulkDelete(Module):
"""
Bulk delete lines and characters.
Asks what direction to delete in by default.
Add 'up' to delete lines above highest cursor.
Add 'down' to delete lines below lowest cursor.
Add 'left' to delete characters to the left of all cursors.
Add 'right' to delete characters to the right of all cursors.
"""
def init(self):
self.directions = ["up", "down", "left", "right"]
def handler(self, prompt, event):
# Get arrow keys from prompt
if event.key_name in self.directions:
prompt.set_data(event.key_name)
prompt.on_ready()
return True # Disable normal key handling
def run(self, app, editor, args):
direction = args.lower()
if not direction:
direction = app.ui.query_filtered("Press arrow key in direction to delete:", handler=self.handler)
if direction not in self.directions:
app.set_status("Invalid direction.")
return False
# Delete entire lines
if direction == "up":
pos = editor.get_first_cursor()
length = len(editor.lines)
editor.lines = editor.lines[pos.y:]
delta = length - len(editor.lines)
# If lines were removed, move the cursors up the same amount
if delta:
editor.move_cursors((0, -delta))
elif direction == "down":
pos = editor.get_last_cursor()
editor.lines = editor.lines[:pos.y+1]
# Delete from start or end of lines
else:
# Select min/max function based on direction
func = min if direction == "left" else max
# Get all lines with cursors
line_indices = editor.get_lines_with_cursors()
for line_no in line_indices:
# Get all cursors for the line
line_cursors = editor.get_cursors_on_line(line_no)
# Get the leftmost of rightmost x coordinate
x = func(line_cursors, key=lambda c: c.x).x
# Delete correct part of the line
line = editor.lines[line_no]
if direction == "left":
line.data = line.data[x:]
# Also move cursors appropriately when deleting left side
[c.move_left(x) for c in line_cursors]
else:
line.data = line.data[:x]
module = {
"class": BulkDelete,
"name": "bulk_delete",
}
```
#### File: suplemon/modules/diff.py
```python
import difflib
from suplemon.suplemon_module import Module
class Diff(Module):
"""View a diff of the current file compared to it's on disk version."""
def run(self, app, editor, args):
curr_file = app.get_file()
curr_path = curr_file.get_path()
if not curr_path:
self.app.set_status("File hasn't been saved, can't show diff.")
return False
current_data = editor.get_data()
f = open(curr_path)
original_data = f.read()
f.close()
diff = self.get_diff(original_data, current_data)
if not diff:
self.app.set_status("The file in the editor and on disk are identical.")
return False
file = app.new_file()
file.set_name(curr_file.get_name() + ".diff")
file.set_data(diff)
app.switch_to_file(app.last_file_index())
def get_diff(self, a, b):
a = a.splitlines(1)
b = b.splitlines(1)
diff = difflib.unified_diff(a, b)
return "".join(diff)
module = {
"class": Diff,
"name": "diff",
}
```
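The module above delegates the diffing itself to the standard library; the core call can be tried on its own:
```python
import difflib

original = "line one\nline two\n"
edited = "line one\nline 2\n"

# splitlines(True) keeps line endings, mirroring splitlines(1) in the module above.
diff = difflib.unified_diff(original.splitlines(True), edited.splitlines(True))
print("".join(diff))
```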
#### File: suplemon/modules/hostname.py
```python
import socket
from suplemon.suplemon_module import Module
class Hostname(Module):
"""Shows the machine hostname in the bottom status bar."""
def init(self):
self.hostname = ""
hostinfo = None
try:
hostinfo = socket.gethostbyaddr(socket.gethostname())
except:
self.logger.debug("Failed to get hostname.")
if hostinfo:
self.hostname = hostinfo[0]
# Use shorter hostname if available
if hostinfo[1]:
self.hostname = hostinfo[1][0]
def get_status(self):
if self.hostname:
return "host:{0}".format(self.hostname)
return ""
module = {
"class": Hostname,
"name": "hostname",
"status": "bottom",
}
```
#### File: suplemon/modules/rstrip.py
```python
from suplemon.suplemon_module import Module
class RStrip(Module):
"""Trim whitespace from the end of lines."""
def run(self, app, editor, args):
line_nums = editor.get_lines_with_cursors()
for n in line_nums:
line = editor.lines[n]
line.set_data(line.data.rstrip())
module = {
"class": RStrip,
"name": "rstrip",
}
```
#### File: suplemon/modules/tabstospaces.py
```python
from suplemon.suplemon_module import Module
class TabsToSpaces(Module):
"""Convert tab characters to spaces in the entire file."""
def run(self, app, editor, args):
for i, line in enumerate(editor.lines):
new = line.data.replace("\t", " "*editor.config["tab_width"])
editor.lines[i].set_data(new)
module = {
"class": TabsToSpaces,
"name": "tabstospaces",
}
``` |
{
"source": "johnmccambridge7/Ling-28-Final-Project",
"score": 3
} |
#### File: johnmccambridge7/Ling-28-Final-Project/testing.py
```python
import numpy as np
from classifier import classify
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
def is_correct(results, label):
minimum = float
VALIDATION_ANGER = open("datasets/testing/validation_anger.txt").read().split("\n")
VALIDATION_SAD = open("datasets/testing/validation_sad.txt").read().split("\n")
VALIDATION_JOY = open("datasets/testing/validation_joy.txt").read().split("\n")
truth = ["#-ANGER", "#-ANGER", "#-ANGER", "#-ANGER", "#-ANGER", "#-ANGER", "#-ANGER", "#-ANGER", "#-ANGER", "#-ANGER",
"#-SAD", "#-SAD", "#-SAD", "#-SAD", "#-SAD", "#-SAD", "#-SAD", "#-SAD", "#-SAD", "#-SAD",
"#-JOY", "#-JOY", "#-JOY", "#-JOY", "#-JOY", "#-JOY", "#-JOY", "#-JOY", "#-JOY", "#-JOY"]
predictions = []
"""for phrase in VALIDATION_ANGER:
print("Classifying Anger:")
score, label = classify(phrase)
predictions.append(label)
print("==========================")
for phrase in VALIDATION_SAD:
print("Classifying Sad:")
score, label = classify(phrase)
predictions.append(label)
print("==========================")
for phrase in VALIDATION_JOY:
print("Classifying Joy:")
score, label = classify(phrase)
predictions.append(label)
print("==========================")"""
predictions = ["#-ANGER", "#-ANGER", "#-ANGER", "#-ANGER", "#-ANGER", "#-ANGER", "#-ANGER", "#-ANGER", "#-JOY", "#-ANGER", "#-ANGER", "#-JOY", "#-SAD", "#-SAD", "#-ANGER", "#-SAD", "#-SAD", "#-SAD", "#-SAD", "#-JOY", "#-JOY", "#-JOY", "#-JOY", "#-JOY", "#-JOY", "#-JOY", "#-JOY", "#-ANGER", "#-SAD", "#-JOY"]
names = ["#-JOY", "#-ANGER", "#-SAD"]
cm = confusion_matrix(truth, predictions, labels=names)
report = classification_report(truth, predictions, labels=names)
print(cm)
print(predictions)
print(report)
``` |
{
"source": "johnmcconnell/dartlang-sdk",
"score": 2
} |
#### File: dartlang-sdk/tools/write_dartdoc_options_file.py
```python
import argparse
import sys
import utils
def ParseArgs(args):
args = args[1:]
parser = argparse.ArgumentParser(
description='A script to write a custom dartdoc_options.yaml to a file')
parser.add_argument(
'--output', '-o', type=str, required=True, help='File to write')
return parser.parse_args(args)
def Main(argv):
args = ParseArgs(argv)
# TODO(jcollins-g): switch to version numbers when github has its tags synced
revision = utils.GetGitRevision()
if revision is None:
revision = 'master'
output = '''dartdoc:
categoryOrder: ["Core", "VM", "Web"]
linkToSource:
root: '.'
uriTemplate: 'https://github.com/dart-lang/sdk/blob/%s/sdk/%%f%%#L%%l%%'
''' % revision
with open(args.output, 'w') as f:
f.write(output)
return 0
if __name__ == '__main__':
sys.exit(Main(sys.argv))
``` |
{
"source": "johnmcdouall/pypki2",
"score": 3
} |
#### File: pypki2/pypki2/config.py
```python
from .exceptions import PyPKI2Exception
from .p12 import P12Loader
from .pem import CALoader, PEMLoader
from .utils import in_ipython, in_nbgallery, input23
from time import sleep
try:
import ssl
except ImportError:
raise PyPKI2Exception('Cannot use pypki2. This instance of Python was not compiled with SSL support. Try installing openssl-devel and recompiling.')
import json
import os
class Configuration(object):
def __init__(self, filename=None):
self.config = {}
self.changed = False
if filename is not None and os.path.exists(filename):
try:
with open(filename, 'r') as f:
j = json.load(f)
except ValueError as e:
raise PyPKI2Exception('Unable to parse your .mypki file at {0}. Is it in JSON format?'.format(filename))
for k,v in j.items():
self.config[k] = v
def set(self, k, v):
if k in self.config and self.config[k] == v:
pass
else:
self.config[k] = v
self.changed = True
def get(self, k):
return self.config.get(k, None)
def has(self, k):
return k in self.config
def store(self, filename):
if self.changed:
with open(filename, 'w') as f:
json.dump(self.config, f)
def mypki_config_path():
if 'MYPKI_CONFIG' in os.environ:
p = os.environ['MYPKI_CONFIG'].strip()
d = os.path.split(p)[0]
if os.path.exists(d) and os.path.isdir(d):
return p+os.sep+'mypki_config'
elif os.path.exists(d):
return p
else:
return None
return None
def home_config_path():
if 'HOME' in os.environ:
p = os.environ['HOME']
if os.path.exists(p):
return p+os.sep+'.mypki'
else:
return None
return None
def get_config_path():
p = mypki_config_path()
if p is not None:
return p
p = home_config_path()
if p is not None:
return p
raise PyPKI2Exception('Could not find MYPKI_CONFIG or HOME environment variables. If you are on Windows, you need to add a MYPKI_CONFIG environment variable in Control Panel. See Windows Configuration in README.md for further instructions.')
def pick_loader(loaders):
options = { str(i+1):loaders[i] for i in range(len(loaders)) }
selected = None
while selected is None:
print('Available PKI configuration loaders are:')
for k in sorted(list(options.keys())):
print('{0}) {1}'.format(k, options[k].name))
num = input23('Which type of PKI do you want to configure: ').strip()
if num in options:
selected = options[num]
else:
print('Invalid selection...')
selected = None
return selected
class Loader(object):
def __init__(self):
self.config_path = get_config_path()
self.ipython_config()
self.config = None
self.loader = None
self.ca_loader = None
def ipython_config(self):
temp_config = Configuration(self.config_path)
if temp_config.has('p12') and 'path' in temp_config.get('p12'):
pass
else:
if in_ipython() and in_nbgallery():
from IPython.display import display, Javascript
display(Javascript("MyPKI.init({'no_verify':true, configure:true});"))
print('Configuring .mypki via JavaScript .p12 dialog...')
while True:
temp_config = Configuration(self.config_path)
if temp_config.has('p12') and 'path' in temp_config.get('p12'):
break
else:
sleep(2)
def prepare_loader(self):
if self.loader is None:
self.config = Configuration(self.config_path)
loaders = [ P12Loader(self.config), PEMLoader(self.config) ]
configured_loaders = [ loader for loader in loaders if loader.is_configured() ]
if len(configured_loaders) == 0:
self.loader = pick_loader(loaders)
elif len(configured_loaders) > 0:
self.loader = configured_loaders[0]
else:
raise PyPKI2Exception('No configured PKI loader available.')
self.loader.configure()
self.ca_loader = CALoader(self.config)
self.ca_loader.configure()
self.config.store(self.config_path)
def new_context(self, protocol=ssl.PROTOCOL_SSLv23):
self.prepare_loader()
c = self.loader.new_context(protocol=protocol)
c.verify_mode = ssl.CERT_REQUIRED
ca_filename = self.ca_loader.filename.strip()
if len(ca_filename) == 0:
raise PyPKI2Exception('Certificate Authority (CA) file not specified.')
elif not os.path.exists(ca_filename):
raise PyPKI2Exception('Certificate Authority (CA) file {0} does not exist.'.format(ca_filename))
else:
c.load_verify_locations(cafile=ca_filename)
return c
def dump_key(self, fobj):
self.prepare_loader()
self.loader.dump_key(fobj)
def ca_path(self):
self.prepare_loader()
return self.ca_loader.filename
_pypki2_config_loader = Loader()
def dump_key(fobj):
_pypki2_config_loader.dump_key(fobj)
def ca_path():
return _pypki2_config_loader.ca_path()
def ssl_context(protocol=ssl.PROTOCOL_SSLv23):
return _pypki2_config_loader.new_context(protocol=protocol)
``` |
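Once a `.mypki` configuration exists, the module-level helpers are all a caller needs for verified TLS; a sketch, assuming the functions above are in scope and using an illustrative server name:
```python
import urllib.request

# ssl_context() triggers PKI configuration on first use, then returns a
# context with the client certificate and CA bundle loaded.
ctx = ssl_context()
resp = urllib.request.urlopen("https://internal.example.com", context=ctx)
print(resp.status)
```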
{
"source": "johnmcfarlane/conan",
"score": 2
} |
#### File: functional/old/user_info_test.py
```python
import os
import unittest
from conans.paths import CONANFILE
from conans.test.utils.tools import TestClient
class UserInfoTest(unittest.TestCase):
def test_user_info_propagation(self):
client = TestClient()
def export_lib(name, requires, infolines):
base = '''
import os
from conans import ConanFile
class MyConanfile(ConanFile):
name = "%s"
version = "0.1"
requires = "%s"
def build(self):
pass
def package_info(self):
%s
'''
client.save({CONANFILE: base % (name, requires, infolines)}, clean_first=True)
client.run("export . lasote/stable")
export_lib("LIB_A", "", "self.user_info.VAR1=2")
export_lib("LIB_B", "LIB_A/0.1@lasote/stable", "self.user_info.VAR1=2\n "
"self.user_info.VAR2=3")
export_lib("LIB_C", "LIB_B/0.1@lasote/stable", "self.user_info.VAR1=2")
export_lib("LIB_D", "LIB_C/0.1@lasote/stable", "self.user_info.var1=2")
reuse = '''
import os
from conans import ConanFile
class MyConanfile(ConanFile):
name = "reuse"
version = "0.1"
requires = "LIB_D/0.1@lasote/stable"
def build(self):
assert(self.deps_user_info["LIB_A"].VAR1=="2")
assert(self.deps_user_info["LIB_B"].VAR1=="2")
assert(self.deps_user_info["LIB_B"].VAR2=="3")
assert(self.deps_user_info["LIB_C"].VAR1=="2")
assert(self.deps_user_info["LIB_D"].var1=="2")
'''
client.save({CONANFILE: reuse}, clean_first=True)
client.run("export . lasote/stable")
client.run('install reuse/0.1@lasote/stable --build -g txt')
# Assert generator TXT
txt_contents = client.load("conanbuildinfo.txt")
self.assertIn("[USER_LIB_A]%sVAR1=2" % os.linesep, txt_contents)
self.assertIn("[USER_LIB_B]%sVAR1=2%sVAR2=3" % (os.linesep, os.linesep), txt_contents)
self.assertIn("[USER_LIB_C]%sVAR1=2" % os.linesep, txt_contents)
self.assertIn("[USER_LIB_D]%svar1=2" % os.linesep, txt_contents)
# Now try local command with a consumer
client.run('install . --build')
client.run("build .")
```
#### File: test/utils/genconanfile.py
```python
class GenConanfile(object):
"""
USAGE:
x = GenConanfile().with_import("import os").\
with_setting("os").\
with_option("shared", [True, False]).\
with_default_option("shared", True).\
with_build_msg("holaaa").\
with_build_msg("adiooos").\
with_package_file("file.txt", "hola").\
with_package_file("file2.txt", "hola")
"""
def __init__(self, name=None, version=None):
self._imports = ["from conans import ConanFile"]
self._name = name
self._version = version
self._settings = []
self._options = {}
self._generators = []
self._default_options = {}
self._package_files = {}
self._package_files_env = {}
self._build_messages = []
self._scm = {}
self._requires = []
self._requirements = []
self._build_requires = []
self._revision_mode = None
self._package_info = {}
self._package_id_lines = []
self._test_lines = []
def with_name(self, name):
self._name = name
return self
def with_version(self, version):
self._version = version
return self
def with_revision_mode(self, revision_mode):
self._revision_mode = revision_mode
return self
def with_scm(self, scm):
self._scm = scm
return self
def with_generator(self, generator):
self._generators.append(generator)
return self
def with_require(self, ref, private=False, override=False):
return self.with_require_plain(ref.full_str(), private, override)
def with_require_plain(self, ref_str, private=False, override=False):
self._requires.append((ref_str, private, override))
return self
def with_requirement(self, ref, private=False, override=False):
return self.with_requirement_plain(ref.full_str(), private, override)
def with_requirement_plain(self, ref_str, private=False, override=False):
self._requirements.append((ref_str, private, override))
return self
def with_build_require(self, ref):
return self.with_build_require_plain(ref.full_str())
def with_build_require_plain(self, ref_str):
self._build_requires.append(ref_str)
return self
def with_import(self, i):
if i not in self._imports:
self._imports.append(i)
return self
def with_setting(self, setting):
self._settings.append(setting)
return self
def with_option(self, option_name, values):
self._options[option_name] = values
return self
def with_default_option(self, option_name, value):
self._default_options[option_name] = value
return self
def with_package_file(self, file_name, contents=None, env_var=None):
if not contents and not env_var:
raise Exception("Specify contents or env_var")
self.with_import("import os")
self.with_import("from conans import tools")
if contents:
self._package_files[file_name] = contents
if env_var:
self._package_files_env[file_name] = env_var
return self
def with_build_msg(self, msg):
self._build_messages.append(msg)
return self
def with_package_info(self, cpp_info=None, env_info=None):
assert isinstance(cpp_info, dict), "cpp_info ({}) expects dict".format(type(cpp_info))
assert isinstance(env_info, dict), "env_info ({}) expects dict".format(type(env_info))
if cpp_info:
self._package_info["cpp_info"] = cpp_info
if env_info:
self._package_info["env_info"] = env_info
return self
def with_package_id(self, line):
self._package_id_lines.append(line)
return self
def with_test(self, line):
self._test_lines.append(line)
return self
@property
def _name_line(self):
if not self._name:
return ""
return "name = '{}'".format(self._name)
@property
def _version_line(self):
if not self._version:
return ""
return "version = '{}'".format(self._version)
@property
def _scm_line(self):
if not self._scm:
return ""
line = ", ".join('"%s": "%s"' % (k, v) for k, v in self._scm.items())
return "scm = {%s}" % line
@property
def _generators_line(self):
if not self._generators:
return ""
line = ", ".join('"{}"'.format(generator) for generator in self._generators)
return "generators = {}".format(line)
@property
def _revision_mode_line(self):
if not self._revision_mode:
return ""
line = "revision_mode=\"{}\"".format(self._revision_mode)
return line
@property
def _settings_line(self):
if not self._settings:
return ""
line = ", ".join('"%s"' % s for s in self._settings)
return "settings = {}".format(line)
@property
def _options_line(self):
if not self._options:
return ""
line = ", ".join('"%s": %s' % (k, v) for k, v in self._options.items())
tmp = "options = {%s}" % line
return tmp
@property
def _default_options_line(self):
if not self._default_options:
return ""
line = ", ".join('"%s": %s' % (k, v) for k, v in self._default_options.items())
tmp = "default_options = {%s}" % line
return tmp
@property
def _build_requires_line(self):
if not self._build_requires:
return ""
line = ", ".join(['"{}"'.format(r) for r in self._build_requires])
tmp = "build_requires = %s" % line
return tmp
@property
def _requires_line(self):
if not self._requires:
return ""
items = []
for ref, private, override in self._requires:
if private or override:
private_str = ", 'private'" if private else ""
override_str = ", 'override'" if override else ""
items.append('("{}"{}{})'.format(ref, private_str, override_str))
else:
items.append('"{}"'.format(ref))
tmp = "requires = ({}, )".format(", ".join(items))
return tmp
@property
def _requirements_method(self):
if not self._requirements:
return ""
lines = []
for ref, private, override in self._requirements:
private_str = ", private=True" if private else ""
override_str = ", override=True" if override else ""
lines.append(' self.requires("{}"{}{})'.format(ref, private_str, override_str))
return """
def requirements(self):
{}
""".format("\n".join(lines))
@property
def _package_method(self):
lines = []
if self._package_files:
lines = [' tools.save(os.path.join(self.package_folder, "{}"), "{}")'
''.format(key, value)
for key, value in self._package_files.items()]
if self._package_files_env:
lines.extend([' tools.save(os.path.join(self.package_folder, "{}"), '
'os.getenv("{}"))'.format(key, value)
for key, value in self._package_files_env.items()])
if not lines:
return ""
return """
def package(self):
{}
""".format("\n".join(lines))
@property
def _build_method(self):
if not self._build_messages:
return ""
lines = [' self.output.warn("{}")'.format(m) for m in self._build_messages]
return """
def build(self):
{}
""".format("\n".join(lines))
@property
def _package_info_method(self):
if not self._package_info:
return ""
lines = []
if "cpp_info" in self._package_info:
for k, v in self._package_info["cpp_info"].items():
lines.append(' self.cpp_info.{} = {}'.format(k, str(v)))
if "env_info" in self._package_info:
for k, v in self._package_info["env_info"].items():
lines.append(' self.env_info.{} = {}'.format(k, str(v)))
return """
def package_info(self):
{}
""".format("\n".join(lines))
@property
def _package_id_method(self):
if not self._package_id_lines:
return ""
lines = [' {}'.format(line) for line in self._package_id_lines]
return """
def package_id(self):
{}
""".format("\n".join(lines))
@property
def _test_method(self):
if not self._test_lines:
return ""
lines = ['', ' def test(self):'] + [' %s' % m for m in self._test_lines]
return "\n".join(lines)
def __repr__(self):
ret = []
ret.extend(self._imports)
ret.append("class HelloConan(ConanFile):")
if self._name_line:
ret.append(" {}".format(self._name_line))
if self._version_line:
ret.append(" {}".format(self._version_line))
if self._generators_line:
ret.append(" {}".format(self._generators_line))
if self._requires_line:
ret.append(" {}".format(self._requires_line))
if self._requirements_method:
ret.append(" {}".format(self._requirements_method))
if self._build_requires_line:
ret.append(" {}".format(self._build_requires_line))
if self._scm:
ret.append(" {}".format(self._scm_line))
if self._revision_mode_line:
ret.append(" {}".format(self._revision_mode_line))
if self._settings_line:
ret.append(" {}".format(self._settings_line))
if self._options_line:
ret.append(" {}".format(self._options_line))
if self._default_options_line:
ret.append(" {}".format(self._default_options_line))
if self._build_method:
ret.append(" {}".format(self._build_method))
if self._package_method:
ret.append(" {}".format(self._package_method))
if self._package_info_method:
ret.append(" {}".format(self._package_info_method))
if self._package_id_lines:
ret.append(" {}".format(self._package_id_method))
if self._test_method:
ret.append(" {}".format(self._test_method))
if len(ret) == 2:
ret.append(" pass")
return "\n".join(ret)
``` |
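Because `__repr__` assembles the recipe text, the builder can be exercised directly; a sketch, assuming `GenConanfile` from the file above is in scope:
```python
# Build a small recipe and print the generated conanfile source.
conanfile = (GenConanfile(name="hello", version="0.1")
             .with_setting("os")
             .with_option("shared", [True, False])
             .with_default_option("shared", False))
print(conanfile)
```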
{
"source": "john-mcgee/new-pinto-bean",
"score": 4
} |
#### File: john-mcgee/new-pinto-bean/JM-StatScraper-Script.py
```python
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from datetime import date
import time
import csv
today = date.today()
today = today.strftime("%m%d%Y")
#Prepare the soup
site = "https://www.pro-football-reference.com"
chrome_options = Options()
chrome_options.add_argument("--headless")
browser = webdriver.Chrome(options=chrome_options)
browser.get(site)
time.sleep(1)
html = browser.page_source
homepage = BeautifulSoup(html, 'lxml')
#Create offensive stat CSV file (off_csv) and write headers
off_csv = "FF-Off-Stats-{date}.csv".format(date=today)
off_headers = ["Player","Team","Cmp","Att","Yds","TD","Int","Sk","Yds","Lng","Rate","Att","Yds","TD","Lng","Tgt","Rec","Yds","TD","Lng","Fmb","FL","Week"]
off_file = open(off_csv, "w", newline='')
off_writer = csv.writer(off_file)
off_writer.writerow(off_headers)
#Create defensive stat CSV file (dst_csv) and write headers
dst_csv = "FF-DST-Stats-{date}.csv".format(date=today)
dst_headers = ["Player", "Team","Int","Yds","TD","Lng","PD","Sk","Comb","Solo","Ast","TFL","QBHits","FR","Yds","TD","FF","Week"]
dst_file = open(dst_csv, "w", newline='')
dst_writer = csv.writer(dst_file)
dst_writer.writerow(dst_headers)
#Create snap count stat CSV file (snaps_csv) and write headers
snaps_csv = "FF-Snaps-Stats-{date}.csv".format(date=today)
snaps_headers = ["Player","Pos","Num","Pct","Num","Pct","Num","Pct","Week","Team"]
snaps_file = open(snaps_csv, "w", newline='')
snaps_writer = csv.writer(snaps_file)
snaps_writer.writerow(snaps_headers)
#Function for scraping offense stats
def Offense():
#Locate offensive player data and save it to a list to be cleaned
offense_stats = []
game_content = gamepage.find("div", id="content", role="main")
player_off = game_content.find("div", id="all_player_offense")
all_player_off = player_off.find("tbody")
for player in all_player_off.find_all("tr"):
player_stats = player.get_text(separator=',')
offense_stats.append(player_stats)
#Add missing blank QB rating for non-QBs, eliminate in-table headers and save to off_csv
for each in offense_stats:
each_list = each.split(",")
if len(each_list) < 20 or len(each_list) > 30:
continue
elif len(each_list) < 22:
each_list.insert(10," ")
each_list.append(week)
off_writer.writerow(each_list)
else:
each_list.append(week)
off_writer.writerow(each_list)
off_file.close()
#Function for scraping defense stats
def Defense():
#Locate defensive player data
dst_stats = []
game_content = gamepage.find("div", id="content", role="main")
player_def = game_content.find("div", id="all_player_defense")
table_def = player_def.find("tbody")
for player in table_def.find_all("tr"):
player_stats = player.get_text(separator=',')
dst_stats.append(player_stats)
#Eliminate in-table headers and save to dst_csv
for each in dst_stats:
each_list = each.split(",")
if len(each_list) < 10 or len(each_list) > 30:
continue
else:
each_list.append(week)
dst_writer.writerow(each_list)
dst_file.close()
#Function for scraping snap counts
def Snaps():
#Locate home team snap count data
h_snaps_stats = []
game_content = gamepage.find("div", id="content", role="main")
player_snaps = game_content.find("div", id="div_home_snap_counts")
table_snaps = player_snaps.find("tbody")
for player in table_snaps.find_all("tr"):
player_stats = player.get_text(separator=',')
h_snaps_stats.append(player_stats)
#Eliminate in-table headers and save to snaps_csv
for each in h_snaps_stats:
each_list = each.split(",")
if len(each_list) < 7 or len(each_list) > 15:
continue
elif "\n" in each_list:
continue
else:
each_list.append(week)
each_list.append(team_home)
snaps_writer.writerow(each_list)
#Locate visitor team snap count data
v_snaps_stats = []
game_content = gamepage.find("div", id="content", role="main")
player_snaps = game_content.find("div", id="div_vis_snap_counts")
table_snaps = player_snaps.find("tbody")
for player in table_snaps.find_all("tr"):
player_stats = player.get_text(separator=',')
v_snaps_stats.append(player_stats)
#Eliminate in-table headers and save to snaps_csv
for each in v_snaps_stats:
each_list = each.split(",")
if len(each_list) < 7 or len(each_list) > 15:
continue
elif "\n" in each_list:
continue
else:
each_list.append(week)
each_list.append(team_vis)
snaps_writer.writerow(each_list)
snaps_file.close()
#Scrape the links to the boxscores of the most recent NFL games
gamelink_list = []
for linktd in homepage.find_all("td", class_="right gamelink"):
gamelink = str(linktd.a)
linksplit = gamelink.split("\"")
gamelink_list.append(site+linksplit[1])
#Iterate through each boxscore link to scrape game-specific stats
for gamelink in gamelink_list:
browser.get(gamelink)
time.sleep(5)
game_html = browser.page_source
gamepage = BeautifulSoup(game_html, "lxml")
#Identify teams to append to snap counts
teams = []
team_list = gamepage.find("table", id="team_stats").find("tr").get_text(separator=',')
for each in team_list.split(","):
if each == "\n":
continue
else:
teams.append(each)
team_home = teams[1]
team_vis = teams[0]
#Identify week # to append to all tables
week_text = gamepage.find("h2").find("a").get_text(separator=',')
week = week_text.strip("Week ")
Offense() #Scrape offensive stats on current game page
Defense() #Scrape defensive stats on current game page
Snaps() #Scrape snap counts on current game page
``` |
{
"source": "JohnMcGJr/Cloud-Deployment-Samples",
"score": 2
} |
#### File: Cloud-Deployment-Samples/CloudFormation/generate_qcft.py
```python
from troposphere import FindInMap, GetAtt, Output
from troposphere import Parameter, Ref, Template, Join
import troposphere.ec2 as ec2
# The purpose of this script is to generate an AWS CloudFormation Template
# for QF2 that is pre-configured for a requested number of cluster nodes, and
# contains the proper configuration to allow those cluster nodes to
# form a cluster and serve clients.
#
# TODO Launch CloudFormation with the completed CFT.
# add_params() takes a given Template object and adds parameters for SSH keys,
# allowed AWS node types, VPC, and Subnet ID
def add_params(t):
t.add_parameter(Parameter(
"KeyName",
Description="Name of an existing EC2 KeyPair to enable SSH "
"access to the node",
Type="AWS::EC2::KeyPair::KeyName",
))
t.add_parameter(Parameter(
"InstanceType",
Description="EC2 instance type for QF2 node",
Type="String",
Default="m4.4xlarge",
AllowedValues=[
"m4.xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.10xlarge",
"m4.16xlarge"
],
ConstraintDescription="Must be a Qumulo supported EC2 instance type.",
))
t.add_parameter(Parameter(
"VpcId",
Description="ID of the VPC in which to deploy QF2.",
Type="AWS::EC2::VPC::Id",
ConstraintDescription="Must be the ID of an existing VPC.",
))
t.add_parameter(Parameter(
"SubnetId",
Description="ID of the Subnet in which to deploy QF2.",
Type="AWS::EC2::Subnet::Id",
ConstraintDescription="Must be the ID of an existing Subnet.",
))
# add_amimap() takes a given Template object and AMI ID then creates the Region to AMI ID map
# which is referenced by the add_nodes function.
def add_amimap(t, amiid):
t.add_mapping('RegionMap', {
"us-east-1": {"AMI": amiid},
"us-east-2": {"AMI": "US-EAST-1-AMI-CLONE"},
"us-west-1": {"AMI": "US-EAST-1-AMI-CLONE"},
"us-west-2": {"AMI": "US-EAST-1-AMI-CLONE"},
"ca-central-1": {"AMI": "US-EAST-1-AMI-CLONE"},
"eu-central-1": {"AMI": "US-EAST-1-AMI-CLONE"},
"eu-west-1": {"AMI": "US-EAST-1-AMI-CLONE"},
"eu-west-2": {"AMI": "US-EAST-1-AMI-CLONE"},
"eu-west-3": {"AMI": "US-EAST-1-AMI-CLONE"}
})
# add_secgroup() takes a given Template object and adds properly configured AWS
# security group to enable QF2 to cluster, replicate, and serve clients.
# Ports enabled by default:
# TCP 21, 80, 111, 443, 445, 2049, 3712, 8000
# UDP 111, 2049
# All traffic is allowed between members of the security group for clustering.
def add_secgroup(t):
sg_in = []
sg_out = []
#Ingress TCP ports
for port in ['21', '80', '111', '443', '445', '2049', '3712', '8000']:
sg_in.append(ec2.SecurityGroupRule(
Description = "TCP ports for NFS, SMB, FTP, Management, and Replication",
IpProtocol = 'tcp',
FromPort = port,
ToPort = port,
CidrIp = '0.0.0.0/0'
)
)
#Ingress UDP ports
for port in ['111', '2049']:
sg_in.append(ec2.SecurityGroupRule(
Description = "UDP ports for NFS",
IpProtocol = 'udp',
FromPort = port,
ToPort = port,
CidrIp = '0.0.0.0/0'
)
)
#Egress rule for all ports and protocols
sg_out.append(ec2.SecurityGroupRule(
Description = "Outbound traffic",
IpProtocol = '-1',
FromPort = 0,
ToPort = 0,
CidrIp = '0.0.0.0/0'
)
)
t.add_resource(ec2.SecurityGroup(
"QumuloSecurityGroup",
GroupDescription = "Enable ports for NFS/SMB/FTP, Management, Replication, and Clustering.",
SecurityGroupIngress = sg_in,
SecurityGroupEgress = sg_out,
VpcId = Ref("VpcId")
))
# Self referencing security rules need to be added after the group is created.
# This rule is enabling all traffic between members of the security group for
# clustering.
t.add_resource(ec2.SecurityGroupIngress(
"QumuloSecurityGroupNodeRule",
DependsOn = "QumuloSecurityGroup",
Description = "Qumulo Internode Communication",
GroupId = Ref("QumuloSecurityGroup"),
IpProtocol = '-1',
FromPort = 0,
ToPort = 0,
SourceSecurityGroupId = Ref("QumuloSecurityGroup")
))
# add_nodes() takes a given Template object, an count of nodes to create, and
# a name to prefix all EC2 instances with. EC2 instances will be created with the
# naming structure of Prefix + Node + NodeNumber.
def add_nodes(t, nodes, prefix):
nodes_list = []
for x in range(0, nodes):
node_name = prefix + "Node" + str((x + 1))
t.add_resource(
ec2.Instance(
node_name,
ImageId = FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
InstanceType = Ref("InstanceType"),
KeyName = Ref("KeyName"),
NetworkInterfaces = [
ec2.NetworkInterfaceProperty(
AssociatePublicIpAddress = False,
GroupSet = [Ref("QumuloSecurityGroup")],
DeviceIndex = 0,
DeleteOnTermination = True,
SubnetId = Ref("SubnetId"),
)
]
)
)
nodes_list.append(node_name)
# Create a list containing the Private IPs of all nodes.
output_ips = []
for i in nodes_list:
output_ips.append(GetAtt(i, "PrivateIp"))
t.add_output(Output(
"ClusterPrivateIPs",
Description="Copy and paste this list into the QF2 Cluster Creation Screen",
Value=Join(", ", output_ips),
))
t.add_output(Output(
"LinkToManagement",
Description="Click to launch the QF2 Admin Console",
Value=Join("", ["https://",GetAtt(nodes_list[0], "PrivateIp")]),
))
t.add_output(Output(
"InstanceId",
Description="Copy and paste this instance ID into the QF2 Cluster Creation Screen.",
Value=Ref(prefix + "Node1"),
))
# create_qumulo_cft() takes a count of nodes to create, a prefix for node names, and an AMI ID.
# This function will return a completed Template object fully configured
# with the number of nodes requested.
def create_qumulo_cft(nodes, prefix, amiid):
t = Template()
t.add_description("QF2 for AWS has the highest performance of any file storage "
"system in the public cloud and a complete set of enterprise features, such "
"as support for SMB, real-time visibility into the storage system, "
"directory-based capacity quotas, and snapshots.")
add_params(t)
add_amimap(t, amiid)
add_secgroup(t)
add_nodes(t, nodes, prefix)
return t
# write_listing_cfts() takes in a prefix to be used for node/file naming, a suffix for the file
# name, and the us-east-1 AMI ID that will be cloned to other regions when the
# listing is active. Initially this will create three CFTs: 4, 6, and 10 node clusters.
def write_listing_cfts(prefix, suffix, amiid):
qcft4 = create_qumulo_cft(4, prefix, amiid)
qcft6 = create_qumulo_cft(6, prefix, amiid)
qcft10 = create_qumulo_cft(10, prefix, amiid)
f_four_node = open(prefix + "-4Node-" + suffix + ".json", "w")
f_four_node.write(qcft4.to_json())
f_four_node.close()
f_six_node = open(prefix + "-6Node-" + suffix + ".json", "w")
f_six_node.write(qcft6.to_json())
f_six_node.close()
f_ten_node = open(prefix + "-10Node-" + suffix + ".json", "w")
f_ten_node.write(qcft10.to_json())
f_ten_node.close()
if __name__ == '__main__':
write_listing_cfts("QF2", "5TB", "ami-0756b42577c89cece") #to be replaced with real 5TB AMIID
write_listing_cfts("QF2", "20TB", "AMI-ID-US-EAST-1") #to be replaced with real 5TB AMIID
``` |
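Beyond the canned 4/6/10-node listings, `create_qumulo_cft` can be called directly for any node count; a sketch, assuming the functions above are in scope (the AMI ID is a placeholder):
```python
# Generate a 2-node template and print it as CloudFormation JSON.
template = create_qumulo_cft(2, "QF2", "ami-00000000000000000")
print(template.to_json())
```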
{
"source": "johnmcguire1/IsaacGymEnvs",
"score": 2
} |
#### File: IsaacGymEnvs/isaacgymenvs/combineNN.py
```python
import torch
nn0 = torch.load('runs/Cartpole/nn0/Cartpole.pth')
nn1 = torch.load('runs/Cartpole/nn1/Cartpole.pth')
nn2 = torch.load('runs/Cartpole/nn0/last_Cartpoleep101rew[490.87].pth')
"""
print("value_mean_std.running_mean: ")
print(nn1['model']['value_mean_std.running_mean'].item())
print("value_mean_std.running_var: ")
print(nn1['model']['value_mean_std.running_var'].item())
print("value_mean_std.count: ")
print(nn1['model']['value_mean_std.count'].item())
print("running_mean_std.running_mean: ")
print(nn1['model']['running_mean_std.running_mean'])
print("running_mean_std.running_var: ")
print(nn1['model']['running_mean_std.running_var'])
print("running_mean_std.count: ")
print(nn1['model']['running_mean_std.count'].item())
print("a2c_network.sigma: ")
print(nn1['model']['a2c_network.sigma'].item())
print("a2c_network.actor_mlp.0.weight: ")
print(nn1['model']['a2c_network.actor_mlp.0.weight'])
print("a2c_network.actor_mlp.0.bias: ")
print(nn1['model']['a2c_network.actor_mlp.0.bias'])
print("a2c_network.actor_mlp.2.weight: ")
print(nn1['model']['a2c_network.actor_mlp.2.weight'])
print("a2c_network.actor_mlp.2.bias: ")
print(nn1['model']['a2c_network.actor_mlp.2.bias'])
print("a2c_network.value.weight: ")
print(nn1['model']['a2c_network.value.weight'])
print("a2c_network.value.bias: ")
print(nn1['model']['a2c_network.value.bias'])
print("a2c_network.mu.weight: ")
print(nn1['model']['a2c_network.mu.weight'])
print("a2c_network.mu.bias: ")
print(nn1['model']['a2c_network.mu.bias'])
"""
print(nn2['model'].keys())
for key in nn1['model'].keys():
print((nn0['model'][key] + nn1['model'][key]) / 2)
def averageNNs(nn0_path, nn1_path):
"""
nn0 = torch.load(nn0_path)
nn1 = torch.load(nn1_path)
value_running_mean_tensor =
value_running_var_tensor =
value_count_tensor =
running_running_mean_tensor =
running_running_var_tensor =
running_count_tensor =
a2c_sigma_tensor =
a2c_0_weight_tensor =
a2c_0_bias_tensor =
a2c_2_weight_tensor =
a2c_2_bias_tensor =
a2c_value_weight_tensor =
a2c_value_bias_tensor =
a2c_mu_weight_tensor =
a2c_mu_bias_tensor =
"""
#print(nn0.named_parameters())
#Average all parameters
#for key in nn0:
# nn0[key] = (nn0[key] + nn1[key])/2
# nn1[key] = nn0[key]
#print(nn0)
#torch.save(nn0, 'runs/Cartpole/nn0/Cartpole.pth')
#torch.save(nn1, 'runs/Cartpole/nn1/Cartpole.pth')
``` |
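The commented-out `averageNNs` stub points at the intended approach: load two checkpoints, average every tensor under the 'model' key, and save the result. A minimal sketch of that idea (not part of the original file; the paths are only examples):
```python
import torch

def average_checkpoints(path_a, path_b, out_path):
    """Average all matching 'model' tensors from two checkpoints and save the result."""
    ckpt_a = torch.load(path_a)
    ckpt_b = torch.load(path_b)
    for key in ckpt_a['model'].keys():
        ckpt_a['model'][key] = (ckpt_a['model'][key] + ckpt_b['model'][key]) / 2
    torch.save(ckpt_a, out_path)

# Example (paths assumed, matching the script above):
# average_checkpoints('runs/Cartpole/nn0/Cartpole.pth',
#                     'runs/Cartpole/nn1/Cartpole.pth',
#                     'runs/Cartpole/nn_avg/Cartpole.pth')
```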
{
"source": "JohnMcSpedon/GoDaddy_DNS_migrator",
"score": 2
} |
#### File: JohnMcSpedon/GoDaddy_DNS_migrator/godaddy_dns.py
```python
import os
import time
from pprint import pprint
from typing import List
import requests
import credential_loaders
BASE_URL = "https://api.godaddy.com"
# You can easily replace these with a different CredentialLoader to match your key management system
API_KEY_CRED_LOADER = credential_loaders.EnvVarCredentialLoader("GODADDY_API_KEY")
API_SECRET_CRED_LOADER = credential_loaders.EnvVarCredentialLoader("GODADDY_API_SECRET")
# API_KEY_CRED_LOADER = credential_loaders.PlaintextCredentialLoader("./api_key.txt")
# API_SECRET_CRED_LOADER = credential_loaders.PlaintextCredentialLoader("./api_secret.txt")
def _get_headers() -> dict:
"""Get authorization header for GoDaddy Developer API.
https://developer.godaddy.com/keys
"""
api_key = API_KEY_CRED_LOADER.load_credentials()
api_secret = API_SECRET_CRED_LOADER.load_credentials()
return {"Authorization": "sso-key {}:{}".format(api_key, api_secret)}
def _call_endpoint(url_suffix: str, base_url: str = BASE_URL) -> dict:
"""Call GoDaddy developer API endpoint.
Only supports GET endpoints to keep access read-only.
"""
headers = _get_headers()
url = os.path.join(base_url, url_suffix)
resp = requests.get(url, headers=headers)
return resp.json()
def get_domains() -> List[str]:
"""Get list of Domains for this API key."""
ret = _call_endpoint("v1/domains")
# Example response:
# [{'createdAt': '2016-06-25T03:08:44.000Z',
# 'domain': 'mydomain.com',
# 'domainId': 12345678,
# 'expirationProtected': False,
# 'expires': '2020-06-25T03:08:44.000Z',
# 'holdRegistrar': False,
# 'locked': True,
# 'nameServers': None,
# 'privacy': False,
# 'renewAuto': True,
# 'renewDeadline': '2020-08-09T03:08:44.000Z',
# 'renewable': True,
# 'status': 'ACTIVE',
# 'transferProtected': False},]
domains = [d["domain"] for d in ret]
return domains
def get_domain_dns_records(domain):
"""Get DNS entries for a specific domain
Returns:
List with format (for example):
[ {'data': '172.16.17.32', 'name': '_dmarc', 'ttl': 3600, 'type': 'A'},
{'data': 'ns37.domaincontrol.com', 'name': '@', 'ttl': 3600, 'type': 'NS'}, ...]
"""
url_suffix = "v1/domains/{}/records".format(domain)
ret = _call_endpoint(url_suffix)
if isinstance(ret, dict) and ret.get('code', None) == "UNKNOWN_DOMAIN":
# e.g. {'code': 'UNKNOWN_DOMAIN', 'message': 'The given domain is not registered, or does not have a zone file'}
raise Exception(f"Can't find domain {domain}. Are you sure your API key and secret are correct?: {ret}")
return ret
def print_all_dns_records():
""" Print each domain and its DNS records (for domains linked to this API key)."""
for domain in sorted(get_domains()):
dns_records = get_domain_dns_records(domain)
print(domain)
pprint(dns_records)
print("*" * 50)
# TODO: poor man's rate limiter. improve?
time.sleep(2)
if __name__ == "__main__":
print_all_dns_records()
``` |
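With the two environment variables set, the read-only helpers can also be used piecemeal; a sketch, assuming the functions above are in scope:
```python
# Assumes GODADDY_API_KEY and GODADDY_API_SECRET are set in the environment.
for domain in get_domains()[:3]:
    records = get_domain_dns_records(domain)
    a_records = [r for r in records if r.get("type") == "A"]
    print(domain, "has", len(a_records), "A records")
```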
{
"source": "johnmdelgado/SRE-Project",
"score": 4
} |
#### File: SRE-Project/scripts/common_password.py
```python
import re
def common_password_check(password, exclude_list, print_valid, debug):
if(debug):
print("Entered common Password Check")
print("Password is: {}".format(password))
print("Exclude list is: {}".format(exclude_list))
print("encoded password is: {}".format(password.encode()))
    # Escape the password so regex metacharacters in it are matched literally
    # rather than interpreted as a pattern.
    valid_password_check = re.search(re.escape(password.encode()),
                                     exclude_list)
if(debug):
print(valid_password_check)
if(valid_password_check):
print("Password: {} was found on the common password exception list.".format(password))
return True
else:
if((debug) or (print_valid)):
print("Password {} was a valid password. Moving to nextline.".format(password))
return False
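# Example usage (hypothetical file name; the exclude list is passed as bytes
# because the encoded password is searched against it above):
#
#   with open("common_passwords.txt", "rb") as f:
#       exclude_list = f.read()
#   found = common_password_check("hunter2", exclude_list, print_valid=False, debug=False)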
```
#### File: SRE-Project/tests/test__file_importer.py
```python
import os
import sys
import inspect
functions_dir = os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))))+ "/scripts" #scripts Directory
print(functions_dir)
sys.path.insert(0, functions_dir)
import file_importer
import unittest
import yaml
with open("../configs/config.yaml", "r") as ymlfile:
config = yaml.safe_load(ymlfile)
class password_characters_test_case(unittest.TestCase):
# ========================================================================================
    # Program terminating test cases
# ========================================================================================
def test_file_path_does_not_exist(self):
# should exit executing code
test_string = "./test.txt"
self.assertRaises(Exception, file_importer.file_importer, test_string,config["debugging"]["test_debug"])
def test_file_is_not_txt_file(self):
# should exit executing code
testString = "../data/test.csv"
self.assertRaises(Exception, file_importer.file_importer, testString,config["debugging"]["test_debug"])
# ========================================================================================
# Valid filepaths returning map test cases
# ========================================================================================
def test_default_file_path_from_config(self):
        # valid file path from config: should return the imported data without raising
testString = config["testing"]["sample_excluded_pw_filepath"]
result = file_importer.file_importer(testString,
config["debugging"]["test_debug"])
self.assertIsInstance(result, object)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "johnm-dev/russng",
"score": 2
} |
#### File: russng/tools/rubb.py
```python
import os
import os.path
import pwd
import shutil
import signal
import stat
import subprocess
import sys
from sys import stderr
import traceback
# system
ETC_DIR = "/etc/russ"
RUN_DIR = "/var/run/russ"
CONF_DIR = "%s/conf" % RUN_DIR
PIDS_DIR = "%s/pids" % RUN_DIR
SERVICES_DIR = "%s/services" % RUN_DIR
SYSTEM_SOURCESFILE = "%s/bb.sources" % ETC_DIR
SYSTEM_BBBASEDIR = "%s/bb" % RUN_DIR
SYSTEM_SAFEPATHS = ["/run/russ/bb", "/var/run/russ/bb"]
DEVNULL = open("/dev/null", "w")
class BB:
"""Manage bulletin board (BB) for RUSS services.
Organized as:
.../bb/
<bbname>/
conf/
pids/
services/
The pids dir is only used for "system" BBs.
"""
def __init__(self, bbdir):
self.bbdir = bbdir
self.name = os.path.basename(bbdir)
self.confdir = os.path.join(self.bbdir, "conf")
self.pidsdir = os.path.join(self.bbdir, "pids")
self.servicesdir = os.path.join(self.bbdir, "services")
def prep(self):
"""Ensure working areas exist.
"""
print("prepping bb (%s) ..." % (self.name,))
for dirpath in [self.confdir, self.pidsdir, self.servicesdir]:
if not os.path.isdir(dirpath):
if verbose:
print("makedir (%s)" % (dirpath,))
os.makedirs(dirpath)
def clean(self, safepaths):
"""Clean areas associated with srcname.
"""
print("cleaning bb (%s) ..." % (self.name,))
for dirpath in [self.confdir, self.pidsdir, self.servicesdir]:
if os.path.exists(dirpath):
for safepath in safepaths:
if dirpath.startswith(safepath):
for name in os.listdir(dirpath):
path = os.path.join(dirpath, name)
if verbose:
print("removing (%s)" % (path,))
os.remove(path)
if not os.listdir(dirpath):
os.rmdir(dirpath)
if os.path.exists(self.bbdir):
if not os.listdir(self.bbdir):
if verbose:
print("rmdir (%s)" % (self.bbdir,))
os.rmdir(self.bbdir)
def get_confnames(self):
"""Return configuration names without the .conf.
"""
_, _, names = next(os.walk(self.confdir))
names = [name[:-5] for name in names]
return names
def get_names(self):
"""Return all names found under conf/ and services/.
"""
if os.path.isdir(self.confdir):
_, _, confnames = next(os.walk(self.confdir))
else:
confnames = []
if os.path.isdir(self.servicesdir):
_, _, servicenames = next(os.walk(self.servicesdir))
else:
servicenames = []
names = [name[:-5] for name in confnames if name.endswith(".conf")]
names.extend(servicenames)
return set(names)
def get_servernames(self):
"""List server names.
"""
names = os.listdir(self.servicesdir)
return names
def get_server(self, name):
return BBServer(self, name)
def install(self, filename, newname=None):
"""Install file contents to configuration file.
"""
self.prep()
if newname:
name = newname
else:
name = os.path.basename(filename)
if name.endswith(".conf"):
name = name[:-5]
print("installing (%s) from file (%s)" % (name, filename))
txt = open(filename).read()
bs = self.get_server(name)
bs.install(txt)
def remove(self, name):
"""Remove configuration.
"""
bs = self.get_server(name)
if bs:
bs.removeconf()
def show(self, name):
"""Show configuration.
"""
bs = self.get_server(name)
if bs:
txt = bs.get_conf()
if txt:
print(txt)
def start_servers(self, names):
"""Start select named or all servers of a BB.
"""
print("starting servers for bb (%s) ..." % (self.name,))
for name in names:
bs = self.get_server(name)
if bs.isrunning():
stderr.write("warning: server (%s) already running\n" % (name,))
else:
bs.start()
st = bs.get_status()
if st:
print("bb=%(bbname)s:name=%(name)s:running=%(isrunning)s" % st)
def status_servers(self, names, detail=False):
"""Output status of select named or all servers of a BB.
"""
for name in names:
bs = self.get_server(name)
st = bs.get_status()
if st:
if detail:
print("bb=%(bbname)s:name=%(name)s:running=%(isrunning)s:type=%(type)s:pid=%(pid)s:conffile=%(conffile)s:servicefile=%(servicefile)s" % st)
else:
print("bb=%(bbname)s:name=%(name)s:running=%(isrunning)s" % st)
def stop_servers(self, names):
"""Stop select named or all servers of a BB.
"""
print("stopping servers for bb (%s) ..." % (self.name,))
for name in names:
bs = self.get_server(name)
if bs.isrunning():
bs.stop()
st = bs.get_status()
if st:
print("bb=%(bbname)s:name=%(name)s:running=%(isrunning)s" % st)
def sync(self, sources, tags=None, preclean=False):
"""Sync configuration from sources to BB.
Configurations that are not found in the sources are cleaned.
"""
print("syncing bb (%s) ..." % (self.name,))
if tags:
sources = [d for d in sources if d["name"] in tags]
self.prep()
foundfilenames = set([name for name in os.listdir(self.confdir) if name.endswith(".conf")])
if preclean:
for filename in foundfilenames:
name = filename[:-5]
s = self.get_server(name)
s.stop()
s.clean()
syncfilenames = []
for d in sources:
srctype = d["type"]
srcpath = d["source"]
if srctype in ["dir", "file"]:
if srctype == "dir":
filenames = os.listdir(srcpath)
else:
filenames = [os.path.basename(srcpath)]
srcpath = os.path.dirname(srcpath)
filenames = [name for name in filenames if name.endswith(".conf")]
for filename in filenames:
name = filename[:-5]
if filename in syncfilenames:
stderr.write("skipping. will not sync duplicate name (%s) from source (%s)\n" % (name, d["name"]))
continue
txt = open(os.path.join(srcpath, filename)).read()
s = BBServer(self, name)
print("installing (%s) from source (%s)" % (name, d["name"]))
s.install(txt)
syncfilenames.append(filename)
# clean
if not tags:
for filename in foundfilenames.difference(syncfilenames):
name = filename[:-5]
s = BBServer(self, name)
print("cleaning (%s)" % (name,))
s.clean()
class BBServer:
"""Manage server under BB location.
"""
def __init__(self, bb, name):
self.bb = bb
self.name = name
self.confname = "%s.conf" % (name,)
self.conffile = os.path.join(self.bb.confdir, self.confname)
self.pidfile = os.path.join(self.bb.pidsdir, self.name)
self.servicefile = os.path.join(self.bb.servicesdir, self.name)
def _getpid(self):
try:
return int(open(self.pidfile).read())
except:
return None
def _hasserviceconffile(self):
try:
line = open(self.conffile).readline()
return " service=conffile" in line
except:
return False
def _killpid(self):
if self.isrunning():
pid = self._getpid()
os.kill(-pid, signal.SIGHUP)
self._removepid()
def _removepid(self):
try:
os.remove(self.pidfile)
except:
pass
def _removeservice(self):
if os.path.exists(self.servicefile):
os.remove(self.servicefile)
def _ruspawn(self):
pargs = [
"ruspawn",
"-f", self.conffile,
"-c", "main:pgid=0",
"-c", "main:addr=%s" % (self.servicefile,)
]
p = subprocess.Popen(pargs,
stdin=DEVNULL,
#stdout=DEVNULL,
#stderr=DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
out, err = p.communicate()
if debug:
print("pargs (%s)" % (pargs,))
print("pid (%s) out (%s) err (%s)" % (p.pid, out, err))
        if p.pid is None:
return False
self._setpid(p.pid)
return True
def _setpid(self, pid):
open(self.pidfile, "w+").write("%s" % (pid,))
def clean(self):
"""Clean server items
"""
self.removeconf()
self._removepid()
self._removeservice()
def get_conf(self):
try:
return open(self.conffile).read()
except:
pass
def get_status(self):
"""Return status information.
"""
d = {
"bbname": self.bb.name,
"conffile": os.path.exists(self.conffile) and self.conffile or None,
"isrunning": self.isrunning(),
"name": self.name,
"pid": self._getpid(),
"pidfile": os.path.exists(self.pidfile) and self.pidfile or None,
"servicefile": os.path.exists(self.servicefile) and self.servicefile or None,
"type": self.isconffile() and "conffile" or "socket",
}
return d
def install(self, txt):
"""Install configuration file.
"""
open(self.conffile, "w+").write(txt)
def isconffile(self):
"""Check if servicefile is conffile rather than a socket file.
"""
try:
st = os.stat(self.servicefile)
if stat.S_ISSOCK(st.st_mode):
return False
return self._hasserviceconffile()
except:
if debug:
traceback.print_exc()
return False
def isrunning(self):
"""Check if server is running.
A running server has a servicefile and a pidfile.
"""
try:
if os.path.exists(self.pidfile):
pid = open(self.pidfile).read()
os.kill(-int(pid), 0)
return True
else:
return self.isconffile()
except:
if debug:
traceback.print_exc()
return False
def removeconf(self):
if os.path.exists(self.conffile):
os.remove(self.conffile)
def restart(self):
self.stop()
self.start()
def start(self):
if self._hasserviceconffile():
shutil.copy(self.conffile, self.servicefile)
else:
self._ruspawn()
def stop(self):
if not self.isconffile():
self._killpid()
self._removeservice()
class SourcesFile:
"""Interface to working with the bb.sources file.
"""
def __init__(self, path=None):
self.path = path
self.d = None
def get_sources(self, bbname):
"""Get sources associated with name from sources file.
"""
self.load()
return self.d.get(bbname)
def get_bbnames(self):
"""Get BB names from sources file.
"""
self.load()
return list(self.d.keys())
def load(self, force=False):
"""Load sources file.
Use force to reload.
"""
        if not force and self.d is not None:
return
d = {}
for line in open(self.path).readlines():
line = line.strip()
if line == "" or line.startswith("#"):
continue
t = line.split(":")
bbname = t[0]
l = d.setdefault(bbname, [])
d2 ={
"name": t[1],
"type": t[2],
"source": t[3],
}
l.append(d2)
self.d = d
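# As parsed above, each non-comment line of bb.sources has four colon-separated
# fields: <bbname>:<name>:<type>:<source>, where <type> is "dir" or "file"
# (see BB.sync). A hypothetical example line:
#   system:base:dir:/etc/russ/conf.d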
def get_bbdir(bbbasedir, bbname=None):
"""Return bbdir based on user and optional bb name.
If name starts with "/", then return it as the bbdir. Otherwise,
name cannot contain a "/".
"""
if bbname and bbname.startswith("/"):
return bbname
if bbname and "/" in bbname:
return None
return os.path.join(bbbasedir, bbname)
def get_bbnames(bbbasedir, bbnames=None):
"""Return list of BB names.
Filter bbnames if provided.
"""
try:
_, realbbnames, _ = next(os.walk(bbbasedir))
except:
realbbnames = []
if bbnames == None:
bbnames = realbbnames
else:
realbbnames = set(realbbnames)
bbnames = [bbname for bbname in bbnames if bbname in realbbnames]
return bbnames
def print_usage():
d = {
"progname": os.path.basename(sys.argv[0]),
}
print("""\
usage: %(progname)s [<options>] <cmd> [...]
%(progname)s -h|--help|help
Manage system or user RUSS bulletin boards (BB). A BB hosts RUSS
services. Although the services can be accessed directly using a
path, the standard way is to use the ("+") plus service. By default,
the plus server searches for services at some system ("system") and
user ("override", "fallback") BBs.
System BBs can host services by either a socket (running) or
configuration file (run on demand). The user BBs host services by
configuration file only.
System BBs are configured using the "sync" command which uses the
/etc/russ/bb.sources file which specifies configuration sources used
to set up. Alternatively, the "install" and "remove" commands can
also be used. However, for BBs that are managed using the sources
file, the "sync" operation will overwrite/remove anything that was
installed with "install".
User BBs are configured using the "install" and "remove" commands.
Common options:
--bb <bbname>[,...]
Select named BBs. System default is "system". User
default is "override".
--bb-all Select all BBs.
--debug Print debugging information.
-l Print detailed information when applicable.
--sources <path>
(system) Alternate path of the bb.sources file.
--verbose Print additional information.
Commands:
clean Clean BB.
install # [<newname>]
Install configuration (filename ends with .conf). Use
<newname> to override name derived from #.
list List BB entries. Use -l for details.
list-bb List BBs.
list-sources (system) List sources from sources file.
remove <name> Remove configuration.
restart [<name>,...]
Restart server(s).
resync (system) Clean and sync.
show <name> Show configuration.
start [<name>,...]
Start server(s). Make available for use.
status [<name>,...]
Report status of server(s). Use -l for details.
stop [<name>,...]
Stop server(s). Make unavailable for use.
sync [<tag>,...]
                (system) Synchronize local configuration using sources
specified in a bb.sources file. Use <tag> to limit
sources to use.""" % d)
def main(args):
global debug, verbose
try:
bball = False
bbbasedir = None
bbnames = None
cmd = None
debug = os.environ.get("RUBB_DEBUG") == "1"
detail = False
sf = None
sourcesfile = None
username = None
usertype = None
verbose = os.environ.get("RUBB_VERBOSE") == "1"
if os.getuid() == 0:
usertype = "system"
else:
usertype = "user"
while args:
arg = args.pop(0)
if arg == "--bb" and args:
bbnames = args.pop(0).split(",")
bball = False
elif arg == "--bb-all":
bball = True
bbnames = None
elif arg == "--bbbasedir" and args:
bbbasedir = args.pop(0)
elif arg == "--debug":
debug = True
elif arg in ["-h", "--help", "help"]:
print_usage()
sys.exit(0)
elif arg == "-l":
detail = True
elif arg == "--sources" and args:
                sourcesfile = args.pop(0)
elif arg == "--system":
usertype = "system"
elif arg == "--user" and args:
usertype = "user"
username = args.pop(0)
elif arg == "--verbose":
verbose = True
else:
cmd = arg
break
if username:
try:
pwd.getpwnam(username)
except:
stderr.write("error: bad username (%s)\n" % (username))
sys.exit(1)
if usertype == "system":
bbbasedir = bbbasedir or SYSTEM_BBBASEDIR
bbnames = bbnames or ["system"]
safepaths = SYSTEM_SAFEPATHS
            sourcesfile = sourcesfile or SYSTEM_SOURCESFILE
else:
if username:
bbbasedir = bbbasedir or os.path.expanduser("~%s/.russ/bb" % (username,))
else:
bbbasedir = bbbasedir or os.path.expanduser("~/.russ/bb")
bbnames = bbnames or ["override"]
safepaths = [bbbasedir]
sourcesfile = None
# validate
if not os.path.exists(bbbasedir):
pass
if sourcesfile and os.path.exists(sourcesfile):
sf = SourcesFile(sourcesfile)
if not cmd:
raise Exception()
except SystemExit:
raise
except:
if debug:
traceback.print_exc()
stderr.write("error: bad/missing arguments\n")
sys.exit(1)
try:
if verbose:
print("bb basedir (%s)" % (bbbasedir,))
print("bb names (%s)" % (bbnames,))
print("sources file (%s)" % (sourcesfile,))
print("cmd (%s)" % (cmd,))
if cmd in ["clean", "list", "list-sources", "restart", "resync", "start", "status", "stop", "sync"]:
# multi bbname commands
if cmd in ["list", "restart", "start", "status", "stop"]:
if not bbbasedir or not os.path.exists(bbbasedir):
stderr.write("error: bb basedir (%s) not found\n" % (bbbasedir,))
sys.exit(1)
if bball:
bbnames = get_bbnames(bbbasedir)
elif cmd in ["list-sources", "resync", "sync"]:
if bball:
bbnames = sf.get_bbnames()
_args = args[:]
for bbname in bbnames:
args = _args[:]
bbdir = get_bbdir(bbbasedir, bbname)
bb = BB(bbdir)
if cmd == "clean" and not args:
names = sorted(bb.get_names())
bb.stop_servers(names)
bb.clean(safepaths)
elif cmd == "list" and not args:
names = sorted(bb.get_names())
if names:
print("%s: %s" % (bbname, " ".join(names)))
elif cmd == "list-sources" and not args:
sources = sf.get_sources(bbname)
if sources:
if detail:
for d in sources:
print("%s:%s" % (bbname, "%(name)s:%(type)s:%(source)s" % d))
else:
print("%s: %s" % (bbname, " ".join([d["name"] for d in sources])))
elif cmd == "restart" and len(args) < 2:
names = args and [args.pop(0)] or sorted(bb.get_names())
bb.stop_servers(names)
bb.start_servers(names)
elif cmd == "resync":
names = sorted(bb.get_names())
bb.stop_servers(names)
sources = sf.get_sources(bb.name)
if sources:
bb.clean(safepaths)
bb.sync(sources)
else:
print("skipping. no source for bb (%s)" % (bb.name,))
elif cmd == "status" and len(args) < 2:
names = args and args.pop(0).split(",") or sorted(bb.get_names())
bb.status_servers(names, detail)
elif cmd == "stop" and len(args) < 2:
names = args and [args.pop(0)] or sorted(bb.get_names())
bb.stop_servers(names)
elif cmd == "sync" and len(args) < 2:
                    tags = args and args.pop(0).split(",") or None
sources = sf.get_sources(bb.name)
if sources:
bb.sync(sources, tags)
else:
print("skipping. no source for bb (%s)" % (bb.name,))
elif cmd == "start" and len(args) < 2:
names = args and [args.pop(0)] or sorted(bb.get_names())
bb.start_servers(names)
else:
stderr.write("error: bad/missing command or arguments\n")
sys.exit(1)
elif cmd in ["install", "remove", "show"]:
# single bbname commands
if cmd in ["show"]:
if not bbbasedir or not os.path.exists(bbbasedir):
stderr.write("error: bb basedir (%s) not found\n" % (bbbasedir,))
sys.exit(1)
if bball:
bbnames = get_bbnames(bbbasedir)
bbname = bbnames[0]
bbdir = get_bbdir(bbbasedir, bbname)
bb = BB(bbdir)
if cmd == "install" and args:
filename = None
newname = None
filename = args.pop(0)
if args:
newname = args.pop(0)
bb.install(filename, newname)
elif cmd == "remove" and len(args) == 1:
name = args.pop(0)
bb.remove(name)
elif cmd == "show" and len(args) == 1:
name = args.pop(0)
bb.show(name)
else:
stderr.write("error: bad/missing command or arguments\n")
sys.exit(1)
elif cmd in ["list-bb"]:
if cmd == "list-bb":
bbnames = get_bbnames(bbbasedir)
if bbnames:
print(" ".join(bbnames))
else:
stderr.write("error: bad/missing command or arguments\n")
sys.exit(1)
else:
stderr.write("error: bad/missing command or arguments\n")
sys.exit(1)
except SystemExit:
raise
except:
if debug:
traceback.print_exc()
stderr.write("error: fail to run command\n")
sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "johnmeade/helpy",
"score": 4
} |
#### File: johnmeade/helpy/functional.py
```python
'functional helpers to clean up functional code'
from helpy.misc import do, flatten
from functools import reduce
def identity(x):
return x
def flip(f):
    '''Flip the order of arguments for a function with two arguments.
Especially useful with currying.
Example:
powflip = flip( math.pow )
assert powflip(2,3) == 9
# with currying:
pow2 = curry( flip( math.pow ) )(2)
assert pow2(3) == 9
'''
return lambda x, y: f(y, x)
def foldr(f, xs, acc=None):
'Like reduce, but processes list elements from index -1 to index 0'
    if acc is None:
lst = list(reversed(xs))
return reduce(f, lst[1:], lst[0])
else:
return reduce(f, reversed(xs), acc)
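# For instance, with this module's argument order (accumulator first, as in
# functools.reduce):
#   foldr(lambda acc, x: acc + [x], [1, 2, 3], []) == [3, 2, 1]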
def appended(lst, *x):
    '''Append all passed args to the input list. Args can be any nesting of iterables; they are flattened before appending.
Example:
appended( [1,2], 3 )
>>> [1, 2, 3]
appended( [1,2], [(3,[2,6,(9,8,7)]),[4,5]], 4 )
>>> [1, 2, 3, 2, 6, 9, 8, 7, 4, 5, 4]
'''
do( lst.append(y) for y in flatten(x) )
return lst
def argsorted(lst, **kwa):
    '''Return the list of indices that sort the input list. This uses the default
    python sorting key; optionally supply your own with the "key" keyword arg.
examples:
argsorted([49,52,31]) == [2,0,1]
argsorted([49,52,31], key=lambda x: -x) == [1,0,2]
'''
# cast to list to handle python3 lazy functools
xs = list(lst)
# actual key function will receive a list index, so we wrap transformation
    # around it.
if 'key' in kwa:
key = kwa['key']
if not callable(key):
raise Exception("Keyword argument 'key' must be callable")
wrapped_key = lambda i: key( xs[i] )
else:
wrapped_key = xs.__getitem__
return sorted(range(len(xs)), key=wrapped_key)
``` |
{
"source": "johnMedlockDev/StockDataMiningSolution",
"score": 3
} |
#### File: main/classes/JsonIO.py
```python
from json import dump
import json
from main.classes.PathHelper import PathHelper
from main.enums.EJsonFolder import EJsonFolder
from main.classes.Logger import Logger
import os
class JsonIO():
def __init__(self) -> None:
self.__oldJsonFilePath__ = ""
def WriteJsonToFile(self, directory: EJsonFolder, filename: str, jsonObject: dict):
try:
with open(f'{PathHelper.JsonRoot()}\\{directory.value}\\{filename.upper()}.json', 'w') as outfile:
dump(jsonObject, outfile)
Logger.LogInfo(
f"Successful JSON file creation of {filename} in {directory.value}!")
except:
Logger.LogError(f"Failure JSON file creation of {filename}!")
def ReadJsonFromFile(self, directory: EJsonFolder):
filenames = os.listdir(f"./io/json/{directory.value}")
symbolAndJsonData = []
for filename in filenames:
if filename.endswith(".json"):
symbolAndJsonData = self.OpenJsonFile(
directory, filename)
self.MoveJsonFile(directory,
EJsonFolder.DONE, filename)
break
return symbolAndJsonData
def OpenJsonFile(self, directory: EJsonFolder, filename: str):
self.__oldJsonFilePath__ = f"{PathHelper.JsonRoot()}\\{directory.value}\\{filename}"
symbol = filename.replace('.json', "")
try:
with open(self.__oldJsonFilePath__) as jsonFile:
jsonData = json.load(jsonFile)
return [symbol, jsonData]
except:
Logger.LogError(
f"Couldn't open file at {self.__oldJsonFilePath__}")
return []
def MoveJsonFile(self, directory: EJsonFolder, subDirectory: EJsonFolder, filename: str):
newJsonFilePath = f"{PathHelper.JsonRoot()}\\{directory.value}\\{subDirectory.value}\\{filename}"
os.replace(self.__oldJsonFilePath__, newJsonFilePath)
Logger.LogInfo(
f" Moved file from {self.__oldJsonFilePath__} to {newJsonFilePath}")
```
#### File: main/classes/Logger.py
```python
import logging
from datetime import date
class Logger():
@staticmethod
def LogError(message: str):
logging.basicConfig(filename=f"io/logs/{date.today()}.log",
format='%(asctime)s %(message)s',
filemode='a', level=logging.DEBUG)
logging.getLogger().error(f"ERROR: {message}")
@staticmethod
def LogInfo(message: str):
logging.basicConfig(filename=f"io/logs/{date.today()}.log",
format='%(asctime)s %(message)s',
filemode='a', level=logging.DEBUG)
logging.getLogger().info(f"INFO: {message}")
@staticmethod
def LogDebug(message: str):
logging.basicConfig(filename=f"io/logs/{date.today()}.log",
format='%(asctime)s %(message)s',
filemode='a', level=logging.DEBUG)
logging.getLogger().debug(f"DEBUG: {message}")
```
#### File: main/classes/SQLIOHandler.py
```python
from main.enums.EJsonFolder import EJsonFolder
from main.classes.SQLIO import SQLIO
class SQLIOHandler():
def ProcessAllJsonFilesIntoDatabase(self):
folders = [EJsonFolder.ANNUALBALANCE, EJsonFolder.ANNUALCASH, EJsonFolder.ANNUALINCOME, EJsonFolder.QUARTERLYBALANCE,
EJsonFolder.QUARTERLYCASH, EJsonFolder.QUARTERLYINCOME, EJsonFolder.PRICES, EJsonFolder.OVERVIEW]
for folder in folders:
self.__sqlIo__ = SQLIO(folder)
print(f"Starting to process files in {folder.value} folder.")
self.__sqlIo__.InsertDataFromJsonBatch()
print(f"Finished processing files in {folder.value} folder.")
```
#### File: main/classes/SymbolListGenerator.py
```python
from main.classes.PathHelper import PathHelper
import os
from main.enums.EJsonFolder import EJsonFolder
from pandas import read_csv
from pathlib import Path
from main.classes.Logger import Logger
from os import listdir
from os.path import isfile, join
from typing import List
class SymbolListGenerator():
def __init__(self, fileName: str, columnName: str):
self.__DF__ = read_csv(
f'{Path().absolute()}\\io\\csv\\source\\{fileName}.csv')[columnName].to_list()
self.__parentDirectory__ = ''
self.__childDirectory__ = ''
    def CreateFilteredListOfSymbols(self, directories: List[EJsonFolder]):
self.InitializeDirectories(directories)
listOfPersistedSymbols = self.GetListPersistedOfSymbols()
listOfSymbols = self.__DF__
if self.__childDirectory__ == EJsonFolder.REDO:
return self.FliterRedoList(listOfPersistedSymbols)
for symbol in listOfPersistedSymbols:
if symbol in listOfSymbols:
listOfSymbols.remove(symbol)
return listOfSymbols
    def InitializeDirectories(self, directories: List[EJsonFolder]):
try:
self.__parentDirectory__, self.__childDirectory__ = directories
except ValueError:
self.__parentDirectory__ = directories[0]
self.__childDirectory__ = EJsonFolder.NONE
def GetListPersistedOfSymbols(self):
listOfPersistedSymbols = list(
set(self.GetListFromParentDirectory()+self.GetListFromDoneDirectory()))
return listOfPersistedSymbols
def GetListFromParentDirectory(self):
jsonPath = f"{self.GetParentPath()}"
return [f.replace('.json', '') for f in listdir(jsonPath) if isfile(join(jsonPath, f))]
def GetListFromDoneDirectory(self):
jsonPath = f"{self.GetParentPath()}\{EJsonFolder.DONE.value}"
return [f.replace('.json', '') for f in listdir(
jsonPath) if isfile(join(jsonPath, f))]
def FliterRedoList(self, listOfPersistedSymbols: list):
jsonPath = f"{self.GetParentPath()}\{EJsonFolder.REDO.value}"
filesInRedoFolder = self.GetListFromRedoDirectory()
for file in listOfPersistedSymbols:
if file in filesInRedoFolder:
filesInRedoFolder.remove(file)
Logger.LogInfo(
f"Removed file {file}.json from {self.__parentDirectory__.value}\redo because it already exist in {self.__parentDirectory__.value}\done.")
os.remove(f"{jsonPath}\\{file}.json")
return filesInRedoFolder
def GetListFromRedoDirectory(self):
jsonPath = f"{self.GetParentPath()}\\{EJsonFolder.REDO.value}"
return [f.replace('.json', '') for f in listdir(
jsonPath) if isfile(join(jsonPath, f))]
def GetParentPath(self):
return f"{PathHelper.JsonRoot()}\{self.__parentDirectory__.value}"
```
#### File: main/enums/EJsonFolder.py
```python
from enum import Enum, unique
@unique
class EJsonFolder(Enum):
ANNUALBALANCE = 'annual-balance-sheets'
ANNUALCASH = 'annual-cash-flows'
ANNUALDATES = 'annual-earnings-dates'
ANNUALINCOME = 'annual-income-statements'
QUARTERLYBALANCE = 'quarterly-balance-sheets'
QUARTERLYCASH = 'quarterly-cash-flows'
QUARTERLYDATES = 'quarterly-earnings-dates'
QUARTERLYINCOME = 'quarterly-income-statements'
PRICES = 'prices'
OVERVIEW = 'company-overview'
REDO = 'redo'
DONE = 'done'
NONE = ''
def describe(self):
# self is the member here
return self.name, self.value
def __str__(self):
return 'my custom str! {0}'.format(self.value)
``` |
{
"source": "johnmelodyme/BlockchainProgramming",
"score": 3
} |
#### File: johnmelodyme/BlockchainProgramming/block.py
```python
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@Author : <NAME>
@Copyright: <NAME> & <NAME> © Copyright 2020
@INPIREDBYGF: <NAME> <3
"""
import datetime
import hashlib
class Block:
def __init__(self, previous_block_hash, data, timestamp):
self.previous_block_hash = previous_block_hash
self.data = data
self.timestamp = timestamp
self.hash = self.get_hash()
@staticmethod
def create_genesis_block():
return Block("0", "0", datetime.datetime.now())
def get_hash(self):
header_bin = (str(self.previous_block_hash) +
str(self.data) +
str(self.timestamp))
inner_hash = hashlib.sha256(header_bin.encode()).hexdigest().encode()
outer_hash = hashlib.sha256(inner_hash).hexdigest()
return outer_hash
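# Example (hypothetical usage): chain a few blocks by passing each block's hash
# as the previous_block_hash of the next one.
if __name__ == "__main__":
    chain = [Block.create_genesis_block()]
    for i in range(1, 4):
        previous = chain[-1]
        chain.append(Block(previous.hash, "data %d" % i, datetime.datetime.now()))
    for block in chain:
        print(block.previous_block_hash[:8], "->", block.hash[:8])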
``` |
{
"source": "johnmelodyme/chatbot",
"score": 3
} |
#### File: chatbot/aiml/WordSub.py
```python
try:
dict
except:
from UserDict import UserDict as dict
import ConfigParser
import re
import string
class WordSub(dict):
"""All-in-one multiple-string-substitution class."""
def _wordToRegex(self, word):
"""Convert a word to a regex object which matches the word."""
if word != "" and word[0].isalpha() and word[-1].isalpha():
return "\\b%s\\b" % re.escape(word)
else:
return r"\b%s\b" % re.escape(word)
def _update_regex(self):
"""Build re object based on the keys of the current
dictionary.
"""
self._regex = re.compile("|".join(map(self._wordToRegex, self.keys())))
self._regexIsDirty = False
def __init__(self, defaults={}):
"""Initialize the object, and populate it with the entries in
the defaults dictionary.
"""
self._regex = None
self._regexIsDirty = True
for k, v in defaults.items():
self[k] = v
def __call__(self, match):
"""Handler invoked for each regex match."""
return self[match.group(0)]
def __setitem__(self, i, y):
self._regexIsDirty = True
        # for each entry the user adds, we actually add three entries:
super(type(self), self).__setitem__(
string.lower(i), string.lower(y)) # key = value
super(type(self), self).__setitem__(
string.capwords(i), string.capwords(y)) # Key = Value
super(type(self), self).__setitem__(
string.upper(i), string.upper(y)) # KEY = VALUE
def sub(self, text):
"""Translate text, returns the modified text."""
if self._regexIsDirty:
self._update_regex()
return self._regex.sub(self, text)
# self-test
if __name__ == "__main__":
subber = WordSub()
subber["apple"] = "banana"
subber["orange"] = "pear"
subber["banana"] = "apple"
subber["he"] = "she"
subber["I'd"] = "I would"
# test case insensitivity
inStr = "I'd like one apple, one Orange and one BANANA."
outStr = "I Would like one banana, one Pear and one APPLE."
if subber.sub(inStr) == outStr:
print "Test #1 PASSED"
else:
print "Test #1 FAILED: '%s'" % subber.sub(inStr)
inStr = "He said he'd like to go with me"
outStr = "She said she'd like to go with me"
if subber.sub(inStr) == outStr:
print "Test #2 PASSED"
else:
print "Test #2 FAILED: '%s'" % subber.sub(inStr)
```
#### File: src/chatbot/db.py
```python
import threading
import sys
import time
import logging
import os
import traceback
from collections import defaultdict
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('hr.chatbot.db')
SHARE_COLLECTION_NAME = 'runtime'
SHARE_COLLECTION_SIZE = 1e9
class MongoDBCollectionListener(object):
def handle_incoming_data(self, data):
return NotImplemented
class MongoDB(object):
def __init__(self, dbname='hr'):
self.client = None
self.dbname = dbname
self.listeners = []
self.subscribers = defaultdict(list)
def get_share_collection(self):
collection_names = self.client[self.dbname].collection_names()
if SHARE_COLLECTION_NAME not in collection_names:
logger.info("Creating shared collection")
self.client[self.dbname].create_collection(
SHARE_COLLECTION_NAME, capped=True, size=SHARE_COLLECTION_SIZE)
return self.client[self.dbname][SHARE_COLLECTION_NAME]
def add_listener(self, listener):
if isinstance(listener, MongoDBCollectionListener):
self.listeners.append(listener)
else:
raise ValueError("Listener must be the class or sub-class of \
MongoDBCollectionListener")
def publish(self, topic, msg):
collection = self.get_share_collection()
try:
collection.insert_one({'topic': topic, 'msg': msg})
except Exception as ex:
logger.error(ex)
def subscribe(self, topic, subscriber):
if isinstance(subscriber, MongoDBCollectionListener):
if subscriber in self.subscribers[topic]:
logger.warn("Subscriber has already registered")
return
self.subscribers[topic].append(subscriber)
self.start_monitoring({'topic': topic})
else:
raise ValueError("Subscriber must be the class or sub-class of \
MongoDBCollectionListener")
def start_monitoring(self, filter={}):
timer = threading.Timer(0, self._start_monitoring, kwargs=filter)
timer.daemon = True
timer.start()
def _start_monitoring(self, **filter):
import pymongo
while self.client is None:
time.sleep(0.1)
collection = self.get_share_collection()
tailN = 0
while True:
cursor = collection.find(filter,
cursor_type=pymongo.CursorType.TAILABLE_AWAIT,
no_cursor_timeout=True)
count = collection.find(filter).count()
cursor.skip(count - tailN)
logger.info('Cursor created')
try:
while cursor.alive:
for doc in cursor:
for l in self.listeners:
l.handle_incoming_data(doc)
for topic, subscribers in self.subscribers.iteritems():
if doc.get('topic') == topic:
for sub in subscribers:
sub.handle_incoming_data(doc)
time.sleep(0.2)
logger.info('Cursor alive %s', cursor.alive)
except Exception as ex:
logger.error(traceback.format_exc())
finally:
cursor.close()
time.sleep(2)
def _init_mongodb(mongodb, host='localhost', port=27017,
socketTimeoutMS=2000, serverSelectionTimeoutMS=1000):
import pymongo
def _init_mongo_client(mongodb):
attempt = 0
active = False
while mongodb.client is None and attempt < 3:
mongodb.client = pymongo.MongoClient(
'mongodb://{}:{}/'.format(host, port),
socketTimeoutMS=socketTimeoutMS,
serverSelectionTimeoutMS=serverSelectionTimeoutMS)
try:
mongodb.client.admin.command('ismaster')
logger.warn("Activate mongodb, %s", mongodb)
active = True
except pymongo.errors.ConnectionFailure:
attempt += 1
time.sleep(2)
mongodb.client = None
time.sleep(0.2)
if not active:
logger.warn("MongoDB server is not available")
timer = threading.Timer(0, _init_mongo_client, (mongodb,))
timer.daemon = True
timer.start()
logger.info("Thread starts")
def get_mongodb(dbname='hr', **kwargs):
mongodb = MongoDB(dbname)
_init_mongodb(mongodb, **kwargs)
return mongodb
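# Publishing example (hypothetical values): any subscriber registered for the
# same topic via subscribe() receives the inserted document through its
# handle_incoming_data() callback.
#   mongodb = get_mongodb()
#   mongodb.publish('camera', {'width': 640, 'height': 480})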
if __name__ == '__main__':
mongodb = get_mongodb()
while mongodb.client is None:
time.sleep(0.1)
print mongodb.client.server_info()
def print_fps():
global counter
start_ts = time.time()
while True:
time.sleep(1)
end_ts = time.time()
print counter/(end_ts - start_ts)
with lock:
counter = 0
start_ts = end_ts
counter = 0
lock = threading.RLock()
class Listener(MongoDBCollectionListener):
def handle_incoming_data(self, data):
print data['msg']['width'], data['msg']['height']
global counter
with lock:
counter += 1
mongodb.subscribe('camera', Listener())
job = threading.Timer(0, print_fps)
job.daemon = True
job.start()
while True:
time.sleep(1)
```
#### File: chatbot/server/chatbot_agent.py
```python
import traceback
import logging
import random
import os
import re
import sys
import numpy as np
import datetime as dt
reload(sys)
sys.setdefaultencoding('utf-8')
import atexit
from collections import defaultdict, OrderedDict
from threading import RLock
sync = RLock()
SUCCESS = 0
WRONG_CHARACTER_NAME = 1
NO_PATTERN_MATCH = 2
INVALID_SESSION = 3
INVALID_QUESTION = 4
TRANSLATE_ERROR = 5
logger = logging.getLogger('hr.chatbot.server.chatbot_agent')
from loader import load_characters, dyn_properties
from config import CHARACTER_PATH, RESET_SESSION_BY_HELLO, config
CHARACTERS = load_characters(CHARACTER_PATH)
REVISION = os.environ.get('HR_CHATBOT_REVISION')
LOCATION = dyn_properties.get('location')
IP = dyn_properties.get('ip')
from session import ChatSessionManager
session_manager = ChatSessionManager()
DISABLE_QUIBBLE = True
FALLBACK_LANG = 'en-US'
from chatbot.utils import (shorten, str_cleanup, get_weather, parse_weather,
do_translate, norm2)
from chatbot.words2num import words2num
from chatbot.server.character import TYPE_AIML, TYPE_CS
from operator import add, sub, mul, truediv, pow
import math
from chatbot.server.template import render
OPERATOR_MAP = {
'[add]': add,
'[sub]': sub,
'[mul]': mul,
'[div]': truediv,
'[pow]': pow,
}
RESPONSE_TYPE_WEIGHTS = {
'pass': 100,
'nogoodmatch': 50,
'quibble': 40,
'gambit': 50,
'repeat': 0,
'pickup': 0,
'es': 20,
'markov': 5,
}
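# Note: these weights are relative, not percentages. When no tier answers
# directly, _ask_characters samples one of the cached response types that are
# actually present, with probability proportional to these values (see the
# np.random.choice call below).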
def get_character(id, lang=None, ns=None):
for character in CHARACTERS:
if (ns is not None and character.name != ns) or character.id != id:
continue
if lang is None:
return character
elif lang in character.languages:
return character
def add_character(character):
if character.id not in [c.id for c in CHARACTERS]:
CHARACTERS.append(character)
return True, "Character added"
# TODO: Update character
else:
return False, "Character exists"
def is_local_character(character):
return character.local
def get_characters_by_name(name, local=True, lang=None, user=None):
characters = []
_characters = [c for c in CHARACTERS if c.name == name]
if local:
_characters = [c for c in _characters if is_local_character(c)]
if lang is not None:
_characters = [c for c in _characters if lang in c.languages]
if user is not None:
for c in _characters:
toks = c.id.split('/')
if len(toks) == 2:
if toks[0] == user:
characters.append(c)
else:
characters.append(c)
else:
characters = _characters
if not characters:
logger.warn('No character is satisfied')
return characters
def list_character(lang, sid):
sess = session_manager.get_session(sid)
if sess is None:
return []
characters = get_responding_characters(lang, sid)
weights = get_weights(characters, sess)
return [(c.name, c.id, w, c.level, c.dynamic_level) for c, w in zip(characters, weights)]
def list_character_names():
names = list(set([c.name for c in CHARACTERS if c.name != 'dummy']))
return names
def set_weights(param, lang, sid):
sess = session_manager.get_session(sid)
if sess is None:
return False, "No session"
if param == 'reset':
sess.session_context.weights = {}
return True, "Weights are reset"
weights = {}
characters = get_responding_characters(lang, sid)
try:
for w in param.split(','):
k, v = w.split('=')
v = float(v)
if v>1 or v<0:
return False, "Weight must be in the range [0, 1]"
try:
k = int(k)
weights[characters[k].id] = v
except ValueError:
weights[k] = v
except Exception as ex:
logger.error(ex)
logger.error(traceback.format_exc())
return False, "Wrong weight format"
sess.session_context.weights = weights
return True, "Weights are updated"
def get_weights(characters, sess):
weights = []
if hasattr(sess.session_context, 'weights') and sess.session_context.weights:
for c in characters:
if c.id in sess.session_context.weights:
weights.append(sess.session_context.weights.get(c.id))
else:
weights.append(c.weight)
else:
weights = [c.weight for c in characters]
return weights
def set_context(prop, sid):
sess = session_manager.get_session(sid)
if sess is None:
return False, "No session"
for c in CHARACTERS:
try:
c.set_context(sess, prop)
except Exception:
pass
return True, "Context is updated"
def remove_context(keys, sid):
sess = session_manager.get_session(sid)
if sess is None:
return False, "No session"
for c in CHARACTERS:
if c.type != TYPE_AIML and c.type != TYPE_CS:
continue
try:
for key in keys:
c.remove_context(sess, key)
except Exception:
pass
return True, "Context is updated"
def get_context(sid, lang):
sess = session_manager.get_session(sid)
if sess is None:
return False, "No session"
characters = get_responding_characters(lang, sid)
context = {}
for c in characters:
if not c.stateful:
continue
try:
context.update(c.get_context(sess))
except Exception as ex:
logger.error("Get context error, {}".format(ex))
logger.error(traceback.format_exc())
for k in context.keys():
if k.startswith('_'):
del context[k]
return True, context
def update_config(**kwargs):
keys = []
for key, value in kwargs.items():
if key in config:
if isinstance(value, unicode):
value = str(value)
config[key] = value
if key not in keys:
keys.append(key)
else:
logger.warn("Unknown config {}".format(key))
if len(keys) > 0:
logger.warn("Configuration is updated")
for key in keys:
logger.warn("{}={}".format(key, config[key]))
return True, "Configuration is updated"
else:
return False, "No configuration is updated"
def preprocessing(question, lang, session, request_id=None):
question = question.lower().strip()
question = ' '.join(question.split()) # remove consecutive spaces
question = question.replace('sofia', 'sophia')
reduction = get_character('reduction')
if reduction is not None:
response = reduction.respond(question, lang, session, query=True, request_id=request_id)
reducted_text = response.get('text')
if reducted_text:
question = reducted_text
return question
def _ask_characters(characters, question, lang, sid, query, request_id, **kwargs):
sess = session_manager.get_session(sid)
if sess is None:
return
used_charaters = []
data = sess.session_context
user = getattr(data, 'user')
botname = getattr(data, 'botname')
weights = get_weights(characters, sess)
weighted_characters = zip(characters, weights)
weighted_characters = [wc for wc in weighted_characters if wc[1]>0]
logger.info("Weights {}".format(weights))
    _question = preprocessing(question, lang, sess, request_id)
response = {}
hit_character = None
answer = None
cross_trace = []
cached_responses = defaultdict(list)
control = get_character('control')
if control is not None:
_response = control.respond(_question, lang, sess, query, request_id)
_answer = _response.get('text')
if _answer == '[tell me more]':
cross_trace.append((control.id, 'control', _response.get('trace') or 'No trace'))
if sess.last_used_character:
if sess.cache.that_question is None:
sess.cache.that_question = sess.cache.last_question
context = sess.last_used_character.get_context(sess)
if 'continue' in context and context.get('continue'):
_answer, res = shorten(context.get('continue'), 140)
response['text'] = answer = _answer
response['botid'] = sess.last_used_character.id
response['botname'] = sess.last_used_character.name
sess.last_used_character.set_context(sess, {'continue': res})
hit_character = sess.last_used_character
cross_trace.append((sess.last_used_character.id, 'continuation', 'Non-empty'))
else:
_question = sess.cache.that_question.lower().strip()
cross_trace.append((sess.last_used_character.id, 'continuation', 'Empty'))
elif _answer.startswith('[weather]'):
template = _answer.replace('[weather]', '')
cross_trace.append((control.id, 'control', _response.get('trace') or 'No trace'))
context = control.get_context(sess)
if context:
location = context.get('querylocation')
prop = parse_weather(get_weather(location))
if prop:
try:
_answer = template.format(location=location, **prop)
if _answer:
answer = _answer
response['text'] = _answer
response['botid'] = control.id
response['botname'] = control.name
except Exception as ex:
cross_trace.append((control.id, 'control', 'No answer'))
logger.error(ex)
logger.error(traceback.format_exc())
else:
cross_trace.append((control.id, 'control', 'No answer'))
elif _answer in OPERATOR_MAP.keys():
opt = OPERATOR_MAP[_answer]
cross_trace.append((control.id, 'control', _response.get('trace') or 'No trace'))
context = control.get_context(sess)
if context:
item1 = context.get('item1')
item2 = context.get('item2')
item1 = words2num(item1)
item2 = words2num(item2)
if item1 is not None and item2 is not None:
try:
result = opt(item1, item2)
img = math.modf(result)[0]
if img < 1e-6:
result_str = '{:d}'.format(int(result))
else:
result_str = 'about {:.4f}'.format(result)
if result > 1e20:
answer = "The number is too big. You should use a calculator."
else:
answer = "The answer is {result}".format(result=result_str)
except ZeroDivisionError:
answer = "Oh, the answer is not a number"
except Exception as ex:
logger.error(ex)
logger.error(traceback.format_exc())
answer = "Sorry, something goes wrong. I can't calculate it."
response['text'] = answer
response['botid'] = control.id
response['botname'] = control.name
else:
cross_trace.append((control.id, 'control', 'No answer'))
else:
if _answer and not re.findall(r'\[.*\].*', _answer):
cross_trace.append((control.id, 'control', _response.get('trace') or 'No trace'))
hit_character = control
answer = _answer
response = _response
else:
cross_trace.append((control.id, 'control', 'No answer'))
for c in characters:
try:
c.remove_context(sess, 'continue')
except NotImplementedError:
pass
sess.cache.that_question = None
def _ask_character(stage, character, weight, good_match=False, reuse=False):
logger.info("Asking character {} \"{}\" in stage {}".format(
character.id, _question, stage))
if not reuse and character.id in used_charaters:
cross_trace.append((character.id, stage, 'Skip used tier'))
return False, None, None
if character.id in used_charaters and character.type == TYPE_CS:
cross_trace.append((character.id, stage, 'Skip CS tier'))
return False, None, None
used_charaters.append(character.id)
answer = None
answered = False
if weight == 0:
cross_trace.append((character.id, stage, 'Disabled'))
logger.warn("Character \"{}\" in stage {} is disabled".format(
character.id, stage))
return False, None, None
response = character.respond(_question, lang, sess, query, request_id)
answer = str_cleanup(response.get('text', ''))
trace = response.get('trace')
if answer:
if 'pickup' in character.id:
cached_responses['pickup'].append((response, answer, character))
return False, None, None
if good_match:
if response.get('exact_match') or response.get('ok_match'):
if response.get('gambit'):
if random.random() < 0.3:
logger.info("{} has gambit but dismissed".format(character.id))
cross_trace.append((character.id, stage, 'Ignore gambit answer. Answer: {}, Trace: {}'.format(answer, trace)))
cached_responses['gambit'].append((response, answer, character))
else:
logger.info("{} has gambit".format(character.id))
answered = True
else:
logger.info("{} has good match".format(character.id))
answered = True
else:
if not response.get('bad'):
logger.info("{} has no good match".format(character.id))
cross_trace.append((character.id, stage, 'No good match. Answer: {}, Trace: {}'.format(answer, trace)))
cached_responses['nogoodmatch'].append((response, answer, character))
elif response.get('bad'):
cross_trace.append((character.id, stage, 'Bad answer. Answer: {}, Trace: {}'.format(answer, trace)))
cached_responses['bad'].append((response, answer, character))
elif DISABLE_QUIBBLE and response.get('quibble'):
cross_trace.append((character.id, stage, 'Quibble answer. Answer: {}, Trace: {}'.format(answer, trace)))
cached_responses['quibble'].append((response, answer, character))
else:
answered = True
if answered:
if random.random() < weight:
cross_trace.append((character.id, stage, 'Trace: {}'.format(trace)))
else:
answered = False
cross_trace.append((character.id, stage, 'Pass through. Answer: {}, Weight: {}, Trace: {}'.format(answer, weight, trace)))
logger.info("{} has answer but dismissed".format(character.id))
if character.id == 'markov':
cached_responses['markov'].append((response, answer, character))
elif character.id == 'es':
if response.get('exact_match') or response.get('ok_match'):
cached_responses['es'].append((response, answer, character))
else:
cached_responses['nogoodmatch'].append((response, answer, character))
else:
cached_responses['pass'].append((response, answer, character))
else:
if response.get('repeat'):
answer = response.get('repeat')
cross_trace.append((character.id, stage, 'Repetitive answer. Answer: {}, Trace: {}'.format(answer, trace)))
cached_responses['repeat'].append((response, answer, character))
else:
logger.info("{} has no answer".format(character.id))
cross_trace.append((character.id, stage, 'No answer. Trace: {}'.format(trace)))
return answered, answer, response
# If the last input is a question, then try to use the same tier to
# answer it.
if not answer:
if sess.open_character in characters:
answered, _answer, _response = _ask_character(
'question', sess.open_character, 1, good_match=True)
if answered:
hit_character = sess.open_character
answer = _answer
response = _response
# Try the first tier to see if there is good match
if not answer:
c, weight = weighted_characters[0]
answered, _answer, _response = _ask_character(
'priority', c, weight, good_match=True)
if answered:
hit_character = c
answer = _answer
response = _response
# Select tier that is designed to be proper to answer the question
if not answer:
for c, weight in weighted_characters:
if c.is_favorite(_question):
answered, _answer, _response = _ask_character(
'favorite', c, 1)
if answered:
hit_character = c
answer = _answer
response = _response
break
# Check the last used character
if not answer:
if sess.last_used_character and sess.last_used_character.dynamic_level:
for c, weight in weighted_characters:
if sess.last_used_character.id == c.id:
answered, _answer, _response = _ask_character(
'last used', c, weight)
if answered:
hit_character = c
answer = _answer
response = _response
break
# Check the loop
if not answer:
for c, weight in weighted_characters:
answered, _answer, _response = _ask_character(
'loop', c, weight, reuse=True)
if answered:
hit_character = c
answer = _answer
response = _response
break
if not answer:
logger.info("Picking answer from cache %s" % cached_responses.keys())
weights = np.array([float(RESPONSE_TYPE_WEIGHTS.get(k, 0)) for k in cached_responses.keys()])
pweights = weights/sum(weights)
key = np.random.choice(cached_responses.keys(), p=pweights)
logger.info("Picked %s from cache by p=%s" % (key, pweights))
response, answer, hit_character = cached_responses.get(key)[0]
response['text'] = answer
cross_trace.append(
(hit_character.id, key,
response.get('trace') or 'No trace'))
if answer and re.match('.*{.*}.*', answer):
logger.info("Template answer {}".format(answer))
try:
response['orig_text'] = answer
render_result = render(answer)
answer = render_result['render_result']
lineno = render_result['variables'].get('lineno')
response['text'] = answer
response['lineno'] = lineno
if re.search('{.*}', answer):
logger.error("answer contains illegal characters")
answer = re.sub('{.*}', '', answer)
except Exception as ex:
answer = ''
response['text'] = ''
logger.error("Error in rendering template, {}".format(ex))
dummy_character = get_character('dummy', lang)
if not answer and dummy_character:
if response.get('repeat'):
response = dummy_character.respond("REPEAT_ANSWER", lang, sid, query)
else:
response = dummy_character.respond("NO_ANSWER", lang, sid, query)
hit_character = dummy_character
answer = str_cleanup(response.get('text', ''))
if not query and hit_character is not None:
logger.info("Hit by %s", hit_character)
response['AnsweredBy'] = hit_character.id
sess.last_used_character = hit_character
hit_character.use(sess, response)
if is_question(answer.lower().strip()):
if hit_character.dynamic_level:
sess.open_character = hit_character
logger.info("Set open dialog character {}".format(
hit_character.id))
else:
sess.open_character = None
response['ModQuestion'] = _question
response['trace'] = cross_trace
return response
def is_question(question):
if not isinstance(question, unicode):
question = question.decode('utf-8')
    return question.endswith(u'?') or question.endswith(u'\uff1f')  # ASCII or full-width question mark
def get_responding_characters(lang, sid):
sess = session_manager.get_session(sid)
if sess is None:
return []
if not hasattr(sess.session_context, 'botname'):
return []
botname = sess.session_context.botname
user = sess.session_context.user
# current character > local character with the same name > solr > generic
responding_characters = get_characters_by_name(
botname, local=False, lang=lang, user=user)
responding_characters = sorted(responding_characters, key=lambda x: x.level)
generic = get_character('generic', lang)
if generic:
if generic not in responding_characters:
# get shared properties
character = get_character(botname)
generic.set_properties(character.get_properties())
responding_characters.append(generic)
else:
logger.info("Generic character is not found")
responding_characters = sorted(responding_characters, key=lambda x: x.level)
return responding_characters
def rate_answer(sid, idx, rate):
sess = session_manager.get_session(sid)
if sess is None:
logger.error("Session doesn't exist")
return False
try:
return sess.rate(rate, idx)
except Exception as ex:
logger.error("Rate error: {}".format(ex))
return False
return True
def ask(question, lang, sid, query=False, request_id=None, **kwargs):
"""
return (response dict, return code)
"""
response = {'text': '', 'emotion': '', 'botid': '', 'botname': ''}
response['lang'] = lang
sess = session_manager.get_session(sid)
if sess is None:
return response, INVALID_SESSION
if not question or not question.strip():
return response, INVALID_QUESTION
botname = sess.session_context.botname
if not botname:
logger.error("No botname is specified")
user = sess.session_context.user
client_id = sess.session_context.client_id
response['OriginalQuestion'] = question
input_translated = False
output_translated = False
fallback_mode = False
responding_characters = get_responding_characters(lang, sid)
if not responding_characters and lang != FALLBACK_LANG:
fallback_mode = True
logger.warn("Use %s medium language, in fallback mode", FALLBACK_LANG)
responding_characters = get_responding_characters(FALLBACK_LANG, sid)
try:
input_translated, question = do_translate(question, FALLBACK_LANG)
except Exception as ex:
logger.error(ex)
logger.error(traceback.format_exc())
return response, TRANSLATE_ERROR
if not responding_characters:
logger.error("Wrong characer name")
return response, WRONG_CHARACTER_NAME
# Handle commands
if question == ':reset':
session_manager.dump(sid)
session_manager.reset_session(sid)
logger.warn("Session {} is reset by :reset".format(sid))
for c in responding_characters:
if c.is_command(question):
response.update(c.respond(question, lang, sess, query, request_id))
return response, SUCCESS
response['yousaid'] = question
sess.set_characters(responding_characters)
if RESET_SESSION_BY_HELLO and question:
question_tokens = question.lower().strip().split()
if 'hi' in question_tokens or 'hello' in question_tokens:
session_manager.dump(sid)
session_manager.reset_session(sid)
logger.warn("Session {} is reset by greeting".format(sid))
if question and question.lower().strip() in ["what's new"]:
sess.last_used_character = None
sess.open_character = None
logger.info("Triggered new topic")
logger.info("Responding characters {}".format(responding_characters))
if fallback_mode:
_response = _ask_characters(
responding_characters, question, FALLBACK_LANG, sid, query, request_id, **kwargs)
else:
_response = _ask_characters(
responding_characters, question, lang, sid, query, request_id, **kwargs)
#if not query:
# Sync session data
#if sess.last_used_character is not None:
# context = sess.last_used_character.get_context(sess)
# for c in responding_characters:
# if c.id == sess.last_used_character.id:
# continue
# try:
# c.set_context(sess, context)
# except NotImplementedError:
# pass
# for c in responding_characters:
# if c.type != TYPE_AIML:
# continue
# try:
# c.check_reset_topic(sid)
# except Exception:
# continue
record = OrderedDict()
record['Datetime'] = dt.datetime.utcnow()
record['Question'] = response.get('OriginalQuestion')
record['Rate'] = ''
record['Lang'] = lang
record['Location'] = LOCATION
record['ServerIP'] = IP
record['RequestId'] = request_id
record['Revision'] = REVISION
record['ClientId'] = client_id
record['User'] = user
record['Marker'] = kwargs.get('marker')
record['BotName'] = botname
record['RunId'] = kwargs.get('run_id')
if _response is not None and _response.get('text'):
response.update(_response)
response['OriginalAnswer'] = response.get('text')
if fallback_mode:
try:
answer = response.get('text')
output_translated, answer = do_translate(answer, lang)
response['text'] = answer
except Exception as ex:
logger.error(ex)
logger.error(traceback.format_exc())
return response, TRANSLATE_ERROR
record['Answer'] = response.get('text')
record['LineNO'] = response.get('lineno')
record['OriginalAnswer'] = response.get('OriginalAnswer')
record['TranslatedQuestion'] = question
record['Topic'] = response.get('topic')
record['ModQuestion'] = response.get('ModQuestion')
record['Trace'] = response.get('trace')
record['AnsweredBy'] = response.get('AnsweredBy')
record['TranslateOutput'] = output_translated
record['TranslateInput'] = input_translated
record['NormQuestion'] = norm2(response.get('OriginalQuestion'))
record['NormAnswer'] = norm2(response.get('text'))
sess.add(record)
logger.info("Ask {}, response {}".format(response['OriginalQuestion'], response))
response.update(record)
response['Datetime'] = str(response['Datetime'])
return response, SUCCESS
else:
logger.error("No pattern match")
response.update(record)
response['Datetime'] = str(response['Datetime'])
return response, NO_PATTERN_MATCH
def said(sid, text):
sess = session_manager.get_session(sid)
if sess is None:
return False, "No session"
control = get_character('control')
if control is not None:
control.said(sess, text)
return True, "Done"
return False, 'No control tier'
def dump_history():
return session_manager.dump_all()
def dump_session(sid):
return session_manager.dump(sid)
def reload_characters(**kwargs):
global CHARACTERS, REVISION
with sync:
characters = None
logger.info("Reloading")
try:
characters = load_characters(CHARACTER_PATH)
del CHARACTERS[:]
CHARACTERS = characters
revision = kwargs.get('revision')
if revision:
REVISION = revision
logger.info("Revision {}".format(revision))
except Exception as ex:
logger.error("Reloading characters error {}".format(ex))
def rebuild_cs_character(**kwargs):
with sync:
try:
botname=kwargs.get('botname')
characters=get_characters_by_name(botname)
if not characters:
logger.error("Can't find CS tier for {}".format(botname))
for c in characters:
if c.id == 'cs' and hasattr(c, 'rebuild'):
log = c.rebuild()
if 'ERROR SUMMARY' in log:
logger.error(log[log.index('ERROR SUMMARY'):])
logger.info("Rebuilding chatscript for {} successfully".format(botname))
except Exception as ex:
logger.error("Rebuilding chatscript characters error {}".format(ex))
atexit.register(dump_history)
```
#### File: chatbot/server/response_cache.py
```python
import datetime as dt
from collections import defaultdict
import logging
import os
import csv
from chatbot.utils import norm
logger = logging.getLogger('hr.chatbot.server.response_cache')
class ResponseCache(object):
def __init__(self):
self.record = []
self.cursor = 0
self.index = defaultdict(list)
self.last_question = None
self.last_answer = None
self.that_question = None
self.last_time = None
def clean(self):
del self.record[:]
del self.index
self.record = []
self.cursor = 0
self.index = defaultdict(list)
self.last_question = None
self.last_answer = None
self.that_question = None
self.last_time = None
def check(self, question, answer):
# each additional character over the 10 characters, adds 30 seconds
# delay before that AIML string is allowed to repeat.
same_answers = [r for r in self.record if norm(r['Answer']) == norm(answer)]
time_elapsed = (dt.datetime.utcnow() - same_answers[-1]['Datetime']
).seconds if same_answers else 0
if max(0, len(norm(answer)) - 10) * 30 <= time_elapsed:
logger.debug("Allow repeat answer {}".format(answer))
logger.debug("Answer length {}, time elapsed {}".format(
len(norm(answer)), time_elapsed))
return True
if norm(answer) == norm(self.last_answer):
logger.debug("Last answer repeat")
return False
if not self.is_unique(answer):
logger.debug("Non unique answer")
return False
if self.contain(question, answer):
logger.debug("Repeat answer")
return False
return True
def add(self, record):
question = record['Question']
answer = record['Answer']
self.record.append(record)
self.index[norm(question)].append(len(self.record) - 1)
self.last_question = question
self.last_answer = answer
self.last_time = record.get('Datetime')
def rate(self, rate, idx):
if idx < 0:
idx = len(self.record) + idx
if idx < len(self.record):
self.record[idx]['Rate'] = rate
return True
return False
def contain(self, question, answer):
question = norm(question)
answer = norm(answer)
records = self._get_records(question)
answers = [norm(r['Answer']) for r in records]
return answer in answers
def is_unique(self, answer):
answers = [norm(r['Answer']) for r in self.record]
return not norm(answer) in answers
def _get_records(self, question):
records = [self.record[i] for i in self.index[norm(question)]]
return records
def dump(self, fname):
if not self.record:
logger.warn("Nothing to dump")
return False
if self.record and self.cursor >= len(self.record):
logger.warn("Nothing to dump")
return False
dirname = os.path.dirname(fname)
if not os.path.isdir(dirname):
os.makedirs(dirname)
header = self.record[0].keys()
with open(fname, 'a') as f:
writer = csv.DictWriter(f, header, extrasaction='ignore')
if self.cursor == 0:
writer.writeheader()
writer.writerows(self.record[self.cursor:])
self.cursor = len(self.record)
logger.warn("Dumpped chat history to {}".format(fname))
return True
return False
if __name__ == '__main__':
    # Simple smoke test. ResponseCache.add() expects a record dict with at least
    # the 'Question', 'Answer' and 'Datetime' keys used by check() and dump().
    def _record(question, answer, when, **extra):
        r = {'Question': question, 'Answer': answer, 'Datetime': when}
        r.update(extra)
        return r
    cache = ResponseCache()
    cache.add(_record('a', 'hi', dt.datetime(2016, 4, 22, 12, 0, 0),
                      AnsweredBy='bot', User='user'))
    cache.add(_record('a', 'Hi there', dt.datetime(2016, 4, 22, 12, 30, 0)))
    cache.add(_record('a', 'how are you', dt.datetime(2016, 4, 22, 12, 30, 0)))
    cache.add(_record('a', 'hi there', dt.datetime(2016, 4, 22, 12, 30, 0)))
    cache.add(_record('a', 'how are you', dt.datetime(2016, 4, 22, 12, 32, 0)))
    cache.add(_record('a', 'how are you', dt.datetime(2016, 4, 22, 12, 32, 0)))
    print(cache.is_unique('Hi'))
    print(cache.is_unique('hello'))
    cache.dump('./tmp')
    cache.dump('./tmp')
```
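To make the repeat policy in `ResponseCache.check` above concrete, here is a small sketch (not part of the original module) of the delay it enforces before an identical answer may be repeated:

```python
# Mirrors the rule in ResponseCache.check(): an answer may repeat only after
#   max(0, len(norm(answer)) - 10) * 30  seconds have elapsed.
def repeat_delay_seconds(normalized_length):
    """Minimum seconds before an answer of this normalized length may repeat."""
    return max(0, normalized_length - 10) * 30

assert repeat_delay_seconds(8) == 0     # short answers can repeat immediately
assert repeat_delay_seconds(30) == 600  # a 30-character answer must wait 10 minutes
```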
#### File: chatbot/server/session.py
```python
import threading
import time
import os
import sys
import datetime as dt
import logging
import traceback
import uuid
from config import HISTORY_DIR, TEST_HISTORY_DIR, SESSION_REMOVE_TIMEOUT
from response_cache import ResponseCache
from collections import defaultdict
from chatbot.server.character import TYPE_AIML
from chatbot.db import get_mongodb, MongoDB
logger = logging.getLogger('hr.chatbot.server.session')
try:
mongodb = get_mongodb()
except ImportError as ex:
mongodb = MongoDB()
logger.error(ex)
ROBOT_NAME = os.environ.get('NAME', 'default')
class SessionContext(dict):
def __init__(self):
self.context = defaultdict(dict)
def __setitem__(self, key, item):
self.__dict__[key] = item
def __getitem__(self, key):
return self.__dict__[key]
def __len__(self):
return len(self.__dict__)
def __delitem__(self, key):
del self.__dict__[key]
def __repr__(self):
return repr(self.__dict__)
def set_context(self, cid, context):
self.context[cid].update(context)
def get_context(self, cid):
return self.context[cid]
def reset_context(self, cid):
self.context[cid] = {}
class Session(object):
def __init__(self, sid):
self.sid = sid
self.session_context = SessionContext()
self.cache = ResponseCache()
self.created = dt.datetime.utcnow()
self.characters = []
dirname = os.path.join(HISTORY_DIR, self.created.strftime('%Y%m%d'))
test_dirname = os.path.join(
TEST_HISTORY_DIR, self.created.strftime('%Y%m%d'))
self.fname = os.path.join(dirname, '{}.csv'.format(self.sid))
self.test_fname = os.path.join(test_dirname, '{}.csv'.format(self.sid))
self.dump_file = None
self.closed = False
self.active = False
self.last_active_time = None
self.test = False
self.last_used_character = None
self.open_character = None
def set_test(self, test):
if test:
logger.info("Set test session")
self.test = test
def add(self, record):
if not self.closed:
logger.info("chatbot_log", extra={'data': record})
self.cache.add(record)
self.dump()
self.last_active_time = self.cache.last_time
self.active = True
return True
return False
def rate(self, rate, idx):
return self.cache.rate(rate, idx)
def set_characters(self, characters):
self.characters = characters
for c in self.characters:
if c.type != TYPE_AIML:
continue
prop = c.get_properties()
context = {}
for key in ['weather', 'location', 'temperature']:
if key in prop:
context[key] = prop.get(key)
now = dt.datetime.utcnow()
context['time'] = dt.datetime.strftime(now, '%I:%M %p')
context['date'] = dt.datetime.strftime(now, '%B %d %Y')
try:
c.set_context(self, context)
except Exception as ex:
pass
def close(self):
self.reset()
self.closed = True
def reset(self):
self.cache.clean()
self.last_used_character = None
self.open_character = None
for c in self.characters:
try:
c.refresh(self)
except NotImplementedError:
pass
def check(self, question, answer):
return self.cache.check(question, answer)
def dump(self):
if self.test:
self.dump_file = self.test_fname
else:
self.dump_file = self.fname
return self.test or self.cache.dump(self.dump_file)
def since_idle(self, since):
if self.last_active_time is not None:
return (since - self.last_active_time).total_seconds()
else:
return (since - self.created).total_seconds()
def __repr__(self):
return "<Session {} created {} active {}>".format(
self.sid, self.created, self.cache.last_time)
class Locker(object):
def __init__(self):
self._lock = threading.RLock()
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
class SessionManager(object):
def __init__(self, auto_clean=True):
self._sessions = dict()
self._users = defaultdict(dict)
self._locker = Locker()
self._session_cleaner = threading.Thread(
target=self._clean_sessions, name="SessionCleaner")
self._session_cleaner.daemon = True
if auto_clean:
self._session_cleaner.start()
def _threadsafe(f):
def wrap(self, *args, **kwargs):
self._locker.lock()
try:
return f(self, *args, **kwargs)
finally:
self._locker.unlock()
return wrap
@_threadsafe
def remove_session(self, sid):
if sid in self._sessions:
session = self._sessions.pop(sid)
session.dump()
session.close()
del session
logger.info("Removed session {}".format(sid))
def reset_session(self, sid):
if sid in self._sessions:
session = self._sessions.get(sid)
if session.active:
session.reset()
logger.warn("Reset session {}".format(sid))
def get_session(self, sid):
if sid is not None:
return self._sessions.get(sid, None)
def get_sid(self, client_id, user):
if client_id in self._users:
sessions = self._users.get(client_id)
if sessions:
sid = sessions.get(user)
session = self._sessions.get(sid)
if session:
return sid
def gen_sid(self):
return str(uuid.uuid1())
@_threadsafe
def add_session(self, client_id, user, sid):
if sid in self._sessions:
return False
if sid is None:
return False
session = Session(sid)
session.session_context.user = user
session.session_context.client_id = client_id
self._sessions[sid] = session
self._users[client_id][user] = sid
return True
def start_session(self, client_id, user, test=False, refresh=False):
"""
client_id: client id
user: user to identify session in user scope
test: if it's a session for test
refresh: if true, it will generate new session id
"""
_sid = self.get_sid(client_id, user)
if _sid and refresh:
self.remove_session(_sid)
_sid = None
if not _sid:
_sid = self.gen_sid()
self.add_session(client_id, user, _sid)
session = self.get_session(_sid)
assert(session is not None)
session.set_test(test)
return _sid
def has_session(self, sid):
return sid in self._sessions
def _clean_sessions(self):
while True:
remove_sessions = []
since = dt.datetime.utcnow()
for sid, s in self._sessions.iteritems():
if s.since_idle(since) > SESSION_REMOVE_TIMEOUT:
remove_sessions.append(sid)
for sid in remove_sessions:
self.remove_session(sid)
time.sleep(0.1)
def list_sessions(self):
return self._sessions.values()
class ChatSessionManager(SessionManager):
def __init__(self, auto_clean=True):
super(ChatSessionManager, self).__init__(auto_clean)
def dump_all(self):
fnames = []
for sid, sess in self._sessions.iteritems():
if sess and sess.dump():
fnames.append(sess.dump_file)
return fnames
def dump(self, sid):
fname = None
sess = self._sessions.get(sid)
if sess:
sess.dump()
fname = sess.dump_file
return fname
``` |
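Below is a minimal usage sketch (not part of the original module) showing how the session classes above fit together; it assumes the module is importable as `chatbot.server.session` and uses made-up client and user ids:

```python
import datetime as dt
from chatbot.server.session import ChatSessionManager

manager = ChatSessionManager(auto_clean=False)  # skip the background cleaner in a demo
sid = manager.start_session(client_id='client-1', user='alice')
sess = manager.get_session(sid)
# A record needs at least the keys that ResponseCache.add() and dump() read.
sess.add({'Question': 'hi', 'Answer': 'hello there', 'Datetime': dt.datetime.utcnow()})
print(manager.dump(sid))  # path of the CSV file the history was dumped to
manager.remove_session(sid)
```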
{
"source": "johnmelodyme/SciHubEVA",
"score": 2
} |
#### File: SciHubEVA/building/post_process.py
```python
import os
import sys
import shutil
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from docopt import docopt
from scihub_eva.utils.sys_utils import *
MACOS_APP_PATH = os.path.join('dist-macOS', 'SciHubEVA.app')
WINDOWS_APP_PATH = os.path.join('dist-Windows', 'SciHubEVA')
USELESS_QT_LIBS = [
'Qt3D',
'Qt3DAnimation',
'Qt3DCore',
'Qt3DExtras',
'Qt3DInput',
'Qt3DLogic',
'Qt3DQuick',
'Qt3DQuickAnimation',
'Qt3DQuickExtras',
'Qt3DQuickInput',
'Qt3DQuickRender',
'Qt3DQuickScene2D',
'Qt3DRender',
'Qt5Compat',
'QtBodymovin',
'QtCharts',
'QtChartsQml',
'QtMultimedia',
'QtMultimediaQuick',
'QtDataVisualization',
'QtPositioning',
'QtPositioningQuick',
'QtQuick3D',
'QtQuick3DAssetImport',
'QtQuick3DAssetUtils',
'QtQuick3DEffects',
'QtQuick3DHelpers',
'QtQuick3DParticles',
'QtQuick3DRuntimeRender',
'QtQuick3DUtils',
'QtQuickTest',
'QtQuickTimeline',
'QtRemoteObjects',
'QtRemoteObjectsQml',
'QtScxml',
'QtScxmlQml',
'QtSensors',
'QtSensorsQuick',
'QtShaderTools',
'QtSql',
'QtStateMachine',
'QtStateMachineQml',
'QtTest',
'QtVirtualKeyboard',
'QtWebChannel',
'QtWebEngine',
'QtWebEngineCore',
'QtWebEngineQuick',
'QtWebEngineQuickDelegatesQml',
'QtWebSockets',
'QtWebView',
'QtWebViewQuick',
'QtXmlPatterns'
]
USELESS_PACKAGES = [
'PyInstaller'
]
def change_cwd():
cwd = os.getcwd()
if os.path.split(cwd)[1] == 'building':
os.chdir(os.path.join(cwd, os.pardir))
def post_process_win():
# remove useless Qt modules
for qt_lib in USELESS_QT_LIBS:
qt_lib_win = qt_lib.replace('Qt', 'Qt6')
qt_lib_win += '.dll'
qt_lib_path = os.path.join(WINDOWS_APP_PATH, qt_lib_win)
if os.path.exists(qt_lib_path):
os.remove(qt_lib_path)
qt_qml_dir = os.path.join(WINDOWS_APP_PATH, 'PySide6', 'qml', qt_lib)
if os.path.isdir(qt_qml_dir):
shutil.rmtree(qt_qml_dir, ignore_errors=True)
# remove useless packages
for package in USELESS_PACKAGES:
package_dir = os.path.join(WINDOWS_APP_PATH, package)
if os.path.isdir(package_dir):
shutil.rmtree(package_dir, ignore_errors=True)
def post_process_macos():
# remove useless Qt modules
for qt_lib in USELESS_QT_LIBS:
qt_lib_path = os.path.join(MACOS_APP_PATH, 'Contents', 'MacOS', qt_lib)
if os.path.exists(qt_lib_path):
os.remove(qt_lib_path)
qt_qml_dir = os.path.join(MACOS_APP_PATH, 'Contents', 'MacOS', 'PySide6', 'Qt', 'qml', qt_lib)
if os.path.isdir(qt_qml_dir):
shutil.rmtree(qt_qml_dir, ignore_errors=True)
# remove useless packages
for package in USELESS_PACKAGES:
package_dir = os.path.join(MACOS_APP_PATH, 'Contents', 'Resources', package)
package_link = os.path.join(MACOS_APP_PATH, 'Contents', 'MacOS', package)
if os.path.isdir(package_dir):
shutil.rmtree(package_dir, ignore_errors=True)
if os.path.islink(package_link):
os.remove(package_link)
if __name__ == '__main__':
args = docopt(__doc__)
change_cwd()
if is_windows():
post_process_win()
elif is_macos():
post_process_macos()
``` |
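As a quick reference, here is a small sketch (not part of the build script) that mirrors the path logic in `post_process_win()` and `post_process_macos()` above, showing which artifacts would be removed for a single Qt module name:

```python
import os

MACOS_APP_PATH = os.path.join('dist-macOS', 'SciHubEVA.app')
WINDOWS_APP_PATH = os.path.join('dist-Windows', 'SciHubEVA')

def removal_targets(qt_lib):
    """Per-platform paths that would be deleted for one Qt module."""
    return {
        # Windows ships Qt as Qt6*.dll next to the executable, plus a QML plugin dir.
        'win_dll': os.path.join(WINDOWS_APP_PATH, qt_lib.replace('Qt', 'Qt6') + '.dll'),
        'win_qml': os.path.join(WINDOWS_APP_PATH, 'PySide6', 'qml', qt_lib),
        # macOS keeps the library and QML plugin inside the .app bundle.
        'mac_lib': os.path.join(MACOS_APP_PATH, 'Contents', 'MacOS', qt_lib),
        'mac_qml': os.path.join(MACOS_APP_PATH, 'Contents', 'MacOS', 'PySide6', 'Qt', 'qml', qt_lib),
    }

print(removal_targets('QtWebEngine'))
```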
{
"source": "johnmelodyme/viruses",
"score": 3
} |
#### File: Data/Image/png2bin.py
```python
import sys, math, struct, os
from PIL import Image
doscolors = [
(0x00, 0x00, 0x00), # 0
(0x00, 0x00, 0xa8), # 1
(0x00, 0xa8, 0x00), # 2
(0x00, 0xa8, 0xa8), # 3
(0xa8, 0x00, 0x00), # 4
(0xa8, 0x00, 0xa8), # 5
(0xa8, 0xa8, 0x00), # 6
(0xa8, 0xa8, 0xa8), # 7
(0x54, 0x54, 0x54), # 8
(0x54, 0x54, 0xff), # 9
(0x54, 0xff, 0x54), # 10
(0x54, 0xff, 0xff), # 11
(0xff, 0x54, 0x54), # 12
(0xff, 0x54, 0xff), # 13
(0xff, 0xff, 0x54), # 14
(0xff, 0xff, 0xff), # 15
]
def color_distance(a, b):
return math.sqrt( (a[0]-b[0])**2 + (a[1]-b[1])**2 + (a[2]-b[2])**2 )
def nearest_color(color):
nearest = 0
for i in range(len(doscolors)):
if color_distance(color, doscolors[i]) < color_distance(color, doscolors[nearest]):
nearest = i
return nearest
buf = ""
for imgf in sys.argv[1:-1]:
img = Image.open(imgf).convert("RGB")
w, h = img.size
for y in xrange(0, h, 2):
for x in xrange(w):
            # Pack two vertically adjacent pixels into one byte:
            # high nibble = top-row pixel colour, low nibble = bottom-row pixel colour.
            b = (nearest_color(img.getpixel((x, y))) << 4)
            if y < 48 or x >= len("You've Nyaned for 00000.0 seconds!"):
                b |= nearest_color(img.getpixel((x, y+1)))
            else:
                # In the region where the seconds counter is printed, use a
                # contrasting colour (white on dark, black on bright) instead
                # of the image pixel so the text stays readable.
                b |= 15 if ((b>>4) < 8) else 0
            buf += chr(b)
img.close()
with open(sys.argv[::-1][0], "wb") as out:
out.write(buf)
``` |
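For completeness, a tiny sketch (not part of the original script) of how the packed bytes decode again: each output byte stores two 4-bit indices into the `doscolors` palette, the top pixel in the high nibble and the bottom pixel in the low nibble:

```python
def unpack_byte(b):
    """Split one packed byte into (top_pixel, bottom_pixel) palette indices."""
    return (b >> 4) & 0x0F, b & 0x0F

# A byte written as chr((7 << 4) | 15) encodes a light-grey top pixel over a white bottom pixel.
assert unpack_byte((7 << 4) | 15) == (7, 15)
```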
{
"source": "john-mestas-t/impi-core",
"score": 2
} |
#### File: john-mestas-t/impi-core/BL_Item.py
```python
from DB_Controlers import *
from DB import *
from datetime import datetime
import hashlib
import os
import zlib
class Utils():
def __init__(self):
pass
@classmethod
def delete_item(self, id_it):
return DB_Item.delete_item(id_it)
    @classmethod
    def create_item(self, ls_prm):
        try:
            # Callers may pass the short form [id_account, account_path, status, local_path,
            # id_oned]; Item() needs 9 fields, so derive type/name/extension/size from the path.
            if len(ls_prm) == 5:
                id_ac, ph_ac, st_it, ph_it, id_on = ls_prm
                tp_it = self.get_type(ph_it)
                ex_it = None if tp_it == 'FOLDER' else self.get_extension(ph_it)
                ls_prm = [id_ac, ph_ac, st_it, ph_it, tp_it, self.get_name(ph_it),
                          ex_it, self.get_size(ph_it), id_on]
            OB_IT = Item(ls_prm)
            OB_IT.add_item()
            return True
        except Exception:
            return False
@classmethod
def update_item(self, ls_prm):
eu = ls_prm[0] # eu = element to update
vu = ls_prm[1] # vu = value to update
cu = ls_prm[2] # cu = condicion to update
vc = ls_prm[3] # vc = value of condition
try:
if eu == 'id_oned':
DB_Item.update_one(
[
eu,
vu,
cu,
vc
]
)
elif eu == 'status_item':
DB_Item.update_one(
[
eu,
vu,
cu,
vc
]
)
elif eu == 'link_sharing':
DB_Item.update_one(
[
eu,
vu,
cu,
vc
]
)
return True
except Exception:
return False
@classmethod
def create_path(self, ls_prm):
ph_it = ls_prm[0] # folder path
tp_it = ls_prm[1] # type folder
if tp_it == 'FILE':
ph_it = Utils.get_parent(ph_it)
if tp_it == 'FOLDER':
pass
if not os.path.exists(ph_it):
os.makedirs(ph_it)
return True
else:
return False
@classmethod # [old]
def exist_folder(self, id_ac, dt_fl):
nm_fle = Utils.normalize_path(dt_fl[1]) # item name
ph_fle = Utils.normalize_path(dt_fl[2]) # item path
ls_it = DB_Item.select_all_items()
ls_it = ls_it.where(
(Items.id_account == id_ac) &
(Items.type_item == 'FOLDER') &
(Items.name_item == nm_fle) &
(Items.ph_item_ser == ph_fle)
)
if ls_it.exists():
return True
else:
return False
@classmethod
def exist_file(self, dt_fl): # [old]
id_ac = dt_fl[0]
nm_fle = Utils.normalize_path(dt_fl[2]) # item name
ph_fle = Utils.normalize_path(dt_fl[3]) # item path
ls_it = DB_Item.select('all')
ls_it = ls_it.where(
(Items.id_account == id_ac) &
(Items.type_item == 'FILE') &
(Items.name_item == nm_fle) &
(Items.ph_item_ser == ph_fle)
)
if ls_it.exists():
            print('YES, it exists')
else:
            print('NO, it does not exist')
@classmethod
def get_files(self, id_ac_prm, or_by=''):
ls_it = DB_Item.select('all')
ls_it = ls_it.where(
(Items.id_account == id_ac_prm) &
(Items.status_item == 'I') &
(Items.type_item == 'FILE') &
(Items.level_item > 0)
)
if or_by == 'asce':
ls_it = ls_it.order_by(Items.level_item)
if or_by == 'desc':
ls_it = ls_it.order_by(Items.level_item.desc())
return ls_it
@classmethod
def get_folders(self, id_ac_prm, or_by=''):
ls_it = DB_Item.select('all')
ls_it = ls_it.where(
(Items.id_account == id_ac_prm) &
(Items.status_item == 'I') &
(Items.type_item == 'FOLDER') &
(Items.level_item > 0)
)
if or_by == 'asce':
ls_it = ls_it.order_by(Items.level_item)
if or_by == 'desc':
ls_it = ls_it.order_by(Items.level_item.desc())
return ls_it
@classmethod
def get_father_hash(self, path):
return Utils.get_path_to_hash(os.path.dirname(path))
@classmethod
def get_parent(self, p_path):
parent_ph = os.path.abspath(os.path.join(p_path, os.pardir))
parent_ph = Utils.normalize_path(parent_ph)
return parent_ph
@classmethod
def get_path_to_hash(self, path):
m = hashlib.md5()
m.update(path.encode('utf-8'))
return m.hexdigest()
@classmethod
def get_type(self, path):
if os.path.isdir(path):
return 'FOLDER'
else:
return 'FILE'
@classmethod
def get_path_item_ser(self, p_local_path, p_count_path):
path_server = p_local_path.replace(p_count_path, '')
if len(path_server) == 0:
return None
elif len(path_server) > 0:
return path_server
@classmethod
def get_level_path(self, p_path):
try:
if p_path is not None:
level = p_path.count('/')
return level
except Exception:
return 0
@classmethod
def get_md5(self, path):
hash_md5 = hashlib.md5()
with open(path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
@classmethod
def get_sha1(self, path):
sha1 = hashlib.sha1()
try:
            file = open(path, 'rb')
while True:
data = file.read(4096)
if not data:
break
sha1.update(data)
except IOError as e:
print('File \'' + path + '\' not found!')
print(e)
return None
except:
return None
        return sha1.hexdigest()
@classmethod
def get_crc32(self, path, block_size=1048576):
crc = 0
try:
            file = open(path, 'rb')
while True:
data = file.read(4096)
if not data:
break
crc = zlib.crc32(data, crc)
except IOError as e:
print('File \'' + path + '\' not found!')
print(e)
return None
except:
return None
return str(crc).upper()
@classmethod
def get_name(self, path):
return os.path.basename(path)
@classmethod
def get_extension(self, path):
        if self.get_type(path) == 'FOLDER':
return None
else:
return os.path.splitext(path)[1]
@classmethod
def get_size(self, path):
if self.get_type(path) == 'FILE':
return self.get_size_file(path)
else:
return self.get_size_dir(path)
@classmethod
def get_size_file(self, path):
return os.stat(path).st_size
@classmethod
def get_size_dir(self, start_path='.'):
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
@classmethod
def combine_paths(self, ph_ac_prm, ph_it_ser_prm):
new_ph = ph_ac_prm + '/' + ph_it_ser_prm
return Utils.normalize_path(new_ph)
@classmethod
def normalize_path(self, path):
path = os.path.normpath(path).replace("\\", "/")
return self.delete_last_slah(path)
@classmethod
def delete_last_slah(self, path):
num_path = len(path)
if path[num_path - 1:] == '/':
return path[:num_path - 1]
else:
return path
@classmethod
    def validate_size(self, file_size):  # True if the file size exceeds the 50 MB limit
top_size = 50 * 1048576
if file_size < top_size:
return False
return True
@classmethod
    def validate_extentions(self, ext_file):  # is this extension allowed?
# ls_ea = list_extentions_not_allowed
ls_ea = ['.jpg', '.png', '.ini', '.exe']
for ea in ls_ea:
if ext_file == ea:
return False
return True
@classmethod
def create_date(self, path):
format = '%Y-%m-%d %H:%M:%S'
t = os.path.getctime(path)
t = datetime.fromtimestamp(t)
return t.strftime(format)
@classmethod
def modify_date(self, path):
format = '%Y-%m-%d %H:%M:%S'
t = os.path.getmtime(path)
t = datetime.fromtimestamp(t)
return t.strftime(format)
@classmethod
def get_id_one(self, ls_dt):
id_on = ls_dt[0]
ph_ac = ls_dt[1]
ph_it = ls_dt[2]
if id_on is not None:
return id_on
else:
return 'root' if ph_ac == ph_it else None
@classmethod
def update_in_DB(self, ls_pr):
return DB_Item.update_one(ls_pr)
@classmethod
def exist_in_DB(self, ls_pr):
TYPE_ITEM = ls_pr[0]
HASH_ITEM = ls_pr[1]
HASH_PATH = ls_pr[2]
PATH_ITEM = ls_pr[3]
return DB_Item.exist_item([TYPE_ITEM, HASH_ITEM, HASH_PATH, PATH_ITEM])
@classmethod
def comply_restrictions(self, ls_pr):
TYPE_ITEM = ls_pr[0]
SIZE_ITEM = ls_pr[1]
EXTN_ITEM = ls_pr[2]
if TYPE_ITEM == 'FOLDER':
return True
if not Utils.validate_extentions(EXTN_ITEM):
return False
        if Utils.validate_size(SIZE_ITEM):  # file exceeds the 50 MB limit
return False
return True
class Item(object):
def __init__(self, ls_dt_it):
self.id_ac = ls_dt_it[0]
self.ph_ac = ls_dt_it[1]
self.st_it = ls_dt_it[2] # st = status
self.ph_it = ls_dt_it[3]
self.tp_it = ls_dt_it[4] # type item
self.nm_it = ls_dt_it[5] # name item
self.ex_it = ls_dt_it[6] # extention item
self.sz_it = ls_dt_it[7] # size item
self.id_on = ls_dt_it[8] # id onedrive
def add_item(self):
ID_ACCOUNT = self.id_ac
ID_HASH_PATH = Utils.get_path_to_hash(self.ph_it)
PH_ITEM_LOC = self.ph_it
PH_ITEM_SER = Utils.get_path_item_ser(self.ph_it, self.ph_ac)
LEVEL_ITEM = Utils.get_level_path(PH_ITEM_SER)
TYPE_ITEM = self.tp_it
NAME_ITEM = self.nm_it
EXTN_ITEM = self.ex_it
SIZE_ITEM = self.sz_it
DATE_CREATED_ITEM = Utils.create_date(self.ph_it)
DATE_MODIFIED_ITEM = Utils.modify_date(self.ph_it)
HASH_MD5_ITEM = None if TYPE_ITEM == 'FOLDER' else Utils.get_md5(self.ph_it)
STATUS_ITEM = 'N' if self.ph_ac == self.ph_it else self.st_it
ID_HASH_PARENT = Utils.get_father_hash(self.ph_it)
ID_ONED = Utils.get_id_one(
[
self.id_on,
self.ph_ac,
self.ph_it
]
)
LINK_SHARING = None
d = {
"id_account" : ID_ACCOUNT,
"id_hash_path" : ID_HASH_PATH,
"ph_item_loc" : PH_ITEM_LOC,
"ph_item_ser" : PH_ITEM_SER,
"level_item" : LEVEL_ITEM,
"type_item" : TYPE_ITEM,
"name_item" : NAME_ITEM,
"extn_item" : EXTN_ITEM,
"size_item" : SIZE_ITEM,
"date_created_item" : DATE_CREATED_ITEM,
"date_modified_item": DATE_MODIFIED_ITEM,
"hash_md5_item" : HASH_MD5_ITEM,
"status_item" : STATUS_ITEM,
"id_hash_parent" : ID_HASH_PARENT,
"id_oned" : ID_ONED,
"link_sharing" : LINK_SHARING
}
try:
DB_Item.insert_many_items([d])
return True
except Exception:
return False
if __name__ == '__main__':
pass
```
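For reference, a sketch (with made-up example values) of the 9-element parameter list that `Item.__init__` above unpacks; only the order and meaning of the slots matter here:

```python
item_params = [
    1,                                  # id_ac - account id
    'C:/godo-sync/account',             # ph_ac - account root path
    'I',                                # st_it - status flag ('I', 'N', 'R' are used in the code)
    'C:/godo-sync/account/docs/a.txt',  # ph_it - full local path of the item
    'FILE',                             # tp_it - 'FILE' or 'FOLDER'
    'a.txt',                            # nm_it - item name
    '.txt',                             # ex_it - extension (None for folders)
    1024,                               # sz_it - size in bytes
    None,                               # id_on - OneDrive item id, if known
]
# Item(item_params).add_item() then derives the hashes, dates and server-side path
# before inserting the record through DB_Item.insert_many_items().
```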
#### File: john-mestas-t/impi-core/BL_Local_Engine_Utils.py
```python
from DB_Controlers import *
from BL_Item import *
class File_Directoy(object):
def __init__(self, p_id_ac, p_ph_ac):
self.id_ac = p_id_ac #id_ac = id account
self.ph_ac = p_ph_ac #ph_ac = account path
self.start_scann(self.ph_ac)
def start_scann(self, ph_it):
        ph_it = Utils.normalize_path(ph_it)  # returns a clean, normalized path
TYPE_ITEM = Utils.get_type(ph_it)
SIZE_ITEM = Utils.get_size(ph_it)
EXTN_ITEM = None if TYPE_ITEM == 'FOLDER' else Utils.get_extension(ph_it)
HASH_ITEM = None if TYPE_ITEM == 'FOLDER' else Utils.get_md5(ph_it)
HASH_PATH = Utils.get_path_to_hash(ph_it)
PATH_ITEM = ph_it
NAME_ITEM = Utils.get_name(ph_it)
ls_restrictions = [
TYPE_ITEM, # type item
SIZE_ITEM, # size item
EXTN_ITEM, # extention item [FOLDER = NULL]
HASH_ITEM, # item MD5 [FOLDER = NULL]
HASH_PATH, # path MD5
PATH_ITEM, # complete item's path
NAME_ITEM, # only item's name, no path
]
if self.exist_in_DB(ls_restrictions):
if self.comply_restrictions(ls_restrictions):
ob_item = Item(
[
self.id_ac,
self.ph_ac,
'I',
ph_it,
TYPE_ITEM,
NAME_ITEM,
EXTN_ITEM,
SIZE_ITEM,
None
]
)
if ob_item.add_item():
print('SCANNED-ITEM: {} [OK]'.format(NAME_ITEM))
else:
Utils.update_in_DB(
[
'status_item',
'N',
'id_hash_path',
HASH_PATH
]
)
if TYPE_ITEM == 'FOLDER':
[self.start_scann(os.path.join(ph_it, x)) for x in os.listdir(ph_it)]
def exist_in_DB(self, ls_restrictions): # ls_pr = list_parameters
TYPE_ITEM = ls_restrictions[0]
SIZE_ITEM = ls_restrictions[1]
EXTN_ITEM = ls_restrictions[2]
HASH_ITEM = ls_restrictions[3]
HASH_PATH = ls_restrictions[4]
PATH_ITEM = ls_restrictions[5]
NAME_ITEM = ls_restrictions[6]
        # Note: despite the name, this returns True when the item is NOT yet in the
        # database, i.e. when start_scann() still has to insert it.
        if not Utils.exist_in_DB([TYPE_ITEM, HASH_ITEM, HASH_PATH, PATH_ITEM]):
            return True
        return False
def comply_restrictions(self, ls_restrictions): # ls_pr = list_parameters
TYPE_ITEM = ls_restrictions[0]
SIZE_ITEM = ls_restrictions[1]
EXTN_ITEM = ls_restrictions[2]
HASH_ITEM = ls_restrictions[3]
HASH_PATH = ls_restrictions[4]
PATH_ITEM = ls_restrictions[5]
NAME_ITEM = ls_restrictions[6]
if Utils.comply_restrictions([TYPE_ITEM, SIZE_ITEM, EXTN_ITEM]):
return True
return False
if __name__ == '__main__':
pass
```
#### File: john-mestas-t/impi-core/BL_Server_Engine_Utils.py
```python
from DB_Controlers import *
import datetime
import dropbox
import os
import sys
import stat
import time
import onedrivesdk
from onedrivesdk.helpers import GetAuthCodeServer
class Convert(object):
def __init__(self):
pass
@classmethod
def to_list(self, nm_ser_prm, dt_ac_prm):
ls_data_ac = []
if nm_ser_prm == 'dropbox':
ls_data_ac.append(dt_ac_prm[0].id_account) # 0
ls_data_ac.append(dt_ac_prm[0].id_serv.name_serv) # 1
ls_data_ac.append(dt_ac_prm[0].path_account) # 2
ls_data_ac.append(dt_ac_prm[0].app_access_token_serv) # 3
if nm_ser_prm == 'onedrive':
ls_data_ac.append(dt_ac_prm[0].id_account) # 0
ls_data_ac.append(dt_ac_prm[0].id_serv.name_serv) # 1
ls_data_ac.append(dt_ac_prm[0].path_account) # 2
ls_data_ac.append(dt_ac_prm[0].app_key_serv) # 3
ls_data_ac.append(dt_ac_prm[0].app_secret_serv) # 4
return ls_data_ac
class SV_DropBox(object):
def __init__(self, p_list_dada_serv):
self.id_ac = p_list_dada_serv[0]
self.path_ac = p_list_dada_serv[2]
self.token_ac = p_list_dada_serv[3]
self.client = None
def connect(self):
try:
self.client = dropbox.Dropbox(self.token_ac)
return True
except Exception:
print('error connecting to the server: DropBox')
return False
    def exist_file(self, ls_dt_it):
        nm_it_lo = ls_dt_it[0]  # nm_it_lo = file name on local
        ph_it_lo = ls_dt_it[1]  # ph_it_lo = file path on local
        for it in self.get_files():
            nm_it_sr = it[0]
            ph_it_sr = it[1]
            if nm_it_lo == nm_it_sr and ph_it_lo == ph_it_sr:
                return True
        # Only report "not found" after every remote file has been checked.
        return False
def delete_item(self, ph_it_prm):
try:
OB = self.client.files_delete(ph_it_prm)
if OB is not None:
return True
else:
return False
except Exception:
return False
def download_file(self, dt_fle):
ph_lo = dt_fle[0] # ph_lo = local path
ph_sr = dt_fle[1] # ph_sr = server path
try:
self.client.files_download_to_file(ph_lo, ph_sr, rev=None)
return True
except Exception:
return False
# md = mode
# ov = overwrite
def upload_file(self, ls_dt_it):
ov_it_lo = ls_dt_it[0] # ov_it_lo = override mode [true/folse]
ph_it_lo = ls_dt_it[1] # ph_it_lo = path local
ph_it_sr = ls_dt_it[2] # ph_it_sr = path server
mode = (dropbox.files.WriteMode.overwrite
if ov_it_lo else dropbox.files.WriteMode.add)
with open(ph_it_lo, 'rb') as f:
data = f.read()
try:
res = self.client.files_upload(
data, ph_it_sr, mode,
client_modified=self.get_mtime(ph_it_lo),
mute=True)
return False if res is None else True
except dropbox.exceptions.ApiError:
return False
def get_mtime(self, p_path):
mtime = os.path.getmtime(p_path)
print(mtime)
return datetime.datetime(*time.gmtime(mtime)[:6])
# fl = file
def get_files(self):
ls_fl = []
dt_fl = []
for it in self.client.files_list_folder('', recursive=True).entries:
try:
it.client_modified
dt_fl = [it.name, it.path_display]
ls_fl.append(dt_fl)
except Exception:
pass
return ls_fl
def get_folders(self):
ls_flr = []
for it in self.client.files_list_folder('', recursive=True).entries:
subs = str(it)
if 'FolderMetadata' in subs:
lv_it = it.path_display.count('/')
nm_it = it.name
ph_it = it.path_display
ls_flr.append(str(lv_it) + '|' + nm_it + '|' + ph_it)
return ls_flr
def get_link_sharing(self, path_it):
try:
print('Path-Sharing: {}'.format(path_it))
return self.client.sharing_create_shared_link(path_it, True).url
except Exception as e:
print(e)
return None
class SV_OneDrive(object):
def __init__(self, p_list_dada_serv):
self.number_ups = []
self.id_ac = p_list_dada_serv[0]
self.path_ac = p_list_dada_serv[2]
self.app_key_ac = p_list_dada_serv[3]
self.app_secret_ac = p_list_dada_serv[4]
self.client = None
def connect(self):
try:
redirect_uri = "http://localhost:8080/"
client_id = self.app_key_ac
client_secret = self.app_secret_ac
self.client = onedrivesdk.get_default_client(client_id=client_id,
scopes=['wl.signin',
'wl.offline_access',
'onedrive.readwrite'])
auth_url = self.client.auth_provider.get_auth_url(redirect_uri)
code = GetAuthCodeServer.get_auth_code(auth_url, redirect_uri)
self.client.auth_provider.authenticate(code, redirect_uri, client_secret)
return True
except Exception:
print('error connecting to the server: OneDive')
return False
def upload_folder(self, ls_dt_fld):
nm_fld = ls_dt_fld[0] # nm_fld = name of folder
pr_fld = ls_dt_fld[1] # pr_fld = id parent
f = onedrivesdk.Folder()
i = onedrivesdk.Item()
i.name = nm_fld
i.folder = f
try:
OB = self.client.item(drive="me", id=pr_fld).children.add(i)
return OB.id
except Exception:
return None
def delete_item(self, id_one):
try:
self.client.item(id=id_one).delete()
return True
except Exception:
return False
def upload_file(self, ls_dt_it):
pr_it = ls_dt_it[0] # pr_it = father id
nm_it = ls_dt_it[1] # nm_it = name item to upload
ph_it = ls_dt_it[2] # ph_it = local path item to upload
try:
OB = self.client.item(drive="me", id=pr_it).children[nm_it].upload(ph_it)
return OB.id
except Exception:
return None
def dw_file(self, ls_dt_it):
id_ser = ls_dt_it[0] # item id on server
ph_loc = ls_dt_it[1] # local path
try:
self.client.item(id=id_ser).download(ph_loc)
return True
except Exception:
return False
    def get_files(self, it_id='root', ls_fl=None, ph_it='/'):
        # Avoid a shared mutable default argument: results would otherwise
        # accumulate across separate calls on the same client.
        if ls_fl is None:
            ls_fl = []
        try:
            for it in self.navigate(it_id):
                if it.folder is not None:
                    self.get_files(it.id, ls_fl, ph_it + it.name + '/')
                else:
                    ls_fl.append([it.id, it.name, ph_it + it.name])
            return ls_fl
        except Exception as e:
            print('error: {}'.format(e))
    def get_folders(self, it_id='root', ls_fl=None, ph_it='/'):
        # Same fix as get_files(): avoid a shared mutable default list.
        if ls_fl is None:
            ls_fl = []
        try:
            for it in self.navigate(it_id):
                if it.folder is not None:
                    ls_fl.append([it.id, it.name, ph_it + it.name])
                    self.get_folders(it.id, ls_fl, ph_it + it.name + '/')
            return ls_fl
        except Exception as e:
            print('error: {}'.format(e))
def navigate(self, it_id='root'):
items = self.client.item(id=it_id).children.get()
return items
def get_link_sharing(self, id_oned, action): # action [1, 2]
action = "view" if action == 1 else "edit"
permission = self.client.item(id=id_oned).create_link(action).post()
return permission.link.web_url
if __name__ == '__main__':
# ic = 1
# pa = 'C:/godo-sync/[email protected]__dropbox__'
# at = 'foJG6ertFYEAAAAAAAAUbC69UrGK_QTTrsN6qNLHdik5nqtHrlF0pnjmPeMWYUD2'
# ob = SV_DropBox([ic, 'dropbox', pa, at])
# ob.connect()
# # sl = ob.get_folders()
# sl = ob.get_link_sharing("/FOLDER_01/PD.pdf")
# print(sl)
pass
```
#### File: john-mestas-t/impi-core/BL_Server_Sharing.py
```python
from BL_Item import *
from BL_Server_Sharing_Utils import *
from DB_Controlers import *
class ServerSharing(object):
def __init__(self):
pass
def start_engine(self):
# self.init_actio_server('download')
self.init_actio_server('upload')
# self.init_actio_server('delete')
# abs = accounts by server
# ls = list
# ac = account
# act = action
# ob = object
def init_actio_server(self, act_prm):
ls_abs = DB_Storage_server.select('asbs')
for ls_ac in ls_abs:
server = ls_ac[0]
accounts = ls_ac[1]
for ac in accounts:
ob_ac = eval(server)(ac)
ob_ac.set_act(act_prm)
class DropBox(object):
def __init__(self, id_ac_prm):
# get account information with id_account
ls_data_ac = self.get_data_ac(id_ac_prm)
self.id_ac = ls_data_ac[0] # id account
self.nm_ac = ls_data_ac[1] # name account
self.ph_ac = ls_data_ac[2] # path account
self.at_ac = ls_data_ac[3] # token
self.ls_dt_ac = [
self.id_ac,
self.nm_ac,
self.ph_ac,
self.at_ac,
]
# consult database for information about to id account
def get_data_ac(self, id_ac_prm):
data_ac_sh = DB_Account_sharing.select('all')
data_ac_sh = data_ac_sh.where(Accounts_sharing.id_account == id_ac_prm)
return Convert.to_list('dropbox', data_ac_sh)
def set_act(self, act_prm):
if act_prm == 'upload':
self.up_files() # [OK]
if act_prm == 'download':
self.dw_files() # [OK]
if act_prm == 'delete':
self.dl_files() # [OK]
def dl_files(self):
ls_it = DB_Item.select('all')
ls_it = ls_it.where(
(Items.id_account == self.id_ac) &
(Items.type_item == 'FILE') &
(Items.status_item == 'R')
)
for it in ls_it:
OB_DBX = SV_DropBox(self.ls_dt_ac)
            nm_it = it.name_item  # item name that will be deleted
if OB_DBX.connect():
if OB_DBX.delete_item(it.ph_item_ser):
if Utils.delete_item(it.id_item):
print('DELETED-FILE: {} [OK]'.format(nm_it))
def dw_files(self):
OB_DBX = SV_DropBox(self.ls_dt_ac)
if OB_DBX.connect():
for dt_fl in OB_DBX.get_files():
nm_fle = dt_fl[0] # nm_fl = name file
ph_fle = Utils.normalize_path(dt_fl[1]) # Item path in server
if not self.exist_file(nm_fle, ph_fle):
ph_loc = Utils.combine_paths(self.ph_ac, ph_fle)
Utils.create_path([ph_loc, 'FILE'])
df = OB_DBX.download_file(
[
ph_loc, # path local
ph_fle # path in server
]
)
ci = Utils.create_item(
[
self.id_ac,
self.ph_ac,
'N',
ph_loc,
None
]
)
if df:
print('\nDOWNLOADED-FILE: {} [OK]'.format(nm_fle))
if ci:
print('CREATED-ITEM: {} [OK]'.format(nm_fle))
def exist_file(self, nm_it, ph_it):
ls_it = DB_Item.select('all')
ls_it = ls_it.where(
(Items.id_account == self.id_ac) &
(Items.type_item == 'FILE') &
(Items.name_item == nm_it) &
(Items.ph_item_ser == ph_it)
)
if ls_it.exists():
return True
else:
return False
# lo = local
# sr = server
def up_files(self):
ls_it = Utils.get_files(self.id_ac)
ls_it = ls_it.where(
Items.extn_item == '.properties'
)
for it in ls_it:
ov_it_lo = True # ov = override parameter
id_it_lo = it.id_item
ph_it_lo = it.ph_item_loc
nm_it_lo = "/" + it.name_item
OB_DBX = SV_DropBox(self.ls_dt_ac)
if OB_DBX.connect():
uf = OB_DBX.upload_file(
[
ov_it_lo,
ph_it_lo,
nm_it_lo
]
)
ui = Utils.update_item(
[
'status_item',
'N',
'id_item',
id_it_lo
]
)
if uf and ui:
print('\nUPLOADED-FILE: {} [OK]'.format(it.name_item))
# nm = name
# ph = path
# ak = app_key
# as = app_secret
# dt = data
class OneDrive(object):
def __init__(self, p_id_ac):
# get account information with id_account
ls_data_ac = self.get_data_ac(p_id_ac)
self.id_ac = ls_data_ac[0] # id
self.nm_ac = ls_data_ac[1] # name
self.ph_ac = ls_data_ac[2] # path
self.ak_ac = ls_data_ac[3] # key
self.as_ac = ls_data_ac[4] # pass
self.ls_dt_ac = [
self.id_ac,
self.nm_ac,
self.ph_ac,
self.ak_ac,
self.as_ac
]
# consult database for information about to id account
def get_data_ac(self, p_id_ac):
data_ac_sh = DB_Account_sharing.select('all')
data_ac_sh = data_ac_sh.where(Accounts_sharing.id_account == p_id_ac)
return Convert.to_list('onedrive', data_ac_sh)
# fld = folder
# act = action
def set_act(self, p_act):
if p_act == 'upload':
self.up_files() # [OK]
if p_act == 'download':
self.dw_files() # [OK]
if p_act == 'delete':
self.dl_files() # [OK]
def dl_files(self):
ls_it = DB_Item.select('all')
ls_it = ls_it.where(
(Items.id_account == self.id_ac) &
(Items.type_item == 'FILE') &
(Items.status_item == 'R')
)
for it in ls_it:
OB_ONE = SV_OneDrive(self.ls_dt_ac)
            nm_it = it.name_item  # item name that will be deleted
if OB_ONE.connect():
if OB_ONE.delete_item(it.id_oned):
if Utils.delete_item(it.id_item):
print('DELETED-FILE: {} [OK]'.format(nm_it))
                    else:
                        print('[ERROR] deleting the record from the DB')
                else:
                    print('[ERROR] deleting the item from OneDrive')
            else:
                print('[ERROR] connecting to OneDrive')
def exist_folder(self, dt_fl):
        id_fld = dt_fl[0]  # onedrive id
nm_fld = dt_fl[1] # item name
ph_fld = Utils.normalize_path(dt_fl[2]) # item path
ls_it = DB_Item.select('all')
ls_it = ls_it.where(
(Items.id_account == self.id_ac) &
(Items.type_item == 'FOLDER') &
(Items.name_item == nm_fld) &
(Items.ph_item_ser == ph_fld) &
(Items.id_oned == id_fld)
)
if ls_it.exists():
return True
else:
return False
def dw_files(self):
OB_ONED = SV_OneDrive(self.ls_dt_ac)
if OB_ONED.connect():
for fle in OB_ONED.get_files():
id_one = fle[0] # onedrive id
nm_one = fle[1] # item name
ph_one = Utils.normalize_path(fle[2]) # item path
if not self.exist_file(fle):
ph_loc = Utils.combine_paths(self.ph_ac, ph_one)
Utils.create_path([ph_loc, 'FILE'])
df = OB_ONED.dw_file(
[
id_one,
ph_loc # ph_loc = complete local path
]
)
ci = Utils.create_item(
[
self.id_ac,
self.ph_ac,
'N',
ph_loc,
id_one
]
)
if df:
print('\nDOWNLOADED-FILE: {} [OK]'.format(nm_one))
if ci:
print('CREATED-ITEM: {} [OK]'.format(nm_one))
def exist_file(self, dt_fle):
id_fld = dt_fle[0] # id onedrive
nm_fle = dt_fle[1] # item name
ph_fle = Utils.normalize_path(dt_fle[2]) # item path
ls_it = DB_Item.select('all')
ls_it = ls_it.where(
(Items.id_account == self.id_ac) &
(Items.type_item == 'FILE') &
(Items.name_item == nm_fle) &
(Items.ph_item_ser == ph_fle) &
(Items.id_oned == id_fld)
)
if ls_it.exists():
return True
else:
return False
# fle = file
# pr = parent
def up_files(self):
ls_it = Utils.get_files(self.id_ac, 'asce')
ls_it = ls_it.where(
Items.extn_item == '.properties'
)
for it in ls_it:
id_it = it.id_item
nm_it = it.name_item
ph_it = it.ph_item_loc
OB_ONED = SV_OneDrive(self.ls_dt_ac)
if OB_ONED.connect():
# up_item return ob item
OB = OB_ONED.upload_file(
[
'root', # pr_it = parent item
nm_it, # nm_it = name item
ph_it # ph_it = path item
]
)
if OB is not None:
up = Utils.update_item(
[
'id_oned',
OB,
'id_item',
id_it
]
)
us = Utils.update_item(
[
'status_item',
'N',
'id_item',
id_it
]
)
if up and us:
print('UPLOADED-FILE: {} [OK]'.format(nm_it))
if __name__ == '__main__':
ob = ServerSharing()
ob.start_engine()
``` |
{
"source": "johnmetzcar/CANA",
"score": 4
} |
#### File: cana/canalization/boolean_canalization.py
```python
import numpy as np
import itertools
import copy
from .. utils import *
from collections import deque
__author__ = """\n""".join([
'<NAME> <<EMAIL>>',
'<NAME> <<EMAIL>>',
'<NAME> <<EMAIL>>'
])
# Quine-McCluskey Functions
def make_transition_density_tables(k=1, outputs=[0, 1]):
""" This method creates a tuple-of-lists that is used to calculate Prime Implicants in the first step of the Quine-McCluskey algorithm :cite:`Quine:1955`.
In practice it separates the positive and negative transitions (tuple), then further separates it by counting the number of 1's in each (lists).
Args:
k (int) : the ``k`` number of inputs
outputs (list) : a list of ``[0,1]`` output for each state number.
Returns:
tables (tuple) : a tuple where [0] is the negative table and [1] is the positive table.
"""
# make sure outputs are integers
outputs = list(map(int, outputs))
# we need to split up the LUT based on the transition (to either 0 or 1) and the density of 1s in the binstate
transition_density_tuple = [[[] for density in range(k + 1)] for transition in [0, 1]]
for statenum in range(2**k):
binstate = statenum_to_binstate(statenum, base=k)
density = binstate_to_density(binstate)
transition = outputs[statenum]
# Account for Dont-Care (2) transition states
if transition == 2:
transition_density_tuple[0][density].append(binstate)
transition_density_tuple[1][density].append(binstate)
else:
transition_density_tuple[transition][density].append(binstate)
#
return transition_density_tuple
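# Illustrative example (not part of the original module), assuming statenum_to_binstate
# enumerates states in standard binary order ('00', '01', '10', '11' for k=2):
# for an OR gate, make_transition_density_tables(k=2, outputs=[0, 1, 1, 1]) returns
#   [[['00'], [], []], [[], ['01', '10'], ['11']]]
# i.e. the 0-transition table holds '00' (density 0), and the 1-transition table
# holds '01' and '10' at density 1 and '11' at density 2.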
def find_implicants_qmOLD(column, verbose=False):
""" Finds the prime implicants (PI) using the Quine-McCluskey algorithm :cite:`Quine:1955`.
Args:
column (list) : A list-of-lists containing the counts of ``1`` for each input.
This is given by `make_transition_density_tables`.
Returns:
PI (set): a set of prime implicants.
# Authors: <NAME> and <NAME>
"""
N = len(column) - 1
# we start with an empty set of implicants
prime_implicants = set()
done = False
# repeat the following until no matches are found
while not done:
done = True
# default everything to empty with no matches
next_column = [set() for _ in range(N + 1)]
matches = [[False for _ in range(len(column[density]))] for density in range(N + 1)]
# loop through the possible densities
for density in range(N):
# compare the implicants from successive densities
for i, implicant in enumerate(column[density]):
for j, candidate in enumerate(column[density + 1]):
# check if the implicants differ on only one variable
match = _adjacent(implicant, candidate)
if match:
matches[density][i] = matches[density + 1][j] = True
matches_density = sum([var != '0' for var in match])
next_column[matches_density].add(match)
done = False
# now add back the implicants that were not matched
for i in range(N + 1):
for j in range(len(matches[i])):
if not matches[i][j]:
prime_implicants.add(column[i][j])
# use the simplified table as the starting point of the next pass
column = [list(g) for g in next_column]
return prime_implicants
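# Illustrative example (not part of the original module): feeding the positive column of
# the OR-gate table above, find_implicants_qmOLD([[], ['01', '10'], ['11']]) returns the
# prime implicants {'21', '12'}, where '2' marks the wildcard (don't-care) position.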
def _adjacent(imp1, imp2):
    """Determine if two implicants are adjacent, i.e. they differ on only one variable.
    Args:
        imp1 (string): implicant 1
        imp2 (string): implicant 2
    Returns:
        match (string or bool) : the merged implicant with a '2' at the differing
            position, or ``False`` if the implicants differ in more than one position.
    """
differences = 0
match = []
for m1, m2 in zip(imp1, imp2):
if m1 == m2:
match.append(m1)
elif differences:
return False
else:
differences += 1
match.append('2')
return "".join(match)
def __pi_covers(implicant, input, symbol=['2', '#', 2]):
"""Determines if a minterm is covered by a specific implicant.
Args:
implicant (string): the implicant.
        input (string): the input state (minterm) being tested.
Returns:
x (bool): True if covered else False.
"""
for i, m in zip(implicant, input):
if i in symbol:
continue
if int(i) != int(m):
return False
return True
def computes_pi_coverage(k, outputs, prime_implicants):
"""Computes the input coverage by Prime Implicant schematas.
Args:
k (int): the number of inputs.
        outputs (list): the list of transition outputs.
prime_implicants (tuple): a tuple containing a list negative and positive prime implicants. This is returned by `find_implicants_qm`.
Returns:
pi_coverage (dict) : a dictionary of coverage where keys are input states and values are lists of the Prime Implicants covering that input.
Note: based on code from <NAME> and <NAME>.
"""
# make sure outputs are integers
outputs = list(map(int, outputs))
pi_coverage = {}
for statenum in range(2**k):
binstate = statenum_to_binstate(statenum, base=k)
pi_coverage[binstate] = covering_implicants = []
transition = outputs[statenum]
# Add support for DontCare (2) transition
if transition == 2:
transition = [0, 1]
else:
transition = [outputs[statenum]]
for t in transition:
for prime_implicant in prime_implicants[t]:
if __pi_covers(prime_implicant, binstate):
covering_implicants.append(prime_implicant)
#
return pi_coverage
# Two Symbols Functions
def find_two_symbols_v2(k=1, prime_implicants=None, verbose=False, verbose_level=0):
"""This function calculates the permutation, two-symbol (TS), list of schematas.
This implementation considers '11' and '00' as a possible input permutation.
Args:
k (int): The number of inputs.
prime_implicants (list): The prime implicants computed.
Returns:
final_list (list) : The list of two-symbol schematas.
Note: This is a modification of the original algorithm that can be found in Marques-Pita & Rocha [2013].
"""
    if not prime_implicants:
return []
# If this node has no input, yet it affects other nodes (fixed variable)
if k == 0:
TSf = []
for pi in prime_implicants:
TSf.append((pi, [], []))
return TSf
# Init
# n_pi = len(prime_implicants) # never used
pi_matrix = np.array(tuple(map(tuple, prime_implicants)), dtype=int)
# List of the Two-Symbol Schematas
TS = []
# Init Queue
Q = deque()
Q_history = set()
Q.append(pi_matrix)
i = 0
while len(Q):
schematas = Q.popleft()
n_schematas = schematas.shape[0]
i += 1
if verbose:
if verbose_level == 1:
if (i % 500 == 0):
print('>>> QUEUE: pop | A (m=%d) | Queue size: %d' % (n_schematas, len(Q)))
elif verbose_level > 5:
print('>>> QUEUE: pop | A (m=%d) | Queue size: %d' % (n_schematas, len(Q)))
if verbose_level > 10:
print(schematas)
# count the number of [0's, 1's, 2's] in each column
column_counts = _count_cols_symbols_v2(pi_matrix=schematas, verbose=verbose, verbose_level=verbose_level)
if verbose and verbose_level > 10:
print('>>> COLUMN Schema Counts:')
# find the permutation groups based on column counts
perm_groups = _check_col_counts_v3(counts_matrix=column_counts, verbose=verbose, verbose_level=verbose_level)
if (perm_groups != -1):
if verbose and verbose_level > 10:
print('>>> There are permutable groups! Lets loop them')
for x_group in perm_groups:
if verbose and verbose_level > 20:
print('>>> x_group:', x_group)
print('>>> Truncated schemata matrix:')
print(schematas[:, x_group].T)
# find the row counts by taking the transpose of the truncated schemata list
row_counts = _count_cols_symbols_v2(pi_matrix=schematas[:, x_group].T, verbose=verbose, verbose_level=verbose_level)
if verbose and verbose_level > 20:
print('>>> ROW Schema Counts:')
print(row_counts)
# make sure all row counts are the same
if not (row_counts == row_counts[0]).all():
if verbose and verbose_level > 20:
print('>>> row_counts are NOT the same (-1)')
perm_groups = -1
if verbose and verbose_level > 10:
print(">>> Exists permutation groups?:", (perm_groups != -1))
print(">>> Are groups already in F''?:", ((schematas.tolist(), perm_groups) in TS))
if (perm_groups != -1) and not ((schematas.tolist(), perm_groups) in TS):
# do some weird permutation group testing
allowed_perm_groups = _check_schemata_permutations_v2(schematas, perm_groups, verbose=verbose, verbose_level=verbose_level)
if verbose and verbose_level > 15:
print('>>> Permutation testing result:', allowed_perm_groups)
if allowed_perm_groups is not None:
if verbose and verbose_level > 15:
print(">>> RESULTS: adding F'': %s , Idxs: %s" % (schematas.tolist(), allowed_perm_groups))
TS.append((schematas.tolist(), allowed_perm_groups))
else:
if verbose and verbose_level > 10:
print('>>> Generate combinations of schematas (m-1) and add to Queue')
if schematas.shape[0] > 2:
for idxs_subset in itertools.combinations(np.arange(0, n_schematas), (n_schematas - 1)):
idxs_subset = list(idxs_subset)
schemata_subset = schematas[idxs_subset, :]
# This schemata has already been inserted onto the Queue before?
if schemata_subset.tostring() not in Q_history:
if verbose and verbose_level > 25:
print('>>> QUEUE: appending (idxs: %s)' % (idxs_subset))
print(schemata_subset)
Q.append(schemata_subset)
Q_history.add(schemata_subset.tostring())
else:
if verbose and verbose_level > 25:
print('>>> QUEUE: duplicate, skip (idxs: %s)' % (idxs_subset))
if verbose:
print('>>> TWO-SYMBOLS:')
for i, (tss, perms) in enumerate(TS):
print("F''-%d: %s | Perms: %s" % (i, tss, perms))
# Simplification. Check if there are TSs that are completely contained within others.
# 'ts' = Two-Symbol
# 'cx' = Complexity
# 'xs' = Expanded Logic
TSs = {
i: {
'tss': tss,
'perms': perms,
'cx': _calc_ts_complexity(tss, perms),
'xl': _expand_ts_logic(tss, perms)
} for i, (tss, perms) in enumerate(TS)
}
# Loops all combinations (2) of TS
for (i, j) in itertools.combinations(TSs.keys(), 2):
try:
a_in_b, b_in_a = _check_schema_within_schema(TSs[i]['xl'], TSs[j]['xl'])
except:
continue
else:
cx_a = TSs[i]['cx']
cx_b = TSs[j]['cx']
# A or B contained in the other, keep only contained.
if a_in_b and not b_in_a:
del TSs[i]
elif b_in_a and not a_in_b:
del TSs[j]
elif a_in_b and b_in_a:
# Keep most complex
if cx_a < cx_b:
del TSs[i]
elif cx_b < cx_a:
del TSs[j]
else:
# they are equal, delete either one
del TSs[i]
if verbose:
print('>>> TWO-SYMBOLS (simplified):')
for i, (tss) in TSs.items():
print("F''-%d: %s | Perms: %s | CX: %s" % (i, tss['tss'], tss['perms'], tss['cx']))
# Final List (from simplified)
TSf = [(tss['tss'][0], tss['perms'], []) for tss in TSs.values()]
# Check if all PI are being covered. If not, include the PI on the TS list
if verbose:
print('>>> Check all PI are accounted for in the TS')
for i, pi in enumerate(pi_matrix, start=0):
if not any([_check_schema_within_schema([pi.tolist()], tss['xl'], dir='a', verbose=verbose)[0] for tss in TSs.values()]):
if verbose:
print("PI-%d '%s' Not in list, ADDING." % (i, pi.tolist()))
TSf.append((pi.tolist(), [], []))
else:
if verbose:
print("PI-%d '%s' OK." % (i, pi.tolist()))
if verbose:
print('>>> Check for Same-Symbol permutables')
# NEW: Step to include same-symbol permutables
for ts, perms, sames in TSf:
# Indices of permutables inputs
idxs = list(set([idx for idxs in perms for idx in idxs]))
# Makes the F'' into a Collum Array so it can be used by '_count_cols_symbols_vX'
ts_matrix = np.array([ts]).T
        # Remove inputs (rows) that already belong to a permutable group, but only
        # if there are permutables. np.delete is used because indexing with
        # ~np.array(idxs) does not actually drop those rows.
        if len(idxs):
            ts_matrix_left = np.delete(ts_matrix, idxs, axis=0)
        else:
            ts_matrix_left = ts_matrix
if verbose and verbose_level > 10:
print("> F'' Original:")
print(ts_matrix)
print("> Permutables: %s" % (perms))
print("> F'' without permutables:")
print(ts_matrix_left)
counts_matrix = _count_cols_symbols_v2(pi_matrix=ts_matrix_left.T, verbose=False, verbose_level=verbose_level)
perm_groups = _check_identical_cols_count_symbols_v2(counts_matrix=counts_matrix, verbose=verbose, verbose_level=verbose_level)
sames.extend(perm_groups)
# Step to convert the pi list to string
for i, (ts, perms, sames) in enumerate(TSf, start=0):
ts = ''.join(map(str, ts))
TSf[i] = (ts, perms, sames)
# Final list after all PI were accounted for
if verbose:
print('>>> TS (final list):')
        for ts, perms, sames in TSf:
            print("TS: '%s' | Perm Idx: %s | Sms Idx: %s" % (ts, perms, sames))
return TSf
def _calc_ts_complexity(tss, pers):
""" Calculates the complexity of a TS schema
    Complexity = (Number of Schemas + Number of Permutable Symbols + Length of each Permutable Symbol)
"""
return len(tss) + sum([len(per) for ts, per in zip(tss, pers)])
def _check_schema_within_schema(la, lb, dir=None, verbose=False):
""" Check is a Two-Symbol schemata is covered by another.
This is used to simplify the number of TS schematas returned.
The arguments for this function are generated by `_expand_ts_logic`.
Args:
tsa (list) : A list of :math:`F'` schematas that a Two-Symbol :math:`F''` schemata can cover.
tsb (list) : A list of :math:`F'` schematas that a Two-Symbol :math:`F''` schemata can cover.
dir (string) : The direction to check, either ``a`` or ``b`` is in the other.
Defaults to both directions.
"""
a_in_b, b_in_a = None, None
#
if dir != 'b':
a_in_b = all([(xa in lb) for xa in la])
if verbose:
print('%s in %s : %s' % (la, lb, a_in_b))
if dir != 'a':
b_in_a = all([(xb in la) for xb in lb])
if verbose:
print('%s in %s : %s' % (lb, la, b_in_a))
#
return a_in_b, b_in_a
def _expand_ts_logic(two_symbols, permut_indexes):
""" Expands the Two-Symbol logic to all possible prime-implicants variations being covered.
Args:
two_symbols (list) : Two-Symbol schematas list-of-lists.
Returns:
(list) : a list of :math:`F'` covered by this Two-Symbol.
"""
# If receiving a binary string, convert to list of lists
if isinstance(two_symbols, str):
two_symbols = [list(two_symbols)]
# Queue
Q = deque()
Q.extend(two_symbols)
logics = []
#
while Q:
implicant = np.array(Q.pop())
for idxs in permut_indexes:
# Permutation of all possible combinations of the values that are permutable.
for vals in itertools.permutations(implicant[idxs], len(idxs)):
# Generate a new schema
_implicant = copy.copy(implicant)
_implicant[idxs] = vals
# Insert to list of logics if not already there
if not(_implicant.tolist() in logics):
logics.append(_implicant.tolist())
Q.append(_implicant.tolist())
return logics
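# Illustrative example (not part of the original module): with a single permutable group
# over the first two inputs, _expand_ts_logic('012', [[0, 1]]) returns
#   [['0', '1', '2'], ['1', '0', '2']]
# i.e. the original schema plus the schema with the two permutable inputs swapped.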
def _check_schemata_permutations_v2(schematas, perm_groups, verbose=False, verbose_level=0):
""" Checks if the permutations are possible
Note:
Not sure if this is really needed.
"""
if verbose and verbose_level > 20:
print("-- Check Schemata Permutations (v2) : g(H',L) --")
allowed_perm_groups = []
all_indices = set([i_index for x_group in perm_groups for i_index in x_group])
for x_group in perm_groups:
sofar = []
for i_index in range(len(x_group) - 1):
x_index = x_group[i_index]
small_group = [x_index]
if not (x_index in sofar):
sofar.append(x_index)
for y_index in x_group[(i_index + 1)::]:
if (not(y_index in sofar)) and _can_swap_v2(schematas[:, [x_index, y_index]], verbose=verbose, verbose_level=verbose_level):
small_group.append(y_index)
sofar.append(y_index)
if len(small_group) > 1:
allowed_perm_groups.append(small_group)
if verbose and verbose_level > 30:
print('> allowed_perm_groups', allowed_perm_groups)
if set([i_index for x_group in allowed_perm_groups for i_index in x_group]) == all_indices:
return allowed_perm_groups
return None
def _can_swap_v2(schemata_subset, verbose=False, verbose_level=0):
"""Determines if two schemata subsets can be swapped"""
if verbose and verbose_level > 40:
print('> Can Swap?:',)
can_switch = 1
for row in schemata_subset[:, [1, 0]]:
can_switch *= np.any(np.all(schemata_subset == row, axis=1))
if verbose and verbose_level > 40:
print(can_switch)
return can_switch
def _check_col_counts_v3(counts_matrix, verbose=False, verbose_level=0):
""" This function is used to find permutable symbols.
Args:
counts_matrix (numpy.ndarray) : a matrix where rows are inputs and columns are possible input types (0,1 or #)
Returns:
perm_groups (list) : a list of the indexes that can be permuted.
"""
if verbose and verbose_level > 30:
print('-- Check Col Counts (v3) --')
counts = {} # Multi Counts
perm_groups = [] # A list of groups of Permutable Indexes
for i, row in enumerate(counts_matrix, start=0):
# a tuple (hashable) version of the row counts
row_tuple = tuple(row)
if row_tuple in counts:
# we have seen this one before, so add it to the permutation group
counts[row_tuple].append(i)
elif np.count_nonzero(row) >= 2:
# we have not seen this count before, it is not a fixed variable, so create a new entry for it
counts[row_tuple] = [i]
else:
# we will skip fixed variables
pass
# Append non-constants that have permutable positions
for col, idxs in counts.items():
if verbose and verbose_level > 40:
print(col, ':', idxs)
if len(idxs) == 1:
return -1
elif len(idxs) >= 1:
perm_groups.append(idxs)
if verbose and verbose_level > 40:
print('counts:', counts)
print('perm_groups:', perm_groups)
if len(perm_groups):
return perm_groups
else:
return -1
def _check_identical_cols_count_symbols_v2(counts_matrix, verbose=False, verbose_level=0):
""" This function is used to find same symbol permutables. In practice it is a variance of `_check_cols_symbols_vX`
Args:
counts_matrix (numpy.ndarray) : a matrix where rows are inputs and columns are possible input types (0,1 or #)
Returns:
perm_groups (list) : a list of the indexes that can be permuted
"""
if verbose and verbose_level > 20:
print('-- Check Identical Col Counts (v2) --')
counts = {} # Multi Counts
perm_groups = [] # A list of groups of Permutable Indexes
for i, row in enumerate(counts_matrix, start=0):
# a tuple (hashable) version of the row counts
row = row.tolist()
row_tuple = tuple(row)
if verbose and verbose_level > 30:
print('RC: %s : %s' % (i, row_tuple))
if row_tuple in counts:
# we have seen this one before, so add it to the permutation group
counts[row_tuple].append(i)
else:
# we have not seen this count before, so create a new entry for it
counts[row_tuple] = [i]
# Append non-constants that have permutable positions
for col, idxs in counts.items():
if verbose and verbose_level > 30:
print(col, ':', idxs)
if len(idxs) >= 2:
perm_groups.append(idxs)
if verbose and verbose_level > 30:
print('counts:', counts)
print('sames_groups:', perm_groups)
if len(perm_groups):
return perm_groups
else:
return []
def _count_cols_symbols_v2(pi_matrix=None, verbose=False, verbose_level=0):
""" Given a matrix, where each row is a prime implicant, counts how many 0's, 1's and 2's are found in each column.
Args:
pi_matrix (numpy.ndarray) : a matrix ``n \times k`` of ``n`` prime implicants.
Returns:
        counts (numpy.ndarray) : a matrix ``k \times 3`` where row ``i`` counts how many 0's, 1's and 2's appear in input column ``i``.
"""
if verbose and verbose_level > 20:
print(' -- Count Cols (v2) --')
# number of input columns (k) in the PI matrix
n = pi_matrix.shape[1]
# Instantiate the count matrix
counts = np.zeros((n, 3), dtype=int)
for i, col in enumerate(pi_matrix.T):
# Count how many values are found and update the matrix of counts
val, cnt = np.unique(col, return_counts=True)
counts[i, val] = cnt
return counts
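# Illustrative example of the counts layout above (assumed input; '2' plays the role of
# the don't-care symbol '#'): two implicants '012' and '022' give, per input column,
# the number of 0's, 1's and 2's. The helper name is illustrative and not part of the module.
def _example_count_cols():
    pi_matrix = np.array([[0, 1, 2], [0, 2, 2]])
    return _count_cols_symbols_v2(pi_matrix=pi_matrix)
    # -> [[2, 0, 0], [0, 1, 1], [0, 0, 2]]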
############ START OF TWO SYMBOL v.1 ############
# This version does not consider '11' and '00' as permutable symbols and has other bugs that were fixed in v2
def find_two_symbols_v1(k=1, prime_implicants=None, verbose=False):
two_symbol_schemata_list = []
Partition_Options = [prime_implicants]
while len(Partition_Options) > 0:
# take first partition out of the set
schemata_list = np.array(map(list, Partition_Options.pop()))
if verbose:
print('== Partitions (v1) ==')
print('>>> A (m=%d)' % schemata_list.shape[0])
print(schemata_list)
# count the number of [0's, 1's, 2's] in each column
column_counts = _three_symbol_count_cols_symbols_v1(k=k, transition_list=schemata_list)
# find the permutation groups based on column counts
permutation_groups = _check_counts_v1(column_counts)
if (permutation_groups != -1):
if verbose:
print('>>> There are permutable groups! Lets loop them')
for x_group in permutation_groups:
# find the row counts by taking the transpose of the truncated schemata list
row_counts = _three_symbol_count_cols_symbols_v1(k=schemata_list.shape[0], transition_list=schemata_list[:, x_group].T)
if verbose:
print('>>> ROW Schema Counts:')
print(row_counts)
# make sure all row counts are the same
if len(row_counts) != row_counts.count(row_counts[0]):
permutation_groups = -1
if verbose:
print(">>> Permutation groups:", (permutation_groups != -1), permutation_groups)
print(">>> Permuted groups already in F'':", ((schemata_list.tolist(), permutation_groups) in two_symbol_schemata_list))
if (permutation_groups != -1) and not ((schemata_list.tolist(), permutation_groups) in two_symbol_schemata_list):
# do some weird permutation group testing
allowed_permutation_groups = _check_schemata_permutations_v1(schemata_list, permutation_groups)
if allowed_permutation_groups != []:
if verbose:
print("ADDING to F'':", schemata_subset)
two_symbol_schemata_list.append((schemata_list.tolist(), allowed_permutation_groups))
else:
if schemata_list.shape[0] > 2:
for schemata_subset in itertools.combinations(schemata_list, (schemata_list.shape[0] - 1)):
if verbose:
print('ADDING to Queue:', schemata_subset)
Partition_Options.append(np.array(schemata_subset))
if verbose:
print('Partition_Options:', Partition_Options)
final_list = []
prime_accounted = []
for p_implicant in prime_implicants:
p_implicant = list(p_implicant)
if not (p_implicant in prime_accounted):
for f_double_prime, r_perm in two_symbol_schemata_list:
for f_prime in f_double_prime:
if np.all(f_prime == p_implicant):
final_list.append((p_implicant, r_perm))
for account_prime in f_double_prime:
prime_accounted.append(list(account_prime))
if not (p_implicant in prime_accounted):
final_list.append((p_implicant, []))
return final_list
def _check_schemata_permutations_v1(schemata_list, permutation_groups):
allowed_permutation_groups = []
all_indices = set([i_index for x_group in permutation_groups for i_index in x_group])
for x_group in permutation_groups:
sofar = []
for i_index in range(len(x_group) - 1):
x_index = x_group[i_index]
small_group = [x_index]
if not (x_index in sofar):
sofar.append(x_index)
for y_index in x_group[(i_index + 1)::]:
if (not(y_index in sofar)) and _can_swap_v1(schemata_list[:, [x_index, y_index]]):
small_group.append(y_index)
sofar.append(y_index)
if len(small_group) > 1:
allowed_permutation_groups.append(small_group)
if set([i_index for x_group in allowed_permutation_groups for i_index in x_group]) == all_indices:
return allowed_permutation_groups
return []
def _can_swap_v1(schemata_subset):
can_switch = 1
for row in schemata_subset[:, [1, 0]]:
can_switch *= np.any(np.all(schemata_subset == row, axis=1))
return can_switch
def _check_counts_v1(column_counts=[]):
print('-- Column Counts (v1) --')
print(column_counts)
unique_col_counts = []
permutation_groups = []
for i_col_count, x_col_count in enumerate(column_counts):
print('RC: %d : %s' % (i_col_count, x_col_count))
if x_col_count.count(0) >= 2:
# this is a constant column so skip it
pass
elif x_col_count in unique_col_counts:
# we have seen this one before, so add it to the permutation group
permutation_groups[unique_col_counts.index(x_col_count)].append(i_col_count)
else:
# we have not seen this count before, so create a new entry for it
unique_col_counts.append(x_col_count)
permutation_groups.append([i_col_count])
# check if a singleton permutation group exists
if [len(x_group) for x_group in permutation_groups].count(1) > 0:
print('counts:', permutation_groups)
return -1
else:
print('counts:', permutation_groups)
return permutation_groups
def _three_symbol_count_cols_symbols_v1(k=1, transition_list=None):
column_counts = [[0, 0, 0] for i_col in range(k)]
for x_col in transition_list:
for i_entry, x_entry in enumerate(x_col):
if x_entry == '0':
column_counts[i_entry][0] += 1
elif x_entry == '1':
column_counts[i_entry][1] += 1
elif x_entry == '2':
column_counts[i_entry][2] += 1
return column_counts
############ END OF TWO SYMBOL v.1 ############
def __ts_covers(two_symbol, permut_indexes, input, verbose=False):
"""Helper method to test if an input is being covered by a two symbol permuted implicant
Args:
two_symbol (string): the two_symbol implicant.
permut_indexes (list): a list-of-lists of the implicant indexes that are permutables.
input (string): the input string to be checked.
Returns:
x (bool): True if covered else False.
"""
# No permutation, just plain implicant coverage?
if not len(permut_indexes):
if __pi_covers(two_symbol, input):
return True
# There are permutations to generate and check
else:
# NEW METHOD: Generates the expanded logic of the Two-Symbol Schema
for gen_implicant in _expand_ts_logic(two_symbol, permut_indexes):
if __pi_covers(gen_implicant, input):
return True
"""
# OLD METHOD
for idxs in permut_indexes:
# Extract the charactes that can be permuted
chars = [implicant[idx] for idx in idxs]
# Generate all possible permutations of these symbols
permut_chars = itertools.permutations(chars, len(idxs))
for permut_chars in permut_chars:
# Generate a new implicant and substitute the characters with the permuted ones
tmp = list(implicant)
for idx,char in zip(idxs,permut_chars):
tmp[idx] = char
# Is the new permuted implicant covered?
if __pi_covers(tmp, input):
return True
"""
return False
def computes_ts_coverage(k, outputs, two_symbols):
""" Computes the input coverage by Two Symbol schematas.
Args:
k (int): the number of inputs.
outputs (list): the list of transition outputs.
two_symbols (list): The final list of Two Symbol permutable schematas. This is returned by `find_two_symbols`.
Returns:
coverage (dict): a dictionary of coverage where keys are input states and values are lists of the Two Symbols covering that input.
"""
ts_coverage = {}
for statenum in range(2**k):
binstate = statenum_to_binstate(statenum, base=k)
ts_coverage[binstate] = covering_twosymbols = []
output = int(outputs[statenum])
if output == 2:
output = [0, 1]
else:
output = [int(outputs[statenum])]
for t in output:
for implicant, permut_indxs, same_symbols_indxs in two_symbols[t]:
if __ts_covers(implicant, permut_indxs, binstate):
covering_twosymbols.append((implicant, permut_indxs, same_symbols_indxs))
#
return ts_coverage
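# Minimal illustration of what "covers" means in the docstrings above (assumption:
# '2' is the wildcard/don't-care symbol in the schema strings, matching either input
# value; __pi_covers itself is defined elsewhere in this module). The helper below is
# only a sketch of that idea, not the library's implementation.
def _example_covers(implicant, binstate):
    return all(s == '2' or s == b for s, b in zip(implicant, binstate))
# e.g. _example_covers('021', '011') -> True, _example_covers('021', '010') -> False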
``` |
{
"source": "johnmfula/GitOAuth",
"score": 3
} |
#### File: johnmfula/GitOAuth/main.py
```python
import requests
from flask import Flask, request
CLIENT_ID = ''
CLIENT_SECRET = ''
GITHUB_TOKEN_URL = 'https://github.com/login/oauth/access_token'
app = Flask(__name__)
@app.route('/')
def index():
return '<a href="https://github.com/login/oauth/authorize?client_id={}"> Login with Github</a>'.format(CLIENT_ID)
@app.route('/authorize')
def authorize():
code = request.args.get('code')
data = {'code': code, 'client_id': CLIENT_ID, 'client_secret': CLIENT_SECRET }
headers = {'Accept': 'application/json'}
response = requests.post(GITHUB_TOKEN_URL, data=data, headers=headers)
token = response.json()['access_token']
print(token)
return '<h1> SUCCESS!!! THE CODE IS: {}</h1>'.format(code)
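# A possible next step once the access token is obtained (sketch only, not used by this
# app): call the GitHub REST API. GET https://api.github.com/user with an
# "Authorization: token <access_token>" header returns the authenticated user's profile.
def fetch_github_user(token):
    headers = {'Authorization': 'token {}'.format(token), 'Accept': 'application/json'}
    return requests.get('https://api.github.com/user', headers=headers).json()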
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "johnmgregoire/2013JCAPDataProcess",
"score": 2
} |
#### File: johnmgregoire/2013JCAPDataProcess/commitmenu.py
```python
import sys, os
from PyQt4 import QtCore, QtGui
from time import strftime, localtime
import re
class MainMenu(QtGui.QMainWindow):
def __init__(self):
super(MainMenu, self).__init__()
self.versionName = None
self.verifiedName = None
self.initUI()
""" initializes the user interface for this commit menu """
def initUI(self):
self.setGeometry(500, 200, 600, 100)
self.setWindowTitle('Data Analysis File Committer')
self.mainWidget = QtGui.QWidget(self)
self.setCentralWidget(self.mainWidget)
self.secondaryWidget = QtGui.QWidget(self)
self.mainLayout= QtGui.QGridLayout()
self.secondaryLayout= QtGui.QGridLayout()
self.mainWidget.setLayout(self.mainLayout)
self.secondaryWidget.setLayout(self.secondaryLayout)
self.directions = QtGui.QLabel('Please select the folder you wish to commit.', self)
self.mainLayout.addWidget(self.directions, 0,0)
self.mainLayout.addWidget(self.secondaryWidget)
selectFolder = QtGui.QPushButton('Select Folder', self)
selectFolder.clicked.connect(self.selectProgram)
self.secondaryLayout.addWidget(selectFolder, 0, 0)
self.fileSelected = QtGui.QLineEdit(self)
self.fileSelected.setReadOnly(True)
self.secondaryLayout.addWidget(self.fileSelected, 0, 1)
self.status = QtGui.QLabel('', self)
self.mainLayout.addWidget(self.status)
self.show()
""" textFileTuple is signal received from file dialog; 0th item is string of
file/folder names to display in line edit, 1st item is list of filepaths
(basenames) to load """
def loadData(self, textFileTuple):
self.fileSelected.setText(textFileTuple[0])
self.files = textFileTuple[1]
print len(self.files)
""" deals with getting relevent information for the file ones wishes to commit """
def selectProgram(self):
self.programDialog = QtGui.QFileDialog(self,
caption = "Select a version folder containing data analysis scripts")
self.programDialog.setFileMode(QtGui.QFileDialog.Directory)
# if user clicks 'Choose'
if self.programDialog.exec_():
self.status.setText('')
# list of QStrings (only one folder is allowed to be selected)
dirList = self.programDialog.selectedFiles()
targetDir = os.path.normpath(str(dirList[0]))
pyFiles = filter(lambda f: f.endswith('.py'), os.listdir(targetDir))
# set the line edit and get save the location of the pyFiles
self.loadData(tuple((targetDir,pyFiles)))
print pyFiles
# is the name valid with our version naming standards
nameValidity = self.versionNameVerifier(targetDir)
# if a file's name was invalid to commit
if nameValidity[0] == False:
# deals with renaming the program
newTargetDir = self.renameProgram(targetDir,nameValidity[1])
pyFiles = filter(lambda f: f.endswith('.py'), os.listdir(newTargetDir))
self.loadData(tuple((newTargetDir,pyFiles)))
if nameValidity[0] is not None:
self.status.setText('Your file has been committed.')
""" verifies that the name of the new version folder matches the standard naming """
def versionNameVerifier(self,directory):
plainDirectory = os.path.dirname(directory)
self.versionName = os.path.basename(directory)
dateExpected = strftime("%Y%m%d", localtime())
pattern = '^v(' + dateExpected + ')([0-9])$'
result = re.match(pattern, self.versionName)
# go through all the valid names to check whether we have a match or we must
# rename the folder
for x in range(0,10):
pathToTest = os.path.join(plainDirectory, 'v' + dateExpected + str(x))
try:
if os.path.exists(pathToTest):
if directory == pathToTest and result:
return (True,None)
else:
pass
else:
return (False,pathToTest)
except:
print "TODO Something must have really gone wrong - put a logger maybe?"
print "It appears you might have done more than 10 commits in one day. \
We thus cannot commit your file. Please refrain from doing this in the future."
return (None,None)
""" deals with renaming the program with a valid name """
def renameProgram(self, oldpath, newpath):
newPath = os.path.normpath(newpath)
oldPath = os.path.normpath(oldpath)
os.rename(oldPath,newPath)
return newPath
""" TODO - A possible function to create """
def buildCompiled(self):
pass
# TODO: when it is already ready to go, perhaps call compiler.compileFile
# so that there is a .pyc file to make it faster. This only handles
# the startup being faster -- also make sure it doesn't get redone every time
# once a file has been compiled
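# Standalone sketch of the version-folder naming rule enforced by versionNameVerifier
# above: a folder is valid only if it is named 'v' + today's date (YYYYMMDD) + a single
# digit 0-9. The helper name below is illustrative and not part of the original module.
def is_valid_version_name(folder_name):
    expected_date = strftime("%Y%m%d", localtime())
    return re.match('^v(' + expected_date + ')([0-9])$', folder_name) is not None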
def main():
app = QtGui.QApplication(sys.argv)
menu = MainMenu()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
```
#### File: johnmgregoire/2013JCAPDataProcess/fom_commandline.py
```python
import sys, os, argparse
import fomautomator
from fomautomator import pickle
import path_helpers
import fomautomator_helpers
import time
""" this file handles all the commandline flags and runs the fomautomator """
def main(argv):
parser = argparse.ArgumentParser()
# possible flags
parser.add_argument('-I','--inputfolder', type=str, help="The input folder.\
All the textfiles of this folder will be processed", nargs=1)
parser.add_argument('-i', '--inputfile', type=str, help="The input file.\
A single file that will be processed.", nargs=1)
parser.add_argument('-f', '--fileofinputs', type=str, help="File containing\
paths to input files, each in a new line. Every path\
(line) will be passed to the automator for processing", nargs=1)
parser.add_argument('-J','--jobname', type=str, help="The name you want\
to give the log file. It will have a .run extension\
while processing. This file will change its extension\
to .done or .error: if more errors occur than the max\
number of errors it will be .error, else .done.",nargs=1)
parser.add_argument('-O', '--outputfolder', type=str, help="The destination\
folder where all outputs will be saved. raw data pck\
files will be saved here unless the -R flag is used.", nargs=1, required=True)
parser.add_argument('-S', '--sourcefolder', type=str, help=" A folder where\
intermediate files are read from.", nargs=1)
parser.add_argument('-R', '--rawfolder', type=str, help="The folder where\
raw data files will be saved. If this flag is not used, they\
will be saved in the directory specified by -O", nargs=1)
parser.add_argument('-X', '--errornum', type=int, help="The maximum number\
of errors - zero or larger", nargs=1)
parser.add_argument('-P', '--parallel', help="A flag to use parallel\
processing. Different than sequential in logging and\
max error handling, also mainly used by GUI users.",\
action='store_true')
parser.add_argument('-V', '--funcversionpath', type=str, help= "The path to\
the version you want to use to process the functions.\
Else the default most recent version is used by looking into\
FUNC_DIR which is defined in the automator.py file", nargs=1)
parser.add_argument('-p', '--paramfile', type=str, help= "The path to a \
pickled parameters file created for this version of the \
functions. Will use the function parameters in this file \
to override the defaults.", nargs=1)
args = parser.parse_args(argv)
# the name of the program Module and the update Module
progModule = fomautomator.MOD_NAME
updateModule = fomautomator.UPDATE_MOD_NAME
# default values that get changed by commandline flags
paths = []
srcDir = None
dstDir = None
rawDataDir = None
jobname = ""
max_errors = 10
parallel = False
# the directory with all the versions of the functions
directoryWithAllVersions = fomautomator.FUNC_DIR
# this does not get changed by the commandline, it is currently more useful
# through the GUI when we do the database connection. it gets the experiment
# types in a short list for all the paths -- this allows us to get the params.
# If unsure, always set to empty list.
technique_names = []
if not (args.inputfolder or args.inputfile or args.fileofinputs):
parser.error('Cannot proceed further as no form of input was specified.\
Please use either -I, -i, or -f.')
if args.inputfolder:
paths += path_helpers.getFolderFiles(args.inputfolder[0], '.txt')
if args.inputfile:
paths += args.inputfile
if args.fileofinputs:
try:
with open(args.fileofinputs[0], 'r') as fileWithInputFiles:
paths += fileWithInputFiles.read().splitlines()
except:
return "Your file containing input paths has failed, please make\
sure there is only one file path per line."
if args.jobname:
jobname=args.jobname[0]
else:
# use a default jobname - remove if unwanted
jobname = "job" + time.strftime('%Y%m%d%H%M%S',time.gmtime())
if args.errornum:
max_errors = args.errornum[0]
# there is no need to do an else because the flag is required
if args.outputfolder:
dstDir = args.outputfolder[0]
rawDataDir = args.outputfolder[0]
# reset the rawDataDir since a directory to save raw data files was given
if args.rawfolder:
rawDataDir = args.rawfolder[0]
if args.parallel:
parallel = args.parallel
# if we're given a version path to use, great! Else, we will use
# the one in our default folder and get the most recent version.
if args.funcversionpath:
versionName, prevVersion = fomautomator_helpers.getVersionsByName(args.funcversionpath[0])
sys.path.insert(1,args.funcversionpath[0])
else:
# gets the most recent version folder of the fomfunctions in the FUNC_DIR
versionName, prevVersion = fomautomator_helpers.getRVersions(fomautomator.FUNC_DIR)
# inserts only most recent version so correct functions are used
# as the naming of the function file is the same in all versions
sys.path.insert(1, os.path.join(fomautomator.FUNC_DIR,versionName))
# the directory where we should check for intermediates
if args.sourcefolder:
srcDir = args.sourcefolder[0]
if paths:
automator = fomautomator.FOMAutomator(paths, versionName,prevVersion,\
progModule,updateModule,technique_names,\
srcDir,dstDir,rawDataDir,max_errors,jobname)
# load parameters from a pickled file
if args.paramfile:
with open(args.paramfile[0], 'r') as paramfile:
version, fnames, params = pickle.load(paramfile)
# make sure that the parameters were created for this version
# of the functions
if version == versionName:
automator.setParams(fnames, params)
# otherwise, abort:
else:
return "The parameter file you gave is not compatible \
with this functions version."
# run the automator in the method described by the user
if parallel:
automator.runParallel()
else:
automator.runSequentially()
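# Sketch of how a parameter file for the -p/--paramfile flag could be produced,
# mirroring the unpickling above: a pickled (versionName, fnames, params) tuple.
# The helper name and its arguments are illustrative only.
def write_example_paramfile(path, version_name, fnames, params):
    with open(path, 'w') as paramfile:
        pickle.dump((version_name, fnames, params), paramfile)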
if __name__ == "__main__":
main(sys.argv[1:])
# an example of calling it with the command line
# python fom_commandline.py -I "C:\Users\dhernand.HTEJCAP\Desktop\Working Folder\5 File" -O "C:\Users\dhernand.HTEJCAP\Desktop\Working Folder\AutoAnalysisXML" -J "jobnametest"
```
#### File: fomfunctionversions/v201309100/fomfunctions.py
```python
from intermediatefunctions_firstversion import numpy
import intermediatefunctions_firstversion as inter
# this dictionary is required to know which figures of merit should
# be calculated for each type of experiment
# TO DO: come up with a better naming convention for this dictionary
EXPERIMENT_FUNCTIONS = {'CV': {'TafelSlopeVPerDec': [], 'TafelEstart_TafelValue': [],
'TafelFitVRange': [], 'TafelLogExCurrent': [],
'Max': [['I(A)'], ['I(A)_LinSub']], 'Min': [['I(A)'], ['I(A)_LinSub']],
'E_IThresh': [['I(A)'], ['I(A)_LinSub']],
'IllDiff': [['I(A)', 'max'], ['I(A)', 'min'],
['I(A)_LinSub', 'max'], ['I(A)_LinSub', 'min']]},
'OCV': {'Final': [['Ewe(V)']], 'Avg': [['Ewe(V)']],
'ArrSS': [['Ewe(V)']], 'IllDiff': [['Ewe(V)', 'avg']]},
'CP': {'Final': [['Ewe(V)']], 'Avg': [['Ewe(V)']],
'ArrSS': [['Ewe(V)']], 'IllDiff': [['Ewe(V)', 'avg']]},
'CA': {'Final': [['I(A)']], 'Avg': [['I(A)']],
'ArrSS': [['I(A)']], 'IllDiff': [['I(A)', 'avg']]}}
zero_thresh = 5.e-8 # threshold below which measured value is equivalent to zero -
# this is a property of the instrument
"""necessary arguments:
vshift=-(.187-0.045)
booldev_frac = 0.5
booldev_nout = 3
dydev_frac = 0.2
dydev_nout = 5
dydev_abs = 0.
dx = 1.
maxfracoutliers = 0.5
critsegVrange = 0.04
critsegIend = 3.e-5
critsegVend = 0.36
SGpts = 10 (nptsoneside for Savitzy-Golay smoothing)
"""
def TafelSlopeVPerDec(rawd, interd, var='I(A)', vshift=-(.187-0.045), boolDevFrac=0.5, boolDevNOut=3,
dyDevFrac=0.2, dyDevNOut=5, dyDevAbs = 0.,
dx=1., maxFracOutliers=0.5, critSegVRange=0.04, critSegIEnd=3.e-5,
critSegVEnd=0.36, SavGolPts=10):
# initialize the arrays to hold Tafel values (considered both
# intermediate data and figures of merit)
interd['Tafel_slopeVperdec'] = []
interd['Tafel_estart'] = []
interd['Tafel_fitVrange'] = []
interd['Tafel_logExCurrent'] = []
booldn_segstart = 3 * boolDevNOut
dn_segstart = 3 * dyDevNOut
inter.calcsegind(rawd, interd, SGpts=SavGolPts) # breaks experiment into segments
inter.calccurvregions(rawd, interd, SGpts=SavGolPts) # runs on all segments
linsub = inter.calcLinSub(rawd, interd, var=var) # returns 1 if successful, 0 if not
if not linsub:
interd['Tafel_slopeVperdec'] = float('nan')
interd['Tafel_estart'] = float('nan')
interd['Tafel_fitVrange'] = float('nan')
interd['Tafel_logExCurrent'] = float('nan')
return float('nan')
inter.SegSG(rawd, interd, SGpts=SavGolPts, order=1, k=var+'_LinSub')
for seg in range(len(interd['segprops_dlist'])):
inds=interd['segprops_dlist'][seg]['inds']
i=interd['I(A)_LinSub_SG'][inds]
v=rawd['Ewe(V)'][inds]+vshift
posinds=numpy.where(i>zero_thresh)
invboolarr=numpy.float32(i<=zero_thresh)
istart_segs, len_segs, fitdy_segs, fitinterc_segs=inter.findzerosegs(
invboolarr, boolDevFrac, boolDevNOut, booldn_segstart, SGnpts=SavGolPts,
dx=dx, maxfracoutliers=maxFracOutliers)
if len(istart_segs)==0:
# no Tafel segments
interd['Tafel_slopeVperdec'].append(float('nan'))
interd['Tafel_estart'].append(float('nan'))
interd['Tafel_fitVrange'].append(float('nan'))
interd['Tafel_logExCurrent'].append(float('nan'))
continue
ind=numpy.argmax(len_segs)
i0=istart_segs[ind]
i1=i0+len_segs[ind]
taffitinds=numpy.arange(i0, i1)
interd['segprops_dlist'][seg]['TafelFitInds']=inds[taffitinds]
i=i[i0:i1]
i[i<zero_thresh]=zero_thresh #needed due to outliers
v=v[i0:i1]
il=numpy.log10(i)
try:
istart_segs, len_segs, fitdy_segs, fitinterc_segs, dy=inter.findlinearsegs(
il, dyDevFrac, dyDevNOut, dn_segstart, dydev_abs=dyDevAbs, dx=dx, critdy_fracmaxdy=None)
except:
interd['Tafel_slopeVperdec'].append(float('nan'))
interd['Tafel_estart'].append(float('nan'))
interd['Tafel_fitVrange'].append(float('nan'))
interd['Tafel_logExCurrent'].append(float('nan'))
continue
if len(istart_segs)==0:
# no Tafel segments
interd['Tafel_slopeVperdec'].append(float('nan'))
interd['Tafel_estart'].append(float('nan'))
interd['Tafel_fitVrange'].append(float('nan'))
interd['Tafel_logExCurrent'].append(float('nan'))
continue
#only take those segments covering a certain V range and with a min current for the top 10th of the V range
# in the segment and positive slope from there on out, and then take the steepest one.
ind=None
maxdy=0
npts=critSegVRange/dx
npts2=max(2, npts//10+1)
for count2, (it0, slen, dyv) in enumerate(zip(istart_segs, len_segs, fitdy_segs)):
if slen<npts:
continue
it1=it0+slen
if numpy.mean(i[it1-npts2:it1])<critSegIEnd:
continue
if numpy.mean(v[it1-npts2:it1])<critSegVEnd:
continue
if numpy.any(dy[it1:]<0.):
continue
if dyv>maxdy:
maxdy=dyv
ind=count2
if ind is None:
# no Tafel segments
interd['Tafel_slopeVperdec'].append(float('nan'))
interd['Tafel_estart'].append(float('nan'))
interd['Tafel_fitVrange'].append(float('nan'))
interd['Tafel_logExCurrent'].append(float('nan'))
continue
i0=istart_segs[ind]
i1=i0+len_segs[ind]
tafinds=numpy.arange(i0, i1)
it=il[tafinds]
vt=v[tafinds]
fitdy, fitint=numpy.polyfit(vt, it, 1)
interd['Tafel_slopeVperdec'].append(1./fitdy)
interd['Tafel_estart'].append(v[0])
interd['Tafel_fitVrange'].append(vt.max()-vt.min())
interd['Tafel_logExCurrent'].append(fitint)
interd['segprops_dlist'][seg]['TafelInds']=inds[taffitinds][tafinds]
#FOMs (the entire list):
return interd['Tafel_slopeVperdec']
def TafelEstart_TafelValue(rawd, interd):
return interd['Tafel_estart']
def TafelFitVRange(rawd, interd):
return interd['Tafel_fitVrange']
def TafelLogExCurrent(rawd, interd):
return interd['Tafel_logExCurrent']
def ArrSS(rawd, interd, x=['Ewe(V)', 'I(A)', 'I(A)_LinSub'],
weightExp=1., numTestPts=10):
if x == 'I(A)_LinSub':
x = interd[x]
else:
x = rawd[x]
i=numTestPts
s0=x[:i].std()/i**weightExp+1
while x[:i].std()/i**weightExp<s0 and i<len(x):
s0=x[:i].std()/i**weightExp
i+=numTestPts
return x[:i].mean()
def E_IThresh(rawd, interd, i=['I(A)', 'I(A)_LinSub'], v='Ewe(V)', iThresh=1e-5,
numConsecPts=20, setAbove=1, noThresh=1.):
if i == 'I(A)_LinSub':
i = interd[i]
else:
i = rawd[i]
v = rawd[v]
if not setAbove: # 0 for below, 1 for above
i *= -1
iThresh *= -1
keyPts = numpy.int16(i >= iThresh)
keyPtsConsec = [keyPts[x:x+numConsecPts].prod()
for x in range(len(keyPts)-numConsecPts)]
if True in keyPtsConsec:
ival = keyPtsConsec.index(True)
return v[ival:ival+numConsecPts].mean()
else:
# return value indicating threshold not reached
return noThresh
def Avg(rawd, interd, x=['Ewe(V)', 'I(A)', 'I(A)_LinSub'], t='t(s)', interval=1000,
numStdDevs=2., numPts=1000, startAtEnd=0):
if x == 'I(A)_LinSub':
x = interd[x]
else:
x = rawd[x]
t = rawd[t]
# if we wish to start at the end, reverse the lists
if startAtEnd:
x = x[::-1]
t = t[::-1]
# restricts x to requested t-interval
x = x[numpy.abs(t-t[0])<interval]
# removes outliers using mean and std
x=inter.removeoutliers_meanstd(x, numPts//2, numStdDevs) # // = integer division
# the mean of the data now that outliers have been removed
return x.mean()
def Final(rawd, interd, x=['Ewe(V)', 'I(A)', 'I(A)_LinSub']):
if x == 'I(A)_LinSub':
x = interd[x]
else:
x = rawd[x]
return x[-1]
def Max(rawd, interd, x=['Ewe(V)', 'I(A)', 'I(A)_LinSub']):
if x == 'I(A)_LinSub':
x = interd[x]
else:
x = rawd[x]
return numpy.max(x)
def Min(rawd, interd, x=['Ewe(V)', 'I(A)', 'I(A)_LinSub']):
if x == 'I(A)_LinSub':
x = interd[x]
else:
x = rawd[x]
return numpy.min(x)
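# Minimal usage sketch with made-up data (not from an actual experiment): the fom
# functions take the raw-data dict and the intermediate-data dict and return a scalar.
def _example_fom_usage():
    rawd = {'Ewe(V)': numpy.linspace(0., 1., 5),
            'I(A)': numpy.array([0., 1e-6, 5e-6, 2e-5, 4e-5]),
            't(s)': numpy.arange(5.)}
    interd = {}
    # e.g. Max(rawd, interd, x='I(A)') -> 4e-05 and Final(rawd, interd, x='Ewe(V)') -> 1.0
    return Max(rawd, interd, x='I(A)'), Final(rawd, interd, x='Ewe(V)')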
def IllDiff(rawd, interd, illum='Illum', thisvar=['Ewe(V)', 'I(A)', 'I(A)_LinSub'],
othervar='I(A)', t='t(s)', fomName=['min', 'max', 'avg'],
lightStart=0.4, lightEnd=0.95, darkStart =0.4, darkEnd=0.95,
illSigKey='Ach(V)', sigTimeShift=0., illThresh=0.8,
illInvert=1):
if (thisvar == 'I(A)' or thisvar == 'I(A)_LinSub'):
othervar = 'Ewe(V)'
if sigTimeShift:
# add intermediate value 'IllumMod'
interd['IllumMod']=inter.illumtimeshift(rawd, illSigKey, t, sigTimeShift)
illSigKey = 'IllumMod'
if illInvert: # logical invert
# multiply illumination signal by -1
interd['IllumMod'] *= -1
elif illInvert: # logical invert
# add intermediate value 'IllumMod'
# multiply illumination signal by -1
interd['IllumMod'] = -1*rawd[illSigKey]
illSigKey = 'IllumMod'
err = inter.calcdiff_ill_caller(rawd, interd, ikey = illSigKey,
thresh = illThresh, ykeys = [thisvar],
xkeys = [othervar, t],
illfracrange = (lightStart, lightEnd),
darkfracrange = (darkStart, darkEnd))
if err:
# if this is not an illumination experiment, intermediate
# illumination values aren't necessary
for illIntermed in filter(lambda intermed: 'ill' in intermed.lower(),
interd.keys()):
del(interd[illIntermed])
return float('nan')
if fomName == 'min':
return min(interd[thisvar+'_illdiff'])
if fomName == 'max':
return max(interd[thisvar+'_illdiff'])
else:
return interd[thisvar+'_illdiffmean']
``` |
{
"source": "johnmgregoire/JCAPDataProcess",
"score": 2
} |
#### File: JCAPDataProcess/AnalysisFunctions/bgmath_fcn.py
```python
import os,csv, numpy as np, copy, scipy as sp, math, pprint,sys,inspect
import matplotlib.pyplot as plt
plt.ion()
##from datamanipulation import *
from scipy import stats,signal
import matplotlib.cm as cm
from matplotlib import colors
import pickle
import time
import tkMessageBox
import shutil
def residuals(params,xdata,ydata): return sum((np.abs(ydata-linpiecewise(params,xdata))/ydata)**2)
#Division by ydata is to ensure that errors at low absorbance values are given high importance and the ones
#at higher absorbance values are given lower importance (since the important region is where the absorbance starts increasing
#from low value prior to band gap to higher values...The noise at very high energy levels usually having high absorbance will not
#affect the quality of the result a lot)
# params: zeroth index corresponds to the yfit value at x=x0. Next num_knots indices correspond to the knot positions. Next num_knots-1 indices correspond
#to slopes for the linear regions. Piecewise linear addition to obtain the value of fitdata at a specific value of x.
def linpiecewise(params,x):
fitdata=np.ones(np.shape(x))*params[0]
num_knots=len(params)/2
for loc in np.arange(1,num_knots):
fitdata+=params[num_knots+loc]*(x>=params[loc])*(np.min(np.array(\
[np.ones(np.shape(x))*params[loc+1],x]),axis=0)-params[loc])
return fitdata
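# Worked sketch of the parameter layout described above, with made-up numbers:
# y0=0.1, knots at x=1.0 and x=2.0 and a single slope of 0.5 between them, so the fit
# is flat at 0.1 up to x=1, rises to 0.6 at x=2, and stays flat afterwards.
def _example_linpiecewise():
    params = np.array([0.1, 1.0, 2.0, 0.5])
    x = np.array([0.5, 1.0, 1.5, 2.0, 2.5])
    return linpiecewise(params, x)  # -> [0.1, 0.1, 0.35, 0.6, 0.6]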
#xorder: 'increasing' if increasing index in the data corresponds to increasing energy, otherwise 'decreasing'
#first constraint in cons is to ensure minimum knot distance min_knotdist. Second equality constraint indicates that the first and the last knot are fixed to the first and last values of x
#Initially knot locations are evenly spread out between xvalues of interest and all slopes are set to zero. tolerance is the tolerance for convergence of linear properties.
def linearfit(xdata,ydata,num_knots,min_knotdist,xorder,options,tol):
if xorder=='increasing': sign=1
elif xorder=='decreasing': sign=-1
locs=np.arange(1,num_knots+1)
cons=({'type':'ineq','fun':lambda params: params[locs[0:-1]+1]*sign-(params[locs[0:-1]]*sign+min_knotdist)},\
{'type':'eq','fun':lambda params: np.array([params[1],params[num_knots]])-np.array([xdata[0],xdata[-1]],)})
init_x=np.arange(xdata[0],xdata[-1],(xdata[-1]-xdata[0])/((num_knots-1)))
if np.size(init_x)!=num_knots:
init_x=np.hstack((init_x,xdata[-1]))
else:
init_x[-1]=xdata[-1]
init_idxs=[np.argmin(np.abs(xdata-init_x[loc])) for loc in xrange(np.size(init_x))]
init_slopes=[(ydata[init_idxs[loc+1]]-ydata[init_idxs[loc]])/(xdata[init_idxs[loc+1]]-xdata[init_idxs[loc]]) for loc in xrange(0,len(init_idxs)-1)]
init_params=np.hstack((ydata[0],init_x,init_slopes))
res = sp.optimize.minimize(residuals,init_params, args=(xdata,ydata),constraints=cons, method='SLSQP',options=options,tol=tol)
return res
def mergelinsegs(params,num_knots,max_merge_differentialTP,merge_linsegslopediff_percent):
knots=params[1:num_knots+1]
slopes=params[num_knots+1:2*num_knots]
if merge_linsegslopediff_percent>=1:
merge_linsegslopediff_percent=merge_linsegslopediff_percent/100.0
while True:
num_merges=0
j=0
loc=0
newslopes=np.empty(np.shape(slopes)[0])
newknots=np.empty(np.shape(knots)[0])
newslopes[0]=slopes[0]
newknots[0:2]=knots[0:2]
while loc <np.shape(slopes)[0]-1:
medianslope=(newslopes[j]*(newknots[j+1]-newknots[j])+slopes[loc+1]*(knots[loc+2]-knots[loc+1]))\
/(knots[loc+2]-newknots[j])
differentialTPdiff=(medianslope-newslopes[j])*(newknots[j+1]-newknots[j])
TPdiff=newslopes[j]*(newknots[j+1]-newknots[j])
TPdiffn=slopes[loc+1]*(knots[loc+2]-knots[loc+1])
if abs(differentialTPdiff)<min(max_merge_differentialTP,merge_linsegslopediff_percent*(TPdiff+TPdiffn)):
newslopes[j]=medianslope
slopes[loc]=medianslope
slopes[loc+1]=medianslope
newknots[j+1]=knots[loc+2]
loc=loc+1
num_merges+=1
else:
j=j+1
loc=loc+1
newslopes[j]=slopes[loc]
newknots[j]=knots[loc]
newknots[j+1]=knots[loc+1]
newslopes=np.delete(newslopes,np.s_[j+1::],axis=None)
newknots=np.delete(newknots,np.s_[j+2::],axis=None)
slopes=newslopes
knots=newknots
if num_merges==0:
break
num_knots=j+2
params=np.concatenate(([params[0]],knots,slopes),axis=0)
return params
#The block below creates data for each bg type and interacts with fitting, residual measurement functions to identify the best linear piecewise parameters which are then sent to calc_bandgap for bandgap calculations.
def fitresult(data,bgtyp,max_numbgs,num_knots,tol,min_allowedslope,min_bgTP_diff,min_bkgrdslope,min_bgbkgrdslopediff,\
min_finseglength,min_bgTP_finseg_diff,min_bgfinalseglength,max_merge_differentialTP,\
merge_linsegslopediff_percent,maxtol,min_knotdist=0.05,xorder='increasing',calcbg_abscissa=False,dispresult=False):
linfitd={}
yoffset=-np.min(data[bgtyp])+0.03 if np.min(data[bgtyp])<0.03 else 0
for maxiter in [1000,2000]:
res=linearfit(data['hv'],data[bgtyp]+yoffset,num_knots,min_knotdist,xorder,{'maxiter':maxiter,'disp':dispresult},tol)
if res.success:
break
if not res.success:
maxiter=2000
inittol=tol
for i in xrange(int(np.log10(inittol)),int(np.log10(maxtol))+1,-1):
tol=10.**i
res=linearfit(data['hv'],data[bgtyp]+yoffset,num_knots,min_knotdist,xorder,{'maxiter':maxiter,'disp':dispresult},tol)
if res.success:
break
if not res.success:
return [{},{'bgcode_0':9,'tol':np.NaN,bgtyp+'rel_rss':np.NaN}]
else:
tempparams=res.x
tempparams[0]-=yoffset
tempparams=mergelinsegs(tempparams,num_knots,max_merge_differentialTP,merge_linsegslopediff_percent)
linfitd,fomd=calc_bandgap(tempparams,np.size(tempparams)/2,max_numbgs,min_allowedslope,\
min_bgTP_diff,min_bkgrdslope,min_bgbkgrdslopediff,min_finseglength,\
min_bgTP_finseg_diff,min_bgfinalseglength,bgtyp,False)
data[bgtyp+'_linfit']=linpiecewise(np.concatenate(([linfitd['y0']],linfitd['knots'],linfitd['slopes']),axis=0),data['hv'])
fomd['tol']=tol
fomd['rel_rss']=residuals(tempparams,data['hv'],data[bgtyp])
if calcbg_abscissa:
temp_linfitd,temp_fomd=calc_bandgap(tempparams,np.size(tempparams)/2,max_numbgs,min_allowedslope,\
min_bgTP_diff,min_bkgrdslope,min_bgbkgrdslopediff,min_finseglength,\
min_bgTP_finseg_diff,min_bgfinalseglength,bgtyp,True)
linfitd.update(temp_linfitd)
fomd.update(temp_fomd)
return [linfitd,fomd]
def calc_bandgap(params,num_knots,max_numbgs,min_allowedslope,min_bgTP_diff,min_bkgrdslope,min_bgbkgrdslopediff,\
min_finseglength,min_bgTP_finseg_diff ,min_bgfinalseglength,bgtyp,calcbg_abscissa):
abscissa_extn='a_' if calcbg_abscissa else ''
knots=params[1:num_knots+1]
slopes=params[num_knots+1:2*num_knots]
num_slopes=np.shape(slopes)[0]
bgknots_lower=[]; bkgrdknots_lower=[]; bg=[]; abs_expl=[]; bgcode=[]
if (knots[num_slopes]-knots[num_slopes-1])>=min_finseglength:
num_segments=num_slopes
else:
num_segments=num_slopes-1
tot_segs=num_segments
for i in np.arange(0,tot_segs-1):
if slopes[i]<min_allowedslope:
num_segments=i
break
if num_segments==tot_segs:
for i in np.arange(1,num_segments-1):
# if 1 not in bgcode:
#==============================================================================
# TPdiff.extend([(knots[i+1]-knots[i])*slopes[i]])min_bgTP_finseg_diff
#==============================================================================
if slopes[i]>slopes[i-1] and slopes[i]>0 and slopes[i-1]>min_bkgrdslope:
TPdiff=(knots[i+1]-knots[i])*slopes[i]
if TPdiff>=min_bgTP_diff:
if slopes[i]>slopes[i+1]:
bgknots_lower.extend([i])
abs_expl.extend([TPdiff])
bgcode.extend([0])
if np.size(bgknots_lower)==0:
j=num_segments-1
if slopes[j]>slopes[j-1] and slopes[j]>0 and slopes[j-1]>min_bkgrdslope:
TPdiff=(knots[j+1]-knots[j])*slopes[j]
if TPdiff>=min_bgTP_finseg_diff and knots[j+1]-knots[j]>min_bgfinalseglength:
bgknots_lower.extend([j])
abs_expl.extend([TPdiff])
bgcode.extend([6])
if not calcbg_abscissa:
if np.size(bgknots_lower)!=0:
for i in np.arange(0,np.shape(bgknots_lower)[0]):
if i==0: low_limit=-1
else: low_limit=bgknots_lower[i-1]
if bgknots_lower[i]-1==low_limit:
del bgknots_lower[i::]
del abs_expl[i::]
del bgcode[i::]
break
# A band gap segment's background segment cannot be the previous band gap segment
for loc in np.arange(bgknots_lower[i]-1,low_limit,-1):
if slopes[loc]<slopes[loc+1] and slopes[loc]>min_bkgrdslope:
if loc==low_limit+1:
bkgrdknots_lower.extend([loc])
# This block gets executed when the bkgrd segment is just to the right of the previous band gap segment
break
else: continue
elif loc!=bgknots_lower[i]-1:
bkgrdknots_lower.extend([loc+1])
break
else:
temp=np.shape(bgknots_lower)[0]
del bgknots_lower[i::]
del abs_expl[i::]
del bgcode[i::]
i=temp
# This block should never get executed because any bgknot will have a corresponding bkgrdknot except
# when the previous segment is also a band gap segment, which is taken care of by the bgknots_lower[i]-1==low_limit block above
if np.size(bgknots_lower)!=0:
if not (np.size(bgcode)==1 and bgcode[0]==1):
if not ((np.size(abs_expl)==np.size(bgknots_lower)) and \
(calcbg_abscissa or ((np.size(bgknots_lower)==np.size(bkgrdknots_lower)) \
and (len(np.where(np.not_equal(bgcode,1))[0])==np.size(bkgrdknots_lower))))):
raise ValueError('abs_expl,bgknots_lower,bgcodes and/or bkgrdknots_lower do not have the same size')
for i in np.arange(0,np.shape(bkgrdknots_lower)[0]):
if calcbg_abscissa:
if (slopes[bgknots_lower[i]])<min_bgbkgrdslopediff:
bgknots_lower[i],abs_expl[i]=-1000*np.ones([2,])
bgcode.extend([4])
elif (slopes[bgknots_lower[i]]-slopes[bkgrdknots_lower[i]])<min_bgbkgrdslopediff:
bgknots_lower[i],bkgrdknots_lower[i],abs_expl[i]=-1000*np.ones([3,])
bgcode.extend([4])
bgknots_lower=filter(lambda a: a != -1000, bgknots_lower)
bkgrdknots_lower=filter(lambda a: a != -1000, bkgrdknots_lower)
abs_expl=filter(lambda a: a != -1000, abs_expl)
if np.size(bgknots_lower)==0:
bgknots_lower,bkgrdknots_lower,bg,abs_expl=np.ones([4,1])*np.NaN
bgcode.extend([5])
else:
for i in np.arange(0,np.shape(bgknots_lower)[0]):
if not calcbg_abscissa:
[y1,y2]=linpiecewise(params,[knots[bgknots_lower][i],knots[bkgrdknots_lower][i]])
[m1,m2]=[slopes[bgknots_lower[i]],slopes[bkgrdknots_lower[i]]]
[x1,x2]=[knots[bgknots_lower[i]],knots[bkgrdknots_lower[i]]]
else:
[y1]=linpiecewise(params,[knots[bgknots_lower][i]])
[m1]=[slopes[bgknots_lower[i]]]
[x1,x2]=[knots[bgknots_lower[i]],0]
y2=0;m2=0
bg.extend([(y1-y2-(m1*x1-m2*x2))/(m2-m1)])
else:
bgknots_lower,bkgrdknots_lower,bg,abs_expl=np.ones([4,1])*np.NaN
bgcode.extend([3])
elif num_segments!=tot_segs:
if np.size(bgcode)==0:
bgknots_lower,bkgrdknots_lower,bg,abs_expl=np.ones([4,1])*np.NaN
bgcode.extend([1])
fomlist=['bgknots_lower','bkgrdknots_lower', 'bg','abs_expl','bgcode'] if not calcbg_abscissa else ['bgknots_lower','bg','abs_expl','bgcode']
linfit_keylist=['bgknots_lower', 'bkgrdknots_lower'] if not calcbg_abscissa else ['bgknots_lower']
fomd=dict([(abscissa_extn+lstk+'_'+str(idx),eval(lstk)[idx]) for lstk in fomlist for idx in xrange(min(len(bg),max_numbgs))])
linfitd=dict([(abscissa_extn+lstk+'_'+str(idx),eval(lstk)[idx]) for lstk in linfit_keylist for idx in xrange(min(len(bg),max_numbgs))])
if not calcbg_abscissa:
linfitd=dict(linfitd.items()+[('knots',knots),('slopes',slopes),('y0',params[0])])
if not np.isnan(abs_expl).all():
x=np.argmax([fomd[abscissa_extn+'abs_expl_'+str(idx)] for idx\
in xrange(min(len(bg),max_numbgs)) if not np.isnan(fomd[abscissa_extn+'abs_expl_'+str(idx)])])
fomd[abscissa_extn+'bg_repr']=fomd[abscissa_extn+'bg_'+str(x)]
fomd[abscissa_extn+'bgslope_repr']=slopes[bgknots_lower[x]]
if not calcbg_abscissa:
fomd[abscissa_extn+'bkgrdslope_repr']=slopes[bkgrdknots_lower[x]]
fomd[abscissa_extn+'bgcode_repr']=fomd[abscissa_extn+'bgcode_'+str(x)]
else:
fomd[abscissa_extn+'bg_repr']=np.nan
fomd[abscissa_extn+'bgslope_repr']=np.nan
if not calcbg_abscissa:
fomd[abscissa_extn+'bkgrdslope_repr']=np.nan
fomd[abscissa_extn+'bgcode_repr']=np.nan
fomd[abscissa_extn+'bgcode0_only']=fomd[abscissa_extn+'bg_0'] if len(bg)==1 and fomd[abscissa_extn+'bgcode_0']==0 else np.NaN
return [linfitd,fomd]
def identifypeaks(data,typ,abs_minallowedslope,max_allowed_2ndderiv):\
return len(np.where(np.logical_and((np.abs(data[typ+'_2ndderiv'])>=max_allowed_2ndderiv),\
(np.abs(data[typ+'_1stderiv'])<=abs_minallowedslope)))[0])>0
def runuvvis(data,inputvars):
code0='Successful assignment of bandgap linear segment using simple rules'
code1='Linear segment with a slope less than min_slope was found'
code2='Successful assignment of bandgap linear segment using a slightly higher slope at following segment criterion but bgdiff > min in current segment; exists only in older versions and is obsolete as of 3/6/16'
code3='No linear segment was observed for band gap'
code4='Band gap linear segment(s) deleted due to inability to identify background linear segment with sufficient difference in slope'
code5='All Band gap lin segs deleted due to inability to identify background linear segment with sufficient difference in slope'
code6='Final segment has slope higher than previous and explains bgdiff>min; this check occurs only when no band gap has been found with the other criteria above'
# There is a chance that you are underestimating band gaps
code7='Peaks were found'
code8='NaNs were found in the absorption spectrum'
code9='Linear fitting failed'
pfomd={};plinfitd={}
absicssa_extn='_a'
for bgtyp in inputvars['analysis_types']:
pfomd[bgtyp]={};plinfitd[bgtyp]={}
if np.isnan(data[bgtyp]).any():
pfomd[bgtyp]['bgcode_0']=8
if inputvars['calcbg_abscissa']:
pfomd[bgtyp+absicssa_extn]['bgcode_0']=8
continue
#Implementation of second round of filtering in cases where peaks exist is currently unsupported
if inputvars['use_absderivs_forpeaks']:
if identifypeaks(data,'abs',inputvars['abs_minallowedslope'],inputvars['max_absolute_2ndderiv']):
pfomd[bgtyp]['bgcode_0']=7
if inputvars['calcbg_abscissa']:
pfomd[bgtyp+absicssa_extn]['bgcode_0']=7
continue
plinfitd[bgtyp],pfomd[bgtyp]=fitresult(data,bgtyp,max_numbgs=inputvars['maxbgspersmp'],\
num_knots=inputvars['num_knots'],tol=inputvars['tol'],min_allowedslope=inputvars[bgtyp+'_min_allowedslope'],\
min_bgTP_diff=inputvars['min_bgTP_diff'],min_bkgrdslope=inputvars['min_bkgrdslope'],\
min_bgbkgrdslopediff=inputvars['min_bgbkgrdslopediff'],min_finseglength=inputvars['min_finseglength'],\
min_bgTP_finseg_diff=inputvars['min_bgTP_finseg_diff'],\
min_bgfinalseglength=inputvars['min_bgfinalseglength'],max_merge_differentialTP=inputvars['max_merge_differentialTP'],\
merge_linsegslopediff_percent=inputvars['merge_linsegslopediff_percent'],maxtol=inputvars['maxtol'],\
min_knotdist=inputvars['min_knotdist'],xorder='increasing',calcbg_abscissa=inputvars['calcbg_abscissa'],dispresult=False)
fomd=dict([(bgtyp+'_'+k,pfomd[bgtyp][k]) for bgtyp in pfomd.keys() for k in pfomd[bgtyp].keys()])
linfitd=dict([(bgtyp+'_'+k,plinfitd[bgtyp][k]) for bgtyp in plinfitd.keys() for k in plinfitd[bgtyp].keys()])
linfitd=dict(linfitd.items()+[(k+'_'+str(x),linfitd[k][idx]) for k in linfitd.keys() if k.split('_')[-1] in ['knots','slopes'] \
for idx,x in enumerate(xrange(len(linfitd[k])))])
[linfitd.pop(k) for k in linfitd.keys() if not np.isscalar(linfitd[k])]
return linfitd,fomd
```
#### File: JCAPDataProcess/AnalysisFunctions/xrfs_basics.py
```python
import numpy, copy,sys,os
if __name__ == "__main__":
sys.path.append(os.path.split(os.getcwd())[0])
sys.path.append(os.path.split(os.path.realpath(__file__))[0])
from fcns_math import *
from fcns_io import *
from csvfilewriter import createcsvfilstr
from Analysis_Master import *
from FOM_process_basics import FOMKEYSREQUIREDBUTNEVERUSEDINPROCESSING
def getapplicablefilenames_specific_usetypetech(expfiledict, usek, techk, typek, runklist=None, requiredparams=[], specificuse='data', specifictech=None, specifictype=None):
if not ((specificuse is None or usek==specificuse) and (specifictech is None or techk==specifictech) and (specifictype is None or typek==specifictype)):
return 0, {}
###from here down is like stdgetapplicablefilenames but without the keys and sample_no, etc. that would be read from fileattr
requiredparams=[(rp+techk) if rp.endswith('__') else rp for rp in requiredparams]
requiredparams+=['plate_id']
if runklist is None:
runklist=expfiledict.keys()
runklist=[runk for runk in runklist \
if runk.startswith('run__') and \
(usek in expfiledict[runk]['run_use']) and \
('files_technique__'+techk) in expfiledict[runk].keys() and \
typek in expfiledict[runk]['files_technique__'+techk].keys()]
num_files_considered=numpy.int32([len(expfiledict[runk]['files_technique__'+techk][typek]) for runk in runklist]).sum()
filedlist=[dict(\
dict([(reqparam, expfiledict[runk]['parameters'][reqparam]) for reqparam in requiredparams]),\
expkeys=[runk, 'files_technique__'+techk, typek, fnk], run=runk, fn=fnk\
)\
for runk in runklist \
for fnk in expfiledict[runk]['files_technique__'+techk][typek].keys()\
if not (False in [reqparam in expfiledict[runk]['parameters'].keys() for reqparam in requiredparams])\
]
filedlist=[dict(d, user_run_foms=expfiledict[d['run']]['user_run_foms'] if 'user_run_foms' in expfiledict[d['run']].keys() else {}) for d in filedlist]#has to be here because only place with access to expfiledict
filedlist=[dict(d, run_foms=expfiledict[d['run']]['run_foms'] if 'run_foms' in expfiledict[d['run']].keys() else {}) for d in filedlist]#has to be here because only place with access to expfiledict
return num_files_considered, filedlist
def getapplicable_runs_paramsonly(expfiledict, usek, runklist=None, requiredparams=[]):
requiredparams+=['plate_id']
if runklist is None:
runklist=expfiledict.keys()
runklist=[runk for runk in runklist \
if runk.startswith('run__') and \
(usek in expfiledict[runk]['run_use'])]
num_files_considered=len(runklist)
filedlist=[dict(\
dict([(reqparam, expfiledict[runk]['parameters'][reqparam]) for reqparam in requiredparams]),\
run=runk, runparamd=expfiledict[runk]['parameters'], runint=int(runk.partition('run__')[2])\
)\
for runk in runklist \
if not (False in [reqparam in expfiledict[runk]['parameters'].keys() for reqparam in requiredparams])\
]
return num_files_considered, filedlist
class Analysis__XRFS_EDAX(Analysis_Master_nointer):
def __init__(self):
self.analysis_fcn_version='1'
self.dfltparams={'quant_method': 'FP No Stds', 'Inte_append': '.CPS', 'Wt_append': '.WtPerc', 'At_append': '.AtPerc'}
self.params=copy.copy(self.dfltparams)
self.analysis_name='Analysis__XRFS_EDAX'
self.requiredkeys=[]
self.optionalkeys=[]
self.requiredparams=[]
self.fomnames=['StgLabel', 'StagX', 'StagY', 'StagZ', 'StagR']
self.plotparams={}#dict({}, plot__1={})
#self.plotparams['plot__1']['x_axis']='t(s)'
#self.plotparams['plot__1']['series__1']='I(A)'
self.csvheaderdict=dict({}, csv_version='1', plot_parameters={})
self.csvheaderdict['plot_parameters']['plot__1']=dict({}, fom_name=self.fomnames[0], colormap='jet', colormap_over_color='(0.5,0.,0.)', colormap_under_color='(0.,0.,0.)')
def getapplicablefilenames(self, expfiledict, usek, techk, typek, runklist=None, anadict=None, calcFOMDialogclass=None):
self.num_files_considered, self.filedlist=getapplicablefilenames_specific_usetypetech(expfiledict, usek, techk, typek, runklist=runklist, specificuse=None, specifictech='XRFS', specifictype='batch_summary_files')
self.description='reformatting of XRFS batch_summary_files'
# if len(self.filedlist)>0:
# self.processnewparams(calcFOMDialogclass=calcFOMDialogclass)
self.fomnames=['StgLabel', 'StagX', 'StagY', 'StagZ', 'StagR']#to reset this after function used
return self.filedlist
def processnewparams(self, calcFOMDialogclass=None):
return
def perform(self, destfolder, expdatfolder=None, writeinterdat=False, anak='', zipclass=None, expfiledict=None, anauserfomd={}):#zipclass intended to be the class with open zip archive if expdatfolder is a .zip so that the archive is not repeatedly opened
self.initfiledicts()
self.strkeys_fomdlist=[]
#go through the files once to read data and sample_no.txt
for filed in self.filedlist:
fn=filed['fn']
runp=expfiledict[filed['run']]['run_path']
runp=buildrunpath(runp)
runzipclass=gen_zipclass(runp)
p=os.path.join(runp, fn)
filed['batch_summary']=read_xrfs_batch_summary_csv(p, select_columns_headings__maindict=self.fomnames, \
include_inte_wt_at_subdicts=True, include_transitionslist_bool=True, read_sample_no_bool=True)
if 'StgLabel' in self.fomnames and not ('StgLabel' in filed['batch_summary'].keys()):
self.fomnames.remove('StgLabel')
# except:
# if self.debugmode:
# raiseTEMP
# fomtuplist=[(k, numpy.nan) for k in self.fomnames]
# pass
#prepare union of all transitions with the parameter-specified modifications to transitions to create fom names. None of the 3 modifications should be identical but this is not checked.
alltransitions=[]
for filed in self.filedlist:
for tr in filed['batch_summary']['transitionslist']:#to preserve order
if not tr in alltransitions:
alltransitions+=[tr]
batchdictkey_appendstr=[(k, self.params[k.strip('%')+'_append']) for k in ['Inte', 'Wt%', 'At%']]
keymodfcn=lambda k, a:'%s.%s%s' %(k[:-1], k[-1:], a)
trfoms=[keymodfcn(tr, s) for k, s in batchdictkey_appendstr for tr in alltransitions]
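# e.g. with the default params a transition label like 'CuK' (assumed label format,
# illustrative only) yields the fom names 'Cu.K.CPS', 'Cu.K.WtPerc' and 'Cu.K.AtPerc'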
self.fomnames=trfoms+self.fomnames
#go through each file and make the fomdlist entries for each sample_no therein, which may contain duplicate sample_no but those should be differentiated by runint
self.fomdlist=[]
for filed in self.filedlist:
fomd={}
trfomsmissing=copy.copy(self.fomnames)
for batchdictkey, appendstr in batchdictkey_appendstr:
for tk in filed['batch_summary']['transitionslist']:
savek=keymodfcn(tk, appendstr)
trfomsmissing.pop(trfomsmissing.index(savek))
fomd[savek]=filed['batch_summary'][batchdictkey][tk]
for savek in filed['batch_summary'].keys():
if savek in trfomsmissing:#for example StagX
trfomsmissing.pop(trfomsmissing.index(savek))
fomd[savek]=filed['batch_summary'][savek]
fomd['sample_no']=filed['batch_summary']['sample_no']
#this zips the fomd arrays into tuple lists. the second line makes a dict for each of those, adding to it a list of NaN tuples for the missing keys, and the 1st and 3rd lines add to that dict the common values
self.fomdlist+=[dict(\
dict(zip(self.fomnames+['sample_no'], tup)+[(missk, numpy.nan) for missk in trfomsmissing]), \
plate_id=filed['plate_id'], runint=int(filed['run'].partition('run__')[2]))\
for tup in zip(*[fomd[k] for k in self.fomnames+['sample_no']])]
self.csvheaderdict['plot_parameters']['plot__1']['fom_name']=trfoms[0]
allkeys=list(FOMKEYSREQUIREDBUTNEVERUSEDINPROCESSING)+self.fomnames#+self.strkeys_fomdlist#str=valued keys don't go into fomnames
self.writefom(destfolder, anak, anauserfomd=anauserfomd, strkeys_fomdlist=self.strkeys_fomdlist)#sample_no, plate_id and runint are explicitly required in csv selection above and are assumed to be present here
class Analysis__PlatemapComps(Analysis_Master_nointer):
def __init__(self):
self.analysis_fcn_version='1'
self.dfltparams={'key_append_conc':'.PM.Loading', 'key_append_atfrac':'.PM.AtFrac', 'tot_conc_label':'Tot.PM.Loading', 'other_keys_to_include':'x,y,code'}
self.params=copy.copy(self.dfltparams)
self.analysis_name='Analysis__PlatemapComps'
self.requiredkeys=[]
self.optionalkeys=[]
self.requiredparams=['elements', 'map_id']
self.fomnames=[]
self.plotparams=dict({}, plot__1={})#copied in the default getapplicablefomfiles
self.csvheaderdict=dict({}, csv_version='1', plot_parameters={})#get for each csv during .perform()
self.description='calculate platemap compositions for inkj exp'
def getapplicablefilenames(self, expfiledict, usek, techk, typek, runklist=None, anadict=None, calcFOMDialogclass=None):#just a wrapper around getapplicablefomfiles to keep same argument format as other AnalysisClasses
if True in [not 'platemapdlist' in rund.keys() for runk, rund in calcFOMDialogclass.expfiledict.iteritems() if runk.startswith('run__')]:
#all platemaps must be available
self.filedlist=[]
return self.filedlist
self.num_files_considered, self.filedlist=getapplicable_runs_paramsonly(expfiledict, usek, runklist=None, requiredparams=self.requiredparams)
return self.filedlist
def processnewparams(self, calcFOMDialogclass=None):
self.fomnames=[]
def perform(self, destfolder, expdatfolder=None, writeinterdat=True, anak='', zipclass=None, anauserfomd={}, expfiledict=None):#must have same arguments as regular AnaylsisClass
self.initfiledicts()
for filed in self.filedlist:
try:
#if 1:
pid =filed['plate_id']
els=filed['elements'].split(',')
errbool, (cels_set_ordered, conc_el_chan)=get_multielementink_concentrationinfo(filed['runparamd'], els, return_defaults_if_none=True)#None if nothing to report, (True, str) if error, (False, (cels_set_ordered, conc_el_chan)) with the set of elements and how to calculate their concentration from the platemap
if errbool:
raiseTemp
pmkeys_to_include=[k.strip() for k in self.params['other_keys_to_include'].split(',')]
pmpath=getplatemappath_plateid('', pmidstr=str(filed['map_id']), return_pmidstr=False)
#pmpath, pmidstr=r'J:\hte_jcap_app_proto\map\0068-04-0100-mp.txt', '69'#for 1-off override
platemapdlist=readsingleplatemaptxt(pmpath, erroruifcn=None)
tot_conc_label=None if len(self.params['tot_conc_label'])==0 else self.params['tot_conc_label']
calc_comps_multi_element_inks(platemapdlist, cels_set_ordered, conc_el_chan, key_append_conc=self.params['key_append_conc'], key_append_atfrac=self.params['key_append_atfrac'], tot_conc_label=tot_conc_label)
newfomnames=[el+self.params['key_append_conc'] for el in cels_set_ordered]+\
[el+self.params['key_append_atfrac'] for el in cels_set_ordered]+\
([] if tot_conc_label is None else [tot_conc_label])
newfomnames=[lab for lab in newfomnames if not (lab in FOMKEYSREQUIREDBUTNEVERUSEDINPROCESSING)]
self.fomnames=[k for k in (newfomnames+pmkeys_to_include) if k in platemapdlist[0]]
if 'code' in self.fomnames:
num_intfoms_at_start_of_fomdlist=1
self.fomnames.pop(self.fomnames.index('code'))
self.fomnames=['code']+self.fomnames
else:
num_intfoms_at_start_of_fomdlist=0
for d in platemapdlist:
d['plate_id']=filed['plate_id']
d['runint']=filed['runint']
self.strkeys_fomdlist=[]
allkeys=list(FOMKEYSREQUIREDBUTNEVERUSEDINPROCESSING)+self.fomnames
self.fomdlist=platemapdlist
except:
if self.debugmode:
raiseTEMP
print 'skipped calculation of ', pid
self.fomdlist=[]
continue
if len(self.fomdlist)==0:
print 'no foms calculated for ', fn
continue
plotfomname=self.params['tot_conc_label'] if self.params['tot_conc_label'] in self.fomnames else self.fomnames[0]
self.csvheaderdict['plot_parameters']['plot__1']=dict({}, fom_name=plotfomname, colormap='jet', colormap_over_color='(0.5,0.,0.)', colormap_under_color='(0.,0.,0.)')
self.writefom(destfolder, anak, anauserfomd=anauserfomd, strkeys_fomdlist=self.strkeys_fomdlist, num_intfoms_at_start_of_fomdlist=num_intfoms_at_start_of_fomdlist)#sample_no, plate_id and runint are explicitly required in csv selection above and are assumed to be present here
```
#### File: JCAPDataProcess/AuxPrograms/quatcomp_plot_options.py
```python
import os, os.path
import sys
import numpy, copy, itertools
from PyQt4.QtCore import *
from PyQt4.QtGui import *
#import operator
import matplotlib
#from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
#try:
# from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
#except ImportError:
# from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
#from matplotlib.figure import Figure
#import numpy.ma as ma
#import matplotlib.colors as colors
#import matplotlib.cm as cm
#import matplotlib.mlab as mlab
#import pylab
#import pickle
#from fcns_math import *
#from fcns_io import *
#from fcns_ui import *
PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
matplotlib.rcParams['backend.qt4'] = 'PyQt4'
wd=os.getcwd()
sys.path.append(os.path.join(PyCodePath,'PythonCompositionPlots'))
#from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
from quaternary_FOM_stackedtern2 import *
from quaternary_FOM_stackedtern5 import *
from quaternary_FOM_stackedtern20 import *
from quaternary_FOM_stackedtern30 import *
from quaternary_FOM_stackedtern9of100 import *
from quaternary_ternary_faces import *
from quaternary_faces_shells import *
from quaternary_folded_ternaries import *
class quatcompplotoptions():
def __init__(self, plotw, combobox, plotw3d=None, ellabels=['A', 'B', 'C', 'D'], plotwcbaxrect=None, include3doption=False):
self.ellabels=ellabels
self.plotw=plotw
self.plotw3d=plotw3d
self.plotwcbaxrect=plotwcbaxrect
self.plottypeComboBox = combobox
self.ternaryfaceoptions=[\
('layers of\ntern. shells', ternaryfaces_shells), \
('unfolded\ntern. slices', ternaryfaces_folded), \
('only tern.\nfaces', ternaryfaces), \
]
self.ternaryface_uiinds=[1, 2, 3]
self.stackedternoptions=[\
('20% interv\nternaries', (make5ternaxes, scatter_5axes), .2), \
('10% interv\nternaries', (make10ternaxes, scatter_10axes), .1), \
('5% interv\nternaries', (make20ternaxes, scatter_20axes), .05), \
('3.3% interv\nternaries', (make30ternaxes, scatter_30axes), .0333), \
('9 plots at\n1% interv', (make9of100ternaxes, scatter_9of100axes), .01), \
]
self.stackedtern_uiinds=[4, 5, 6, 7, 8]
if self.plotw3d or include3doption:
self.quat3doptions=[\
('3-D Quaternary', QuaternaryPlot), \
]
self.quat3d_uiinds=[9]
else:
self.quat3doptions=[]
self.quat3d_uiinds=[]
self.fillplotoptions()
def fillplotoptions(self):
self.plottypeComboBox.clear()
self.plottypeComboBox.insertItem(0, 'none')
for count, tup in enumerate(self.ternaryfaceoptions):
self.plottypeComboBox.insertItem(999, tup[0])
for count, tup in enumerate(self.stackedternoptions):
self.plottypeComboBox.insertItem(999, tup[0])
for count, tup in enumerate(self.quat3doptions):
self.plottypeComboBox.insertItem(999, tup[0])
self.plottypeComboBox.setCurrentIndex(1)
def loadplotdata(self, quatcomps, cols, nintervals=None, max_nintervals=30, comp1dindstocheck=[-1], negligible_comp_diff=.005):
self.cols=cols
self.quatcomps=quatcomps
if nintervals is None and len(self.quatcomps)>0:
# pairwisediffs=(((quatcomps[1:]-quatcomps[:-1])**2).sum(axis=1))**.5/2.**.5
# mindiff=(pairwisediffs[pairwisediffs>0.005]).min()
negligible_comp_diff2=negligible_comp_diff**2
mindiff=999.
for elind in comp1dindstocheck:
elconc=quatcomps[:, elind]
elconcdiff=numpy.abs(elconc[1:]-elconc[:-1])
elconcdiff=elconcdiff[elconcdiff>0]
if len(elconcdiff)==0:#all of them are equal
continue
mindiff=min(mindiff, elconcdiff.min())
mindiff2=mindiff**2
difflist=[((x0-x1)**2)**.5 for x0, x1 in itertools.combinations(elconc, 2) if (x0-x1)**2>negligible_comp_diff2 and (x0-x1)**2<mindiff2]
if len(difflist)>0:
mindiff=min(difflist)
if mindiff>1.:#actually means 999, i.e. the mindiff was 0 because all the checked comp axes had the same values within negligible_comp_diff
self.nintervals=5#count this as the ~minimum number of intervals
else:
self.nintervals=int(min(max_nintervals, round(1./mindiff)))
else:
self.nintervals=nintervals
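# A minimal sketch (not part of the original module) of how nintervals is derived above,
# using hypothetical compositions spaced at 10% along the last composition axis:
# quatcomps = numpy.float64([[.9, 0., 0., .1], [.8, 0., 0., .2], [.7, 0., 0., .3]])
# the smallest nonzero step along comp1dindstocheck=[-1] is 0.1, so
# nintervals = int(min(max_nintervals, round(1./0.1))) = 10
# if every checked axis were constant (mindiff stays 999), nintervals would fall back to 5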
def plot(self, plotw=None, plotw3d=None, **kwargs):
if plotw is None:
plotw=self.plotw
if plotw3d is None:
plotw3d=self.plotw3d
i=self.plottypeComboBox.currentIndex()
if i==0:
return None
if i in self.quat3d_uiinds:
if plotw3d is None:
plotw.redoaxes(projection3d=True, cbaxkwargs=dict({}, axrect=self.plotwcbaxrect))
plotw3d=plotw
self.cbax=plotw.cbax
else:
plotw3d.axes.cla()
selclass=self.quat3doptions[self.quat3d_uiinds.index(i)][1]
self.toComp=self.quat3dplot(plotw3d, selclass, **kwargs)
return True
plotw.fig.clf()
if i in self.ternaryface_uiinds:
selclass=self.ternaryfaceoptions[self.ternaryface_uiinds.index(i)][1]
self.toComp=self.ternaryfaceplot(plotw, selclass, **kwargs)
if i in self.stackedtern_uiinds:
makefcn, scatterfcn=self.stackedternoptions[self.stackedtern_uiinds.index(i)][1]
delta=self.stackedternoptions[self.stackedtern_uiinds.index(i)][2]
self.toComp=self.stackedternplot(plotw, makefcn, scatterfcn, delta, **kwargs)
return False
def quat3dplot(self, plotw3d, plotclass, **kwargs):
if 's' in kwargs.keys() and not isinstance(kwargs['s'], int):
kwargs['s']=18
tf=plotclass(plotw3d.axes, ellabels=self.ellabels)#, nintervals=self.nintervals)
tf.label()
tf.scatter(self.quatcomps, c=self.cols, **kwargs)
return lambda x, y, ax:None
def ternaryfaceplot(self, plotw, plotclass, **kwargs):
if not self.plotwcbaxrect is None:
plotw.axes=plotw.fig.add_axes((0, 0, self.plotwcbaxrect[0]-.01, 1))
self.cbax=plotw.fig.add_axes(self.plotwcbaxrect)
else:
plotw.axes=plotw.fig.add_axes((0, 0, 1, 1))
ax=plotw.axes
tf=plotclass(ax, nintervals=self.nintervals, ellabels=self.ellabels)
tf.label()
tf.scatter(self.quatcomps, self.cols, **kwargs)
return lambda x, y, ax: tf.toComp(x, y)
def stackedternplot(self, plotw, makefcn, scatterfcn, delta, drawcolorbarhere=False, **kwargs):
if 's' in kwargs.keys() and not isinstance(kwargs['s'], int):
kwargs['s']=18
plotw.fig.clf()
if self.plotwcbaxrect is None:
self.cbax=None
kwargs['cb']=False
elif drawcolorbarhere:
self.cbax=None#if drawing here cannot pass the cbax because scatterfcn doesn't return it
kwargs['cb']=True
kwargs['cbrect']=self.plotwcbaxrect
else:#going to draw colorbar externally so only make cbax here
self.cbax=plotw.fig.add_axes(self.plotwcbaxrect)
kwargs['cb']=False#do not make the colorbar in the scatterfcn
self.axl, self.stpl=makefcn(fig=plotw.fig, ellabels=self.ellabels)
scatterfcn(self.quatcomps, self.cols, self.stpl, edgecolor='none', **kwargs)
def toComp(x, y, ax, delta=delta, axl=copy.copy(self.axl)):
if not ax in axl:
return None
i=axl.index(ax)
dclick=delta*i
bclick=y*2./numpy.sqrt(3.)
aclick=1.-x-bclick/2.
cclick=1.-aclick-bclick
compclick=numpy.float64([aclick, bclick, cclick, dclick])
compclick[:3]*=1.-dclick
if numpy.all((compclick>=0.)&(compclick<=1.)):
return compclick
else:
return None
return toComp
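# A minimal sketch (not from the original code) of the mapping that toComp inverts,
# using a hypothetical composition on the delta=0.2 slice stack:
# comp = (A, B, C, D) = (0.4, 0.3, 0.1, 0.2) lies on slice i = D/delta = 1, and within
# that ternary slice the normalized coords are (a, b, c) = (A, B, C)/(1.-D) = (0.5, 0.375, 0.125)
# forward map: x = c + b/2. = 0.3125, y = b*numpy.sqrt(3.)/2. ~= 0.3248
# so toComp(0.3125, 0.3248, axl[1]) recovers approximately [0.4, 0.3, 0.1, 0.2]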
#class plotwidget(FigureCanvas):
# def __init__(self, parent, width=12, height=6, dpi=72, projection3d=False):
#
# #plotdata can be 2d array for image plot or list of 2 1d arrays for x-y plot or 2d array for image plot or list of lists of 2 1D arrays
#
# self.fig=Figure(figsize=(width, height), dpi=dpi)
# if projection3d:
# self.axes=self.fig.add_subplot(111, navigate=True, projection='3d')
# else:
# self.axes=self.fig.add_subplot(111, navigate=True)
# self.mpl_connect('button_press_event', self.myclick)
#
# self.axes.hold(True)
# FigureCanvas.__init__(self, self.fig)
# self.setParent(parent)
# #self.parent=parent
# FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
# FigureCanvas.updateGeometry(self)
# #NavigationToolbar(self, parent)
# self.toolbar=NavigationToolbar(self, self)
#
#
# self.clicklist=[]
#
# def myclick(self, event):
# if not (event.xdata is None or event.ydata is None):
# arrayxy=[event.xdata, event.ydata]
# print 'clicked on image: array indices ', arrayxy, ' using button', event.button
# self.clicklist+=[arrayxy]
# self.emit(SIGNAL("genericclickonplot"), [event.xdata, event.ydata, event.button])
#
```
#### File: JCAPDataProcess/AuxPrograms/xpss_create_rcp_exp_v2.py
```python
import string, copy
#import time
import os, os.path, shutil
import sys
import numpy
#from PyQt4.QtCore import *
#from PyQt4.QtGui import *
import operator
import numpy as np
import re
os.chdir('K:\users\helge.stein\scripts-dev\JCAPDataProcess-master\AuxPrograms')
projectroot=os.path.split(os.getcwd())[0]
sys.path.append(projectroot)
sys.path.append(os.path.join(projectroot,'AuxPrograms'))
sys.path.append(os.path.join(projectroot,'QtForms'))
sys.path.append(os.path.join(projectroot,'AuxPrograms'))
sys.path.append(os.path.join(projectroot,'OtherApps'))
sys.path.append(os.path.join(projectroot,'BatchProcesses'))
sys.path.append(os.path.join(projectroot,'AnalysisFunctions'))
#from fcns_math import *
from fcns_io import *
from fcns_ui import *
class setup_rcp_and_exp_xpss():
def __init__(self, import_path, rcpext='.done', expext='.done',
overwrite_runs=False, plate_idstr=None, access='hte',
pmidstr=None, sample_no_from_position_index=lambda i:(1+i),
testmode=False):
"""
the xpss data should be saved so that once import_path is given,
this class knows what to do to copy and name all rcp and exp files.
rcpext and expext should be set to .run for testing and then in standard
operation can be made .done
overwrite_runs can be set to True to help with debugging but is safer to keep
as False in case there is a mistake; strictly, overwriting a .done run is
not allowed, but this is not checked
plate_idstr should be auto read but passing the string here overrides that
access for the data is set here and can be public,hte,tri,muri
pmidstr will be auto read from the .info file but if for some reason the
platemap used for xps is different this value can be overridden, which
is dangerous but the sample_no generated for the .rcp/.exp must
correspond to the pmidstr
sample_no_from_position_index should either be a list of all the
sample_no in the order they were measured, or a lambda function like
the default value above when the sample_no were measured in order
"""
self.access=access
self.pmidstr=pmidstr
self.import_path=import_path
self.sample_no_from_position_index=sample_no_from_position_index
self.plate_idstr=plate_idstr
self.parse_spec_files()
self.datatype='xpss'
self.rcpext=rcpext
self.expext=expext
iserror=self.setup_folders(overwrite_runs)
if iserror:
return
self.setup_file_dicts()
self.add_all_files()
self.save_rcp_exp(testmode)
def setup_folders(self, overwrite_runs=True):
if self.pmidstr is None:
ans=getplatemappath_plateid(self.plate_idstr, erroruifcn=None, infokey='screening_map_id:', return_pmidstr=True)
if ans is None:
print 'aborting because failed retrieval of platemap id for plate :', self.plate_idstr
return True
self.pmidstr=ans[1]
#TODO !!!!!!!!!!
#dropfolder=self.getdropfolder_exptype(self.datatype)
dropfolder='K:/experiments/xpss/drop'
if dropfolder is None:
#messageDialog(None, 'Aborting SAVE because cannot find drop folder').exec_()
print 'Aborting SAVE because cannot find drop folder'
return True
if not os.path.isdir(dropfolder):
os.mkdir(dropfolder)
ellist=getelements_plateidstr(self.plate_idstr)
rcplab=''.join(ellist)
self.rcpmainfoldname='_'.join([timestampname()[:8], rcplab, get_serial_plate_id(self.plate_idstr)])
rcpmainfolder=os.path.join(dropfolder, self.rcpmainfoldname)
if not os.path.isdir(rcpmainfolder):
os.mkdir(rcpmainfolder)
self.runfolderpath=os.path.join(rcpmainfolder, self.data_acquisition_timestamp+self.rcpext)
if os.path.isdir(self.runfolderpath):
if overwrite_runs:
shutil.rmtree(self.runfolderpath)
else:
#messageDialog(None, 'Aborting SAVE because %s folder exists' %rcpmainfolder).exec_()
print 'Aborting SAVE because %s folder exists' %rcpmainfolder
return True
os.mkdir(self.runfolderpath)
return False
def get_block_list(self):
block_identifier = 'Dataset filename'
blockIDX = [i for i in range(len(self.lines)) if block_identifier in self.lines[i]]
mid_l = np.sort(np.unique(np.diff(np.array(blockIDX))))[1]
#get spectral blocks
line_pos = 0
self.pre_parsed = []
units = ['eV', 'm', 'Counts']
for i in range(1,len(blockIDX)):
if blockIDX[i]-blockIDX[i-1] == mid_l:
#then we have a spectral block and a dict containing all the info is created
#block = lines[line_pos:line_pos]
block_dict = {}
content = self.lines[blockIDX[i-1]:blockIDX[i]]
for c in content:
#strip away all unnecessary tabs and spaces
c = re.sub('[ \t]+' , ' ', c)
if len(c.split(' = '))>1:
key,val = c.split(' = ')
#this parses a list of values
if ', ' in val:
#numeric data
val = np.array(val.strip('{').strip('}\n').split(', ')).astype(np.float)
else:
#this parses positional information
if not isinstance(val, float):#only attempt unit stripping if val has not already been converted to a float
for unit in units:
try:
if unit in val:
val = re.sub('[ \t]+' , ' ', val)
val = np.float(val.strip('\n').strip(unit))
except:
pass
block_dict[key] = val
self.pre_parsed.append(block_dict)
else:
#we have something else and go to next block
pass
#reminder: the for loop above seems to omit the image of the platen; let's see if that is always necessary
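# A minimal sketch (hypothetical line, not from a real .cal file) of how one metadata
# line is parsed above: c = ' 42  Pass energy = 20 eV\n'
# after the re.sub whitespace collapse, the ' = ' split gives key=' 42 Pass energy', val='20 eV\n';
# since 'eV' is in the units list, val becomes np.float('20 eV'.strip('\n').strip('eV')) = 20.0
# and block_dict[' 42 Pass energy'] = 20.0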
def get_all_params(self):
temp = {'technique':[],'passE':[],'stepE':[],'startE':[]}
self.xpsspec = []
for spec in self.pre_parsed[1::]:
#generate a technique name
method = spec[' 2 Scan type'].strip('\n').strip('F_')
#this looks strange but somehow it sometimes won't find
#the appropriate key for 3113 Chemical symbol or formula
ID_ChemKey = [i for i in range(len(spec.keys())) if '3113' in spec.keys()[i]]
element_an = spec[spec.keys()[ID_ChemKey[0]]].strip('\n')
transition = spec['3114 Transition or charge state'].strip('\n')
temp['technique'].append('{}-{}{}'.format(method,element_an,transition))
temp['passE'].append(spec[' 42 Pass energy'])
temp['stepE'].append(spec[' 4 Spectrum scan step size'])
temp['startE'].append(spec[' 3 Spectrum scan start'])
self.xpsspec.append(spec[' 12 Ordinate values'])
self.allparams = temp
def get_params(self):
self.get_all_params()
self.params = {key:[] for key in self.allparams.keys()}
#attention: this returns the techniques alphabetically sorted
#and not how they appear in the file so ID sort needs to be performed
val,IDx = np.unique(self.allparams['technique'],return_index=True)
for ID in IDx[np.argsort(IDx)]:
for key in self.allparams.keys():
self.params[key].append(self.allparams[key][ID])
def parse_spec_files(self):
self.lines = [line for line in open(self.import_path)]
self.get_block_list() #get the complete file as a list containing dicts
#set the plateid
if self.plate_idstr is None:#ideally plate_id is read from the filename or spec file because it was entered
#by user when starting data acquisition, but if it was passed in
#the class init then ignore
IDStr = self.import_path.strip('.cal')[-4:]
try:
IDNum = np.int(IDStr)#
self.plate_idstr=self.import_path.strip('.cal')[-4:]#forcing naming convention i.e. 4080.cal
except(ValueError):
print 'Filename {} not correct!'.format(IDStr)
#generate meta file in which all important info is stored
#in meta now the info for each spectrum is stored
self.get_params()
#assuming these are unique technique names
#print('Techniques: {}'.format(self.params['technique']))
self.technique_names=self.params['technique']
#parse date
from dateutil.parser import parse
self.data_acquisition_timestamp=parse(self.pre_parsed[0][' 151 Date Acquired']).strftime('%d%y%m.%H%M%S')
self.run_params_dict = {}
for key in self.params.keys():
if key == 'technique':
self.technique_names = self.params['technique']
self.run_params_dict['technique_names'] = self.technique_names
else:
self.run_params_dict[key]=self.params[key]
def strrep_generic_file_dict_value_sp(self,v):
return filterchars(str(v), valid_chars = "/<>-_.,; ()[]{}/%s%s%s" % (string.ascii_letters, string.digits,''.join(['\\','%','&','^','!','#','*'])))
def setup_file_dicts(self):
self.expdict={}
self.expdict['experiment_type']=self.datatype
self.expdict['exp_version']='3'
self.expdict['description']='%s run on plate_id %s with %s' %(self.datatype, self.plate_idstr, ','.join(self.technique_names))
self.expdict['created_by']=self.datatype
self.expdict['access']=self.access
runcount=0
runk='run__%d' %(runcount+1)
self.expdict[runk]={}
exprund=self.expdict[runk]
self.rcpdict={}
rcpdict=self.rcpdict
rcpdict['experiment_type']=self.datatype
rcpdict['technique_name']=self.datatype#don't pay attention to this "technique_name" it is an artifact of previous data and does not have the same meaning as e.g. XPSSURVEY
rcpdict['rcp_version']='2'
self.add_run_attr=lambda k, v:[d.update({k:v}) for d in [exprund, rcpdict]]
self.add_run_attr('screening_map_id', self.pmidstr)
self.add_run_attr('run_use', 'data')
self.add_run_attr('plate_id', self.plate_idstr)
self.add_run_attr('name', self.data_acquisition_timestamp)
compname='HTE-XPSS-01'
self.add_run_attr('computer_name', compname)
exprund['run_path']=r'/%s/%s/%s/%s' %(self.datatype, compname.lower(), self.rcpmainfoldname, rcpdict['name']+self.rcpext)
exprund['rcp_file']=rcpdict['name']+'.rcp'
rcpdict['parameters']={}
exprund['parameters']={}
self.add_run_param=lambda k, v:[d.update({k:v}) for d in [exprund['parameters'], rcpdict['parameters']]]
self.add_run_param('plate_id', self.plate_idstr)
for k, v in self.run_params_dict.iteritems():
v=self.strrep_generic_file_dict_value_sp(v).strip('[').rstrip(']')#make lists comma delimited but without the brackets
self.add_run_param(k, v)
techdlist=[]
for count, tech in enumerate(['XPSS']+self.technique_names):
tk='files_technique__%s' %tech
exprund[tk]={}
rcpdict[tk]={}
if count==0:
exprund[tk]['kratos_files']={}
rcpdict[tk]['kratos_files']={}
xpsstechd=(rcpdict[tk]['kratos_files'], exprund[tk]['kratos_files'])
else:
exprund[tk]['pattern_files']={}
rcpdict[tk]['pattern_files']={}
techdlist+=[(rcpdict[tk]['pattern_files'], exprund[tk]['pattern_files'])]
#TODO: unsure about this line
self.add_kratos_file=lambda fn:[d.update({fn:strrep_filed_createflatfiledesc('',{'file_type':'xpss_kratos_file'})}) for d in xpsstechd]
#self.add_kratos_file=lambda fn:[d.update({fn:strrep_filed_createflatfiledesc({'file_type':'xpss_kratos_file'})}) for d in xpsstechd]
#self.add_meta_file=lambda fn:[d.update({fn:filed_createflatfiledesc({'file_type':'xpss_kratos_file'})}) for d in xpsstechd]
self.pattern_file_keys=['BE(eV)','Intensity']
self.add_pattern_file=lambda tech, fn, nrows, sample_no:[d.update({fn:strrep_filed_createflatfiledesc('',{'file_type':'xpss_spectrum_csv','keys':self.pattern_file_keys,'num_header_lines':1,'num_data_rows':nrows,'sample_no':sample_no})}) for d in techdlist[self.technique_names.index(tech)]]
def save_rcp_exp(self, testmode):
rcpfilestr=strrep_filedict(self.rcpdict)
p=os.path.join(self.runfolderpath, self.rcpdict['name']+'.rcp')
if testmode:
print 'THIS IS THE RCP FILE THAT WOULD BE SAVED:'
print rcpfilestr
return
with open(p, mode='w') as f:
f.write(rcpfilestr)
#print 'rcp file saved to ', p
saveexp_txt_dat(self.expdict, saverawdat=False, experiment_type=self.datatype, rundone=self.expext, file_attr_and_existence_check=False)
#print 'exp file saved to ', dsavep
def add_all_files(self):
self.import_path#this path should be all that's necessary to
position_index_of_file_index=lambda fi:fi//len(self.technique_names)
sample_no_of_file_index=lambda fi:self.sample_no_from_position_index[position_index_of_file_index(fi)] if isinstance(self.sample_no_from_position_index, list) else self.sample_no_from_position_index(position_index_of_file_index(fi))#a list is indexed by the position index; a callable is called with it
kratosfn=self.import_path
fns=['{}_XPS{}.csv'.format(sample_no_of_file_index(i),self.allparams['technique'][i]) for i in range(len(self.xpsspec))]
self.add_kratos_file(kratosfn)
for fileindex, fn in enumerate(fns):
sample_no=sample_no_of_file_index(fileindex)
tech=self.technique_names[fileindex%len(self.technique_names)]
self.add_pattern_file(tech, fn, len(self.xpsspec[fileindex]), sample_no)
y = self.xpsspec[fileindex]
#excitation energy is hardcoded to 1486.6 eV
x = np.array([1486.6-self.allparams['startE'][fileindex]+i*self.allparams['stepE'][fileindex] for i in range(len(self.xpsspec[fileindex]))])
sav = np.array([x,y]).T
np.savetxt(os.path.join(self.runfolderpath,fn),sav,delimiter=',',fmt='%3.4f, %0.0f',header=','.join(self.pattern_file_keys))
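# A minimal sketch (hypothetical numbers, not from the original) of the binding-energy axis
# built above: with startE=200.0 eV, stepE=0.5 eV and 10 points, the saved BE(eV) column is
# 1486.6-200.0+0.5*i for i in range(10), i.e. 1286.6, 1287.1, ..., 1291.1
# and with 3 techniques per position, file index 7 maps to position index 7//3=2,
# i.e. the sample_no returned by sample_no_from_position_index for position 2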
##example
#import_path = 'K:/experiments/xpss/user/MnFeCoNiCuZn/4082.cal'
#xpsimportclass=setup_rcp_and_exp_xpss(import_path, rcpext='.run', expext='.run', overwrite_runs=True, plate_idstr=None, access='tri', pmidstr=None, sample_no_from_position_index=lambda i:(1+i), testmode=True)
```
#### File: JCAPDataProcess/BatchProcesses/DR_v1.py
```python
skiponerror=1
batchfilepath=r'.batch'
import sys, os
############
projectroot=os.path.split(os.getcwd())[0]
sys.path.append(projectroot)
sys.path.append(os.path.join(projectroot,'QtForms'))
sys.path.append(os.path.join(projectroot,'AuxPrograms'))
sys.path.append(os.path.join(projectroot,'OtherApps'))
from CreateExperimentApp import expDialog
from CalcFOMApp import calcfomDialog
from VisualizeDataApp import visdataDialog
#from CombineFomApp import combinefomDialog
#from FileSearchApp import filesearchDialog
#from FileManagementApp import filemanDialog
from fcns_io import *
from SaveImagesApp import *
from VisualizeBatchFcns import batch_plotuvisrefs
batchfolder=r'K:\users\sksuram\uvis_batchtests\DR'
batchinput_fn='15635_postacid.txt'
class MainMenu(QMainWindow):
def __init__(self, previousmm, execute=True):#, TreeWidg):
super(MainMenu, self).__init__(None)
self.setWindowTitle('HTE Experiment and FOM Data Processing')
self.expui=expDialog(self, title='Create/Edit an Experiment')
self.calcui=calcfomDialog(self, title='Calculate FOM from EXP', guimode=False)
self.visdataui=visdataDialog(self, title='Visualize Raw, Intermediate and FOM data')
mainapp=QApplication(sys.argv)
form=MainMenu(None)
#form.show()
#form.setFocus()
#mainapp.exec_()
expui=form.expui
calcui=form.calcui
visdataui=form.visdataui
batchfilepath=os.path.join(batchfolder,batchinput_fn)
batch_stdout_path=batchfilepath.rpartition('.')[0]+'.out'
#sys.stdout=open(batch_stdout_path,'w')
#T_path: <>; exp_path: <>; ana_path: <>
with open(batchfilepath, mode='r') as f:
batchlines=f.readlines()
logfilepath=batchfilepath.rpartition('.')[0]+'.log'
#runsrcfolder=tryprependpath(RUNFOLDERS, r'uvis\hte-uvis-02', testfile=False, testdir=True)
runsrcfolder=tryprependpath(RUNFOLDERS, '', testfile=False, testdir=True).rstrip(os.sep)
#update these to uvis when ready to run for real
expdestchoice=r'uvis'
anadestchoice=r'uvis'
#use these to create .exp or .ana even if in batch file
forceexp=False
forceana=False
getpvdbool=True
def getbatchlinepath(linestr, key='T_path'):
return linestr.partition(key)[2].strip(':').strip().partition(';')[0].strip()
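# illustrative only (hypothetical batch line, not from a real .txt file):
# getbatchlinepath('DR_path: uvis/hte-uvis-02/15635; exp_path: uvis/20190101.120000.done', key='DR_path')
# returns 'uvis/hte-uvis-02/15635', and the same call with key='exp_path'
# returns 'uvis/20190101.120000.done'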
loglines=[fn for fn in batchlines]
def updatelog(i, s):
s=s.strip()
if len(s)==0:
return
if len(loglines[i].strip())>0:
lst=[loglines[i].strip()]
else:
lst=[]
lst+=[s]
loglines[i]=';'.join(lst)
with open(logfilepath, mode='w') as f:
f.write('\n'.join(loglines))
def batch_pvdbool(fn):
pT=os.path.join(runsrcfolder, fn)
serialno=pT.rpartition('_')[2]
plateidstr=serialno[:-1]
infofn=plateidstr+'.info'
p=tryprependpath(PLATEFOLDERS, os.path.join(plateidstr, infofn), testfile=True, testdir=False)
if len(p)==0:
if skiponerror:
return 'ERROR - info file not found for %s' %plateidstr, False
else:
raiseerror
with open(p, mode='r') as f:
lines=f.readlines()
infofiled=filedict_lines(lines)
methods=[v3 for k, v in infofiled.iteritems() if k.startswith('prints') for k2, v2 in v.iteritems() if k2.startswith('prints') for k3, v3 in v2.iteritems() if k3.startswith('method')]
return '', ('PVD' in methods, )
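# A minimal sketch (hypothetical .info contents, not a real plate) of what the nested
# comprehension above extracts: for an info dict like
# {'prints__1': {'prints__1': {'method': 'PVD', 'other': '...'}}}
# methods becomes ['PVD'] and the function returns ('', (True,))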
def batch_exp(fn, expui=expui):
expui.removeruns()
pDR=os.path.join(runsrcfolder, fn)
if not os.path.isdir(pDR):
if skiponerror:
return 'ERROR - cannot find file %s' %pDR, False
else:
raiseerror
for p in [pDR]:
for zfn in os.listdir(p):
if not zfn.endswith('.zip'):
continue
expui.importruns_folder(folderp=os.path.join(p, zfn))
expui.batchuvissingleplate_norefdata()
if (not 'experiment_type' in expui.expfiledict.keys()) or len(expui.expfilestr)==0 or not 'exp_version' in expui.expfilestr:
if skiponerror:
return 'ERROR - batch exp failed for %s' %pDR, False
else:
raiseerror
saveexpfiledict, exppath=expui.saveexp(exptype=expdestchoice, rundone='.done')
return 'exp_path: %s' %exppath, (saveexpfiledict, exppath)
def select_ana_fcn(calcui, analabel):
cb=calcui.AnalysisNamesComboBox
#print cb.count()
for i in range(1, int(cb.count())):
#print (str(cb.itemText(i)).partition('(')[0].partition('__')[2])
if (str(cb.itemText(i)).partition('(')[0].partition('__')[2])==analabel:
cb.setCurrentIndex(i)
calcui.getactiveanalysisclass()
return True
return False
for batchcount, batchline in enumerate(batchlines):
print batchline
expbool=False
if forceexp or not 'exp_path' in batchline:
rawfn=getbatchlinepath(batchline, key='DR_path').lstrip(os.sep)
logstr, tupbool=batch_exp(rawfn)
updatelog(batchcount, logstr)
if not tupbool:#error so False passed or empty tuple
continue
expfiledict, exppath=tupbool
updatelog(batchcount, 'exp_path: %s' %exppath)
expbool=True
elif getpvdbool:
rawfn=getbatchlinepath(batchline, key='DR_path')
if getpvdbool:
logstr, tupbool=batch_pvdbool(rawfn)
updatelog(batchcount, logstr)
if not tupbool:#error so False passed or empty tuple
continue
pvdbool=tupbool[0]
anabool=False
if forceana or not 'ana_path' in batchline:
if expbool:
calcui.importexp(expfiledict=expfiledict, exppath=exppath)
for runk, rund in calcui.expfiledict.iteritems():#copy over any platemap info
if not runk.startswith('run__'):
continue
rcpfile=rund['rcp_file']
rcpdl=[rcpd for rcpd in expui.rcpdlist if rcpd['rcp_file']==rcpfile and len(rcpd['platemapdlist'])>0]
if len(rcpdl)>0:
rund['platemapdlist']=copy.copy(rcpdl[0]['platemapdlist'])
else:
exppath=getbatchlinepath(batchline, key='exp_path')
calcui.importexp(exppath=exppath)#relative path ok
calcui.autoplotCheckBox.setChecked(False)
for analabel in ['DR_UVVIS', 'BG']:#TODO: for BG run on the ana
if not select_ana_fcn(calcui, analabel):
if skiponerror:
updatelog(batchcount, 'ERROR-Analysis %s not available' %analabel)
continue
else:
raiseerror
calcuierror=calcui.analyzedata()#returns False if ok, otherwise a string error message
if calcuierror:
if skiponerror:
updatelog(batchcount, 'ERROR-%s' %calcuierror)
continue
else:
raiseerror
anasavefolder=calcui.saveana(dontclearyet=True, anatype=anadestchoice, rundone='.run')
calcui.viewresult(anasavefolder=anasavefolder, show=False)
updatelog(batchcount, 'ana_path: %s' %anasavefolder)
anabool=True
if not anabool and 'ana_path' in batchline:#didn't calculate ana here but need to load it
anapath=getbatchlinepath(batchline, key='ana_path')
visdataui.importana(p=anapath)
if visdataui.numStdPlots==0:
if skiponerror:
updatelog(batchcount, 'ERROR- No standard plots in vis')
continue
else:
raiseerror
comboind_strlist=[]
for i in range(1, visdataui.numStdPlots+1):
visdataui.stdcsvplotchoiceComboBox.setCurrentIndex(i)
comboind_strlist+=[(i, str(visdataui.stdcsvplotchoiceComboBox.currentText()))]
for tech in ['DR_UVVIS']:
batch_plotuvisrefs(visdataui, tech=tech)
idialog=visdataui.savefigs(save_all_std_bool=False, batchidialog=None, lastbatchiteration=False, filenamesearchlist=[['xy']], justreturndialog=True, prependstr=tech)
idialog.doneCheckBox.setChecked(False)
idialog.ExitRoutine()
if idialog.newanapath:
visdataui.importana(p=idialog.newanapath)
batchidialog=saveimagesbatchDialog(None, comboind_strlist)
fnsearchle='plate_id__'
if not pvdbool:#if PVD bool then don't save composition plots
fnsearchle+=',code__'
batchidialog.filenamesearchLineEdit.setText(fnsearchle)
batchidialog.ExitRoutine()
visdataui.save_all_std_plots(batchidialog=batchidialog)
#save version of FOM plots with 1.6 to 2.6 eV range
visdataui.colormapLineEdit.setText('jet_r')
visdataui.vminmaxLineEdit.setText('1.6,2.6')
visdataui.belowrangecolLineEdit.setText('(1,0.5,0.5)')
visdataui.aboverangecolLineEdit.setText('(0.3,0,0.5)')
batchidialog.plotstyleoverrideCheckBox.setChecked(True)
batchidialog.prependfilenameLineEdit.setText('1.6to2.6')
fnsearchle='plate_id__&bg_repr'
if not pvdbool:#if PVD bool then don't save composition plots
fnsearchle+=',code__&bg_repr'
batchidialog.filenamesearchLineEdit.setText(fnsearchle)
batchidialog.doneCheckBox.setChecked(False)
batchidialog.ExitRoutine()
visdataui.save_all_std_plots(batchidialog=batchidialog)
updatelog(batchcount, 'images saved')
```
#### File: JCAPDataProcess/BatchProcesses/merge_xrfs_into_ana_v6_xrds_with_resamp_modinplace_2frame.py
```python
import numpy, copy, operator, time
if __name__ == "__main__":
import os, sys
sys.path.append(os.path.split(os.path.split(os.path.realpath(__file__))[0])[0])
sys.path.append(os.path.join(os.path.split(os.path.split(os.path.realpath(__file__))[0])[0], 'AuxPrograms'))
sys.path.append(os.path.join(os.path.split(os.path.split(os.path.realpath(__file__))[0])[0], 'AnalysisFunctions'))
#import matplotlib.pyplot as plt
from fcns_io import *
from fcns_ui import *
from CalcFOMApp import calcfomDialog
from Analysis_Master import Analysis_Master_nointer
from create_udi_standalone import append_udi_to_ana, append_resampled_merged_patterns_to_ana, smoothfcn
analysismasterclass=Analysis_Master_nointer()
processed_patterns=True
include_1st_frame_solo=False
merge_first=True
class MainMenu(QMainWindow):
def __init__(self, previousmm, execute=True):#, TreeWidg):
super(MainMenu, self).__init__(None)
self.calcui=calcfomDialog(self, title='Calculate FOM from EXP', guimode=False, modifyanainplace=True)
mainapp=QApplication(sys.argv)
form=MainMenu(None)
calcui=form.calcui
calcui.getplatemapCheckBox.setChecked(True)
folder=r'L:\processes\analysis\xrds'
#fns=[fn for fn in os.listdir(folder) if fn.endswith('.run')]#20170922.125037.run and 20170922.124034.run
if 0:
xrdsnames=r'xrds\20180524.122602.run'.split(',')
xrfsnames=r'xrfs\20180524.153248.done'.split(',')
elif 0:
xrdsnames=r'xrds\20180524.130338.run'.split(',')
xrfsnames=r'xrfs\20180524.152919.done'.split(',')
elif 1:
xrdsnames=r'xrds\20180524.133941.run'.split(',')
xrfsnames=r'xrfs\20180524.154943.done'.split(',')
fns=[tup for tup in zip(xrdsnames, xrfsnames)]
for fn_or_tup in fns:
print 'starting ', fn_or_tup
if isinstance(fn_or_tup, str):
anafolder=os.path.join(folder, fn_or_tup)
# if True in [s.startswith('ana__6_') for s in os.listdir(anafolder)]:
# print 'skipping becuase already done ', fn
p=os.path.join(anafolder, fn_or_tup.rpartition('.')[0]+'.ana')
else:
p=buildanapath(fn_or_tup[0])
num_ana_for_resamp=(int(include_1st_frame_solo)+1)*(int(processed_patterns)+1)
resamp_ana_are_processed=[False]*num_ana_for_resamp
if processed_patterns:
resamp_ana_are_processed[0]=True
if include_1st_frame_solo:
resamp_ana_are_processed[2]=True
#ana__1 to 4 should be created by data import. then create ana__5 to 8 here
if merge_first and processed_patterns:#should generate 5,6,7,8 or just 5,6
append_resampled_merged_patterns_to_ana(l_anapath=[p, p], l_anak_patterns=['ana__1', 'ana__1'], l_pattern_fn_search_str=['1st_frame', '2nd_frame'], pattern_key='pattern_files', q_key='q.nm_processed',intensity_key='intensity.counts_processed', dq=None, q_log_space_coef=1.00235198, resamp_interp_order=3, pre_resamp_smooth_fcn=smoothfcn)
append_resampled_merged_patterns_to_ana(l_anapath=[p, p], l_anak_patterns=['ana__2', 'ana__2'], l_pattern_fn_search_str=['1st_frame', '2nd_frame'], pattern_key='pattern_files', q_key='q.nm',intensity_key='intensity.counts', dq=None, q_log_space_coef=1.00235198, resamp_interp_order=3, pre_resamp_smooth_fcn=smoothfcn)
if include_1st_frame_solo:
append_resampled_merged_patterns_to_ana(l_anapath=[p], l_anak_patterns=['ana__1'], l_pattern_fn_search_str=['1st_frame'], pattern_key='pattern_files', q_key='q.nm_processed',intensity_key='intensity.counts_processed', dq=None, q_log_space_coef=1.00235198, resamp_interp_order=3, pre_resamp_smooth_fcn=smoothfcn)
append_resampled_merged_patterns_to_ana(l_anapath=[p], l_anak_patterns=['ana__2'], l_pattern_fn_search_str=['1st_frame'], pattern_key='pattern_files', q_key='q.nm',intensity_key='intensity.counts', dq=None, q_log_space_coef=1.00235198, resamp_interp_order=3, pre_resamp_smooth_fcn=smoothfcn)
elif merge_first and not processed_patterns: #should generate 4,5 or just 4
append_resampled_merged_patterns_to_ana(l_anapath=[p, p], l_anak_patterns=['ana__2', 'ana__2'], l_pattern_fn_search_str=['1st_frame', '2nd_frame'], pattern_key='pattern_files', q_key='q.nm',intensity_key='intensity.counts', dq=None, q_log_space_coef=1.00235198, resamp_interp_order=3, pre_resamp_smooth_fcn=smoothfcn)
if include_1st_frame_solo:
append_resampled_merged_patterns_to_ana(l_anapath=[p], l_anak_patterns=['ana__2'], l_pattern_fn_search_str=['1st_frame'], pattern_key='pattern_files', q_key='q.nm',intensity_key='intensity.counts', dq=None, q_log_space_coef=1.00235198, resamp_interp_order=3, pre_resamp_smooth_fcn=smoothfcn)
calcui.importana(p=p)
anakeys_after_append=sort_dict_keys_by_counter(calcui.anadict, keystartswith='ana__')
resamp_anakeys=anakeys_after_append[-num_ana_for_resamp:]
print 'after merge then reimport ', calcui.anadict['experiment_name']
pidstr=calcui.anadict['plate_ids']
if ',' in pidstr:
print 'skipping ', fn_or_tup, pidstr
continue
if isinstance(fn_or_tup, str):
infod=importinfo(pidstr)
#for ank in sort_dict_keys_by_counter(infod['analyses'], keystartswith='analyses__'):#use
analysesd=infod['analyses']
xrfstups=sorted([(time.strptime(v['created_at'].rpartition(' ')[0],'%Y-%m-%d %H:%M:%S'), v) for k, v in analysesd.iteritems() if v['type']=='xrfs'])
if len(xrfstups)==0:
print 'no xrfs data for ', fn_or_tup
continue
relpath_xrfsana=xrfstups[-1][1]['path']#latest created_at analyses__ dict
print 'using xrfs ', relpath_xrfsana
xrfsp=relpath_xrfsana
else:
xrfsp=buildanapath(fn_or_tup[1])
calcui.importauxexpana(xrfsp, exp=False)
for i in range(1, int(calcui.FOMProcessNamesComboBox.count())):
if (str(calcui.FOMProcessNamesComboBox.itemText(i)).partition('(')[0])=='Analysis__FOM_Interp_Merge_Ana':
calcui.FOMProcessNamesComboBox.setCurrentIndex(i)
calcui.getactiveanalysisclass()
calcui.processeditedparams()
break
#calcui.exec_()
c=calcui.analysisclass
c.params['select_aux_keys']='AtFrac'
c.params['select_aux_ints']='2'
c.params['interp_is_comp']=1
c.processnewparams(calcFOMDialogclass=calcui, recalc_filedlist=True)
tempnum=len(sort_dict_keys_by_counter(calcui.anadict, keystartswith='ana__'))
calcui.analyzedata()
anakeys=sort_dict_keys_by_counter(calcui.anadict, keystartswith='ana__')
if len(anakeys)==tempnum:
print '***; %s; %s' %(xrfsp, fn_or_tup)
#continue
calcui.exec_()
xrfsmergedanak=anakeys[-1]
#continue#this skips all file writing until the xrfs ana are fixed
newanasavefolder=calcui.saveana(dontclearyet=False, anatype='xrds', rundone='.run')
newanapath=buildanapath(newanasavefolder)
for anak_patterns, isprocessed in zip(resamp_anakeys, resamp_ana_are_processed):
q_key='q.nm_processed_resampled' if isprocessed else 'q.nm_resampled'
intensity_key='intensity.counts_processed_resampled' if isprocessed else 'intensity.counts_resampled'
append_udi_to_ana(l_anapath=[newanapath], l_anak_comps=[xrfsmergedanak], l_anak_patterns=[anak_patterns], pattern_key='pattern_files', compkeys='AtFrac', q_key=q_key,intensity_key=intensity_key)
#break#have only tested modify in place with a single .ana at a time
```
#### File: JCAPDataProcess/BatchProcesses/process_eche_4led_cv_v9_Toggle.py
```python
import sys
import os
from time import sleep
############
projectroot = os.path.split(os.getcwd())[0]
sys.path.append(projectroot)
sys.path.append(os.path.join(projectroot, 'QtForms'))
sys.path.append(os.path.join(projectroot, 'AuxPrograms'))
sys.path.append(os.path.join(projectroot, 'OtherApps'))
from DBPaths import *
from SaveImagesApp import *
from fcns_ui import *
from fcns_io import *
from VisualizeBatchFcns import choosexyykeys
from VisualizeDataApp import visdataDialog
from CalcFOMApp import calcfomDialog
from CreateExperimentApp import expDialog
from merge_interp_xrfs_single_plate_id import merge_interp_xrfs_single_plate_id
class MainMenu(QMainWindow):
def __init__(self, previousmm, execute=True): # , TreeWidg):
super(MainMenu, self).__init__(None)
self.setWindowTitle('HTE Experiment and FOM Data Processing')
self.expui = expDialog(self, title='Create/Edit an Experiment')
self.calcui = calcfomDialog(
self, title='Calculate FOM from EXP', guimode=False)
self.visdataui = visdataDialog(
self, title='Visualize Raw, Intermediate and FOM data', GUIMODE=False)
def visui_exec(self, show=False):
if self.visdataui is None:
self.visdataui = visdataDialog(
self, title='Visualize Raw, Intermediate and FOM data')
if show:
self.visdataui.show()
def visexpana(self, anafiledict=None, anafolder=None, experiment_path=None, show=False):
self.visui_exec(show=show)
if not (anafiledict is None or anafolder is None):
self.visdataui.importana(
anafiledict=anafiledict, anafolder=anafolder)
elif not experiment_path is None:
self.visdataui.importexp(experiment_path=experiment_path)
def process_run(rf, ksl):
runfolder = rf
keepsmplist = ksl
runfoldername = os.path.join('eche', 'hte-eche-05', '%s' % (runfolder))
keepsmpstr = ','.join([str(x) for x in keepsmplist])
mainapp = QApplication(sys.argv)
form = MainMenu(None)
expui = form.expui
expui.SampleListLineEdit.setText(keepsmpstr)
calcui = form.calcui
visdataui = form.visdataui
def select_ana_fcn(calcui, analabel):
calcui.FOMProcessNamesComboBox.setCurrentIndex(0)
cb = calcui.AnalysisNamesComboBox
for i in range(1, int(cb.count())):
if (str(cb.itemText(i)).partition('(')[0].partition('__')[2]) == analabel:
cb.setCurrentIndex(i)
calcui.getactiveanalysisclass()
return True
return False
def select_procana_fcn(calcui, analabel):
cb = calcui.FOMProcessNamesComboBox
for i in range(1, int(cb.count())):
if (str(cb.itemText(i)).partition('(')[0].partition('__')[2]) == analabel:
cb.setCurrentIndex(i)
calcui.getactiveanalysisclass()
return True
return False
def updateanalysisparams(calcui, paramd):
calcui.analysisclass.params.update(paramd)
calcui.processeditedparams()
def select_techtype(searchstr):
qlist = calcui.TechTypeButtonGroup.buttons()
typetechfound = False
for button in qlist:
if searchstr in str(button.text()).strip():
button.setChecked(True)
typetechfound = True
break
calcui.fillanalysistypes(
calcui.TechTypeButtonGroup.checkedButton())
if not typetechfound:
calcui.exec_()
raiseerror
def plot_new_fom(visdataui, fom_name):
cb = visdataui.fomplotchoiceComboBox
for i in range(0, int(cb.count())):
if str(cb.itemText(i)) == fom_name:
cb.setCurrentIndex(i)
visdataui.filterandplotfomdata()
return True
return False
if expname is None:
if os.path.isdir(runfoldername):
runsrcfolder = runfoldername
else:
runsrcfolder = tryprependpath(RUNFOLDERS, runfoldername)
expui.importruns_folder(folderp=runsrcfolder)
expui.ExpTypeLineEdit.setText('eche')
expui.UserNameLineEdit.setText('eche')
expui.savebinaryCheckBox.setChecked(False)
expui.RunTypeLineEdit.setText('data')
mainitem = expui.techtypetreefcns.typewidgetItem
for i in range(mainitem.childCount()):
mainitem.child(i).setCheckState(0, Qt.Checked)
expui.editexp_addmeasurement()
saveexpfiledict, exppath = expui.saveexp(
exptype=expdestchoice, rundone=expsaveextension)
else:
saveexpfiledict = None
exppath = buildexppath(expname)
print exppath
analysis_to_do_tups = [
('CA1', 'Iphoto', False, {'illum_key': illum_key}, True), ('CA2', 'Iphoto', False, {'illum_key': illum_key}, True), (
'CA3', 'Iphoto', False, {'illum_key': illum_key}, True), ('CA4', 'Iphoto', False, {'illum_key': illum_key}, True),
('CA1', 'SpectralPhoto', False, {}, False),
('CV5', 'Iphoto', False, {'illum_key': illum_key}, False),
('CV5', 'Pphotomax', False, {
'v_extend_lower': -0.1, 'v_extend_upper': 0, 'sweep_direction': 'anodic'}, True),
('CV5', 'Pphotomax', False, {
'v_extend_lower': .03, 'v_extend_upper': 0, 'sweep_direction': 'anodic'}, True),
('CV5', 'Pphotomax', False, {
'v_extend_lower': -0.1, 'v_extend_upper': 0, 'sweep_direction': 'cathodic'}, True),
('CV5', 'Pphotomax', False, {
'v_extend_lower': .03, 'v_extend_upper': 0, 'sweep_direction': 'cathodic'}, True),
]
if ananame is None:
calcui.importexp(exppath=exppath)
currentana = 1
for count, (techtypesearch, ana_fcn, isprocess, paramd, cm2convertbool) in enumerate(analysis_to_do_tups):
print 'calculating ana__%s, %s' % (currentana, ana_fcn)
select_techtype(techtypesearch)
if isprocess:
if not select_procana_fcn(calcui, ana_fcn):
calcui.exec_()
raiseerror
else:
if not select_ana_fcn(calcui, ana_fcn):
calcui.exec_()
raiseerror
if len(paramd) > 0:
updateanalysisparams(calcui, paramd)
print 'parameters updated, performing calculation'
calcuierror = calcui.analyzedata()
currentana += 1
if calcuierror:
calcui.exec_()
raiseerror
if cm2convertbool:
print 'converting to m*/cm2'
calcui.batch_set_params_for_photo_mAcm2_scaling(
measurement_area=measurement_area_override)
calcuierror = calcui.analyzedata()
currentana += 1
if calcuierror:
calcui.exec_()
raiseerror
pidstr = calcui.expfiledict['run__1']['parameters']['plate_id']
merge_interp_xrfs_single_plate_id(calcui, ananame=None, pidstr=pidstr, l_anak_to_merge=[
'ana__2', 'ana__4', 'ana__6', 'ana__8', 'ana__12'], save_extension=None)
anasavefolder = calcui.saveana(
dontclearyet=True, anatype=anadestchoice, rundone='.run')
calcui.viewresult(anasavefolder=anasavefolder, show=False)
else:
anapath = buildanapath(ananame)
anasavefolder = os.path.split(anapath)[0]
visdataui.importana(p=anapath)
visdataui.stdcsvplotchoiceComboBox.setCurrentIndex(9)
visdataui.plot_preparestandardplot()
choosexyykeys(visdataui, ['E.eV_illum', 'EQE', 'None'])
for fn, filed in visdataui.anafiledict['ana__9']['files_multi_run']['sample_vector_files'].iteritems():
p = os.path.join(anasavefolder, fn)
vectrofiled = readcsvdict(
p, filed, returnheaderdict=False, zipclass=None, includestrvals=False, delim=',')
if numpy.all(vectrofiled['EQE'] > mineqeforplot):
filed['path'] = os.path.join(anasavefolder, fn)
filed['zipclass'] = False
visdataui.plotxy(filed=filed)
imagesidialog = visdataui.savefigs(justreturndialog=True)
imagesidialog.widget_plow_dlist[0]['item'].setCheckState(
0, Qt.Unchecked) # plate
imagesidialog.widget_plow_dlist[1]['item'].setCheckState(
0, Qt.Unchecked) # code
imagesidialog.widget_plow_dlist[2]['item'].setCheckState(
0, Qt.Checked) # xy
imagesidialog.prependfilenameLineEdit.setText(
'ana__9-sample%d-' % filed['sample_no'])
imagesidialog.ExitRoutine()
stdplotinds = [2, 4, 6, 8, 12, 14, 16, 18]
for i in stdplotinds:
visdataui.stdcsvplotchoiceComboBox.setCurrentIndex(i)
visdataui.plot_preparestandardplot()
inds = numpy.where(numpy.logical_not(
numpy.isnan(visdataui.fomplotd['fom'])))[0]
if len(inds) > 0:
samplestoplot = list(visdataui.fomplotd['sample_no'][inds])
filterinds = [ind for ind, smp in enumerate(
visdataui.fomplotd['sample_no']) if smp in samplestoplot]
for k in visdataui.fomplotd.keys():
if isinstance(visdataui.fomplotd[k], numpy.ndarray):
visdataui.fomplotd[k] = visdataui.fomplotd[k][filterinds]
else:
print k
vmin = max(0, visdataui.fomplotd['fom'].min())*0.99
vmax = numpy.percentile(visdataui.fomplotd['fom'], 95.)
if visdataui.fomplotd['fom'].max() < 1.1*vmax:
vmax = visdataui.fomplotd['fom'].max()
if not numpy.all((visdataui.fomplotd['fom'] < vmin) | (visdataui.fomplotd['fom'] > vmax)):
visdataui.vminmaxLineEdit.setText(
'%.3f,%.3f' % (vmin, vmax))
visdataui.plotfom()
visdataui.vminmaxLineEdit.setText('')
imagesidialog = visdataui.savefigs(justreturndialog=True)
imagesidialog.widget_plow_dlist[0]['item'].setCheckState(
0, Qt.Checked) # plate
imagesidialog.widget_plow_dlist[1]['item'].setCheckState(
0, Qt.Unchecked) # code
imagesidialog.widget_plow_dlist[2]['item'].setCheckState(
0, Qt.Unchecked) # xy
imagesidialog.ExitRoutine()
if i >= 12:
inds = numpy.where(numpy.logical_not(numpy.isnan(visdataui.fomplotd['fom'])) & (
visdataui.fomplotd['fom'] >= crit_pmax_mwcm2_for_fillfactor))[0]
if len(inds) > 0:
samplestoplot = list(visdataui.fomplotd['sample_no'][inds])
plot_new_fom(visdataui, 'Fill_factor')
filterinds = [ind for ind, smp in enumerate(
visdataui.fomplotd['sample_no']) if smp in samplestoplot]
for k in visdataui.fomplotd.keys():
if isinstance(visdataui.fomplotd[k], numpy.ndarray):
visdataui.fomplotd[k] = visdataui.fomplotd[k][filterinds]
else:
print k
vmin = max(0, visdataui.fomplotd['fom'].min())*0.99
vmax = min(0.8, visdataui.fomplotd['fom'].max())*1.01
if not numpy.all((visdataui.fomplotd['fom'] < vmin) | (visdataui.fomplotd['fom'] > vmax)):
visdataui.vminmaxLineEdit.setText(
'%.3f,%.3f' % (vmin, vmax))
visdataui.plotfom()
visdataui.vminmaxLineEdit.setText('')
imagesidialog = visdataui.savefigs(
justreturndialog=True)
imagesidialog.widget_plow_dlist[0]['item'].setCheckState(
0, Qt.Checked) # plate
imagesidialog.widget_plow_dlist[1]['item'].setCheckState(
0, Qt.Unchecked) # code
imagesidialog.widget_plow_dlist[2]['item'].setCheckState(
0, Qt.Unchecked) # xy
# if need to convert to .done and skipped the fill factor plot, try this - not tested
if i == stdplotinds[-1] and 'done' in anasaveextension:
imagesidialog.doneCheckBox.setChecked(Qt.Checked)
imagesidialog.ExitRoutine()
visdataui.importana(p=imagesidialog.newanapath)
else:
imagesidialog.ExitRoutine()
# if need to convert to .done and skipped the fill factor plot, try this resave of last image- not tested
elif i == stdplotinds[-1] and 'done' in anasaveextension:
imagesidialog = visdataui.savefigs(justreturndialog=True)
imagesidialog.doneCheckBox.setChecked(Qt.Checked)
imagesidialog.ExitRoutine()
visdataui.importana(p=imagesidialog.newanapath)
mainapp.quit()
# user-entered parameters for mA/cm2 calculation and choosing eqe plots
# measurement_area_override=0.58 # for 1.48 mm diameter spot
measurement_area_override = 0.39 # for 1.8 mm diameter spot
mineqeforplot = 1.e-3
crit_pmax_mwcm2_for_fillfactor = .06
illum_key = 'Toggle'
expsaveextension = '.run'
anasaveextension = '.run'
# providing these paths will skip the generation of the exp/ana
expname = None
ananame = None
expdestchoice = 'eche'
anadestchoice = 'eche'
process_tups = (
# ('20191025_SbCrO_54065', [11758, 11763, 11769, 11775, 11781, 11787, 11793, 11799, 11805, 11811, 11817, 11822, 11828,
# 11834, 11840, 11846, 11852, 11858, 11864, 11870, 11876, 11882, 11887, 11893, 11899, 11905, 11911, 11917, 11923]),
# ('20191008_FeSbO_54155', [16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262, 16268,
# 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363, 16369]),
# ('20191119_NbMnO_27841', [16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262, 16268,
# 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363]),
# ('20191029_CuInO_46550', [16192, 16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262,
# 16268, 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363]),
# ('20191030_SbCoO_22981', [16192, 16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262,
# 16268, 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363]),
# ('20191009_SbCuO_41308', [16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262, 16268,
# 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16357, 16363, 16369]),
# ('20191031_SbNiO_22835', [16192, 16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262, 16268,
# 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363, 16369]),
# ('20191025_SbCrO_54076', [16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262, 16268,
# 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363]),
# ('20191028_SnMnO_27953', [16192, 16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262,
# 16268, 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357]),
# ('20191003_PbSbO_54357', [16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262, 16268,
# 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363]),
# ('20191031_PbSbO_22970', [16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262, 16268,
# 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363]),
# ('20191119_ZnSbO_23005', [16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262,
# 16268, 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357]),
# ('20191007_CoSbO_54403', [16203, 16209, 16215, 16221, 16227, 16233, 16239, 16251, 16257, 16262, 16268, 16274,
# 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363, 16369]),
# ('20191030_FeSbO_22868', [16192, 16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262,
# 16268, 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16351, 16357, 16363]),
# ('20191004_BiSbO_54166', [16203, 16209, 16233, 16245, 16251, 16257, 16262, 16268, 16274, 16280,
# 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363]),
# ('20191007_YSbO_54368', [16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262, 16268,
# 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363]),
# ('20191028_MnCuO_42130', [11787, 11793, 11799, 11805, 11811, 11817, 11822, 11828, 11834, 11840,
# 11846, 11852, 11858, 11864, 11870, 11876, 11882, 11893, 11899, 11905, 11911, 11917, 11923]),
# ('20191008_ZnSbO_54177', [16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262, 16268,
# 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363, 16369]),
('20191031_BiSbO_35312', [16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262, 16268,
16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363]),
# ('20191029_TaMnO_39956', [16192, 16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262, 16268,
# 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363, 16369]),
# ('20191022_NiSbO_54425', [16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262, 16268,
# 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363]),
# ('20191029_WBiO_46583', [16192, 16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257, 16262,
# 16268, 16274, 16280, 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363]),
# ('20191021_FeSbO_57181', [16198, 16203, 16221, 16227, 16239, 16251, 16257, 16262, 16268, 16274, 16280,
# 16286, 16292, 16298, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357, 16363]),
('20191022_FeSbO_57170', [11758, 11763, 11769, 11775, 11781, 11787, 11793, 11799, 11805, 11811, 11817, 11822, 11828,
11834, 11840, 11846, 11852, 11858, 11864, 11870, 11876, 11882, 11887, 11893, 11899, 11905, 11911, 11917, 11923]),
# ('20191009_InSbO_54188', [16192, 16198, 16203, 16209, 16215, 16221, 16227, 16233, 16239, 16245, 16251, 16257,
# 16262, 16268, 16274, 16280, 16286, 16292, 16304, 16310, 16316, 16322, 16327, 16333, 16339, 16345, 16351, 16357])
)
for runfolder, keepsmplist in process_tups:
process_run(runfolder, keepsmplist)
```
#### File: johnmgregoire/JCAPDataProcess/CombineFomApp.py
```python
import csv
import os, os.path
import sys
import numpy
from PyQt4.QtCore import *
from PyQt4.QtGui import *
if __name__ == "__main__":
import os, sys
projectpath=os.path.split(os.path.abspath(__file__))[0]
sys.path.append(os.path.join(projectpath,'AuxPrograms'))
from fcns_math import *
from fcns_io import *
class messageDialog(QDialog):
def __init__(self, parent=None, title=''):
super(messageDialog, self).__init__(parent)
self.setWindowTitle(title)
mainlayout=QGridLayout()
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
QObject.connect(self.buttonBox, SIGNAL("rejected()"), self.reject)
mainlayout.addWidget(self.buttonBox, 0, 0)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
def ExitRoutine(self):
return
def mygetopenfile(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfn = unicode(QFileDialog.getOpenFileName(xparent,''.join(['Select file to open:', markstr]),os.path.join(xpath, filename).replace('\\','/')))
xparent.destroy()
xapp.quit()
return returnfn
return unicode(QFileDialog.getOpenFileName(parent,''.join(['Select file to open: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
def mygetopenfiles(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfns=QFileDialog.getOpenFileNames(xparent,''.join(['Select file to open:', markstr]),os.path.join(xpath, filename).replace('\\','/'))
xparent.destroy()
xapp.quit()
else:
returnfns=QFileDialog.getOpenFileNames(parent,''.join(['Select file to open: ', markstr]),os.path.join(xpath, filename).replace('\\','/'))
return [str(s) for s in returnfns]
def mygetsavefile(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfn = unicode(QFileDialog.getSaveFileName(xparent,''.join(['Select file for save: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
xparent.destroy()
xapp.quit()
return returnfn
return unicode(QFileDialog.getSaveFileName(parent,''.join(['Select file for save: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
def mygetdir(parent=None, xpath="%s" % os.getcwd(),markstr='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfn = unicode(QFileDialog.getExistingDirectory(xparent,''.join(['Select directory:', markstr]), xpath))
xparent.destroy()
xapp.quit()
return returnfn
return unicode(QFileDialog.getExistingDirectory(parent,''.join(['Select directory:', markstr]), xpath))
def userinputcaller(parent, inputs=[('testnumber', int)], title='Enter values', cancelallowed=True):
problem=True
while problem:
idialog=userinputDialog(parent, inputs, title)
idialog.exec_()
problem=idialog.problem
if not idialog.ok and cancelallowed:
return None
inputs=[(tup[0], tup[1], s) for tup, s in zip(inputs, idialog.inputstrlist)]
return idialog.ans
class userinputDialog(QDialog):
def __init__(self, parent, inputs=[('testnumber', int, '')], title='Enter values'):
super(userinputDialog, self).__init__(parent)
self.setWindowTitle(title)
mainlayout=QGridLayout()
self.parent=parent
self.inputs=inputs
self.lelist=[]
for i, tup in enumerate(self.inputs):
lab=QLabel()
lab.setText(tup[0])
le=QLineEdit()
if len(tup)>2:
le.setText(tup[2])
self.lelist+=[le]
mainlayout.addWidget(lab, 0, i, 1, 1)
mainlayout.addWidget(le, 1, i, 1, 1)
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
mainlayout.addWidget(self.buttonBox, 2, 0, len(inputs), 1)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
self.setLayout(mainlayout)
QMetaObject.connectSlotsByName(self)
self.problem=False
self.ok=False
def ExitRoutine(self):
self.ok=True
self.problem=False
self.ans=[]
self.inputstrlist=[str(le.text()).strip() for le in self.lelist]
for s, tup in zip(self.inputstrlist, self.inputs):
if tup[1]==str:
try:
self.ans+=[s]
except:
self.problem=True
break
else:
try:
n=myeval(s)
self.ans+=[tup[1](n)]
except:
self.problem=True
break
if self.problem:
idialog=messageDialog(self, 'problem with conversion of ' + tup[0])
idialog.exec_()
class selectoutputsDialog(QDialog):
def __init__(self, parent, keylists, title='Select columns for outputfile values'):
super(selectoutputsDialog, self).__init__(parent)
self.setWindowTitle(title)
mainlayout=QGridLayout()
self.keylists=keylists
self.cblists=[]
for i, kl in enumerate(self.keylists):
cbl=[]
for j, k in enumerate(kl):
cb=QCheckBox()
cb.setText(k)
if (i==0 and j>0) or j==(len(kl)-1):
cb.setChecked(1)
else:
cb.setChecked(0)
mainlayout.addWidget(cb, j, i)
cbl+=[cb]
self.cblists+=[cbl]
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
mainlayout.addWidget(self.buttonBox, j+1, 0, 1, i+1)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
self.setLayout(mainlayout)
QMetaObject.connectSlotsByName(self)
def ExitRoutine(self):
self.keylistsselected=[]
for cbl, kl in zip(self.cblists, self.keylists):
kl_s=[]
for cb, k in zip(cbl, kl):
if cb.isChecked():
kl_s+=[k]
self.keylistsselected+=[kl_s]
class combinefomDialog(QDialog):
def __init__(self, parent, title='', folderpath=None):
super(combinefomDialog, self).__init__(parent)
self.parent=parent
# folderButton=QPushButton()
# folderButton.setText("select\nfolder")
# QObject.connect(folderButton, SIGNAL("pressed()"), self.selectfolder)
#
# plotButton=QPushButton()
# plotButton.setText("update\nfigures")
# QObject.connect(plotButton, SIGNAL("pressed()"), self.calcandplot)
#
saveButton=QPushButton()
saveButton.setText("Select Files\nTo Combine")
QObject.connect(saveButton, SIGNAL("pressed()"), self.save)
self.cb=QCheckBox()
self.cb.setText('check=union\nuncheck=intersection\nof sample_no')
mainlayout=QGridLayout()
mainlayout.addWidget(saveButton, 0, 0)
mainlayout.addWidget(self.cb, 0, 1)
self.setLayout(mainlayout)
#self.folderpath=folderpath
#self.resize(600, 850)
def save(self):
dpl=mygetopenfiles(parent=self, markstr='FOM .txt files', filename='.txt')
smpkeys=['sample_no', 'Sample']
keylists=[]
dropdl=[]
for dp in dpl:
if dp=='':
dropdl+=[None]
continue
with open(dp, mode='r') as f:
lines=f.readlines()
templist=[(i, [l.startswith(k) for k in smpkeys].index(True)) for i, l in enumerate(lines) if True in [l.startswith(k) for k in smpkeys]]
if len(templist)==0:
print 'sample_no not found as left-most column for %s' %dp
continue#cannot parse this file without a sample_no heading
headingslineind, smpkeyind=templist[0]
smpkey=smpkeys[smpkeyind]
delim=lines[headingslineind][len(smpkey)]
arr=readtxt_selectcolumns(dp, delim=delim, num_header_lines=headingslineind+1, floatintstr=str, zipclass=False)
dropd={}
kl=lines[headingslineind].split(delim)
kl=[k.strip() for k in kl]
for k, a in zip(kl, arr):
if k==smpkey:
k='sample_no'
dropd[k]=list(a)
dropdl+=[dropd]
f=open(dp, mode='r')
l=f.readlines()[0]
f.close()
keylists+=[kl]
#keylists+=[list(dropd.keys())]
idialog=selectoutputsDialog(self,keylists)
idialog.exec_()
keylistsselected=idialog.keylistsselected
smplists=[d['sample_no'] for d in dropdl]
smpinters=set(smplists[0])
unionbool=self.cb.isChecked()
for sl in smplists:
if unionbool:
smpinters=smpinters.union(set(sl))
else:
smpinters=smpinters.intersection(set(sl))
smpinters=numpy.array(list(smpinters))
seval=[myeval(s) for s in smpinters]
inds=numpy.argsort(seval)
smpinters=smpinters[inds]
lines=[['sample_no']]
for kl in keylistsselected:
lines[0]+=kl
for s in smpinters:
ll=[s]
for d, sl, kl in zip(dropdl, smplists, keylistsselected):
if s in sl:
i=sl.index(s)
ll+=[d[k][i] for k in kl]
else:
ll+=['NaN' for k in kl]
lines+=[ll]
s='\n'.join([','.join([v for v in ll]) for ll in lines])
sp=mygetsavefile(parent=self, xpath=os.path.split(dpl[0])[0],markstr='savefile', filename='combinedfom.txt' )
f=open(sp, mode='w')
f.write(s)
f.close()
class messageDialog(QDialog):
def __init__(self, parent=None, title=''):
super(messageDialog, self).__init__(parent)
self.setWindowTitle(title)
mainlayout=QGridLayout()
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
QObject.connect(self.buttonBox, SIGNAL("rejected()"), self.reject)
        mainlayout.addWidget(self.buttonBox, 0, 0)
        self.setLayout(mainlayout) #apply the layout (the sibling dialogs call setLayout, but it was missing here)
        QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
def ExitRoutine(self):
return
if __name__ == "__main__":
class MainMenu(QMainWindow):
def __init__(self, previousmm, execute=True, **kwargs):#, TreeWidg):
super(MainMenu, self).__init__(None)
self.combinefomui=combinefomDialog(self, title='Combine FOM from multiple files', **kwargs)
if execute:
self.combinefomui.exec_()
mainapp=QApplication(sys.argv)
form=MainMenu(None)
form.show()
form.setFocus()
mainapp.exec_()
```
#### File: johnmgregoire/JCAPDataProcess/FileManagementApp.py
```python
import time, shutil
import os, os.path
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
try:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
except ImportError:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
projectpath=os.path.split(os.path.abspath(__file__))[0]
sys.path.append(os.path.join(projectpath,'QtForms'))
sys.path.append(os.path.join(projectpath,'AuxPrograms'))
sys.path.append(os.path.join(projectpath,'OtherApps'))
#from fcns_math import *
from fcns_io import *
from fcns_ui import *
from FileManagementForm import Ui_FileManDialog
from DBPaths import *
class filemanDialog(QDialog, Ui_FileManDialog):
def __init__(self, parent=None, title='', folderpath=None):
super(filemanDialog, self).__init__(parent)
self.setupUi(self)
self.parent=parent
button_fcn=[\
(self.deletefoldersButton, self.deletefolders), \
(self.findfoldersButton, self.findfolders), \
]
#(self.UndoExpPushButton, self.undoexpfile), \
# (self.EditParamsPushButton, self.editrunparams), \
#(self.EditExpParamsPushButton, self.editexpparams), \
for button, fcn in button_fcn:
QObject.connect(button, SIGNAL("pressed()"), fcn)
self.treeWidget=self.foldersTreeWidget
self.toplevelitems=[]
self.anafolder=tryprependpath(ANAFOLDERS_L, '')
self.expfolder=tryprependpath(EXPFOLDERS_L, '')
if len(self.anafolder)==0 and len(self.expfolder)==0:
print 'cannot find exp or ana folder'
return
def deletefolders(self):
for mainitem, fold in zip(self.toplevelitems, [self.expfolder, self.anafolder]):
if mainitem is None or not bool(mainitem.checkState(0)):
continue
subitems=[mainitem.child(i) for i in range(mainitem.childCount()) if bool(mainitem.child(i).checkState(0))]
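            #full paths of every checked run folder, i.e. <root>/<type folder>/<run folder>, collected for deletion below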
delpaths=[os.path.join(os.path.join(fold, str(subitem.text(0))), str(subitem.child(i).text(0))) for subitem in subitems for i in range(subitem.childCount()) if bool(subitem.child(i).checkState(0))]
for p in delpaths:
shutil.rmtree(p, ignore_errors=True)
print 'removed ', p
if bool(mainitem.checkState(0)):
idialog=messageDialog(self, 'folders deleted: ANA temp folder possibly deleted \nso restart before performing analysis')
idialog.exec_()
def findfolders(self):
self.treeWidget.clear()
self.toplevelitems=[]
self.endswith=str(self.endswithLineEdit.text())
for i, (lab, fold) in enumerate(zip(['EXP', 'ANA'], [self.expfolder, self.anafolder])):
if len(fold)==0: #didn't find exp or ana folder but found other one
self.toplevelitems+=[None]
continue
mainitem=QTreeWidgetItem([lab], 0)
mainitem.setFlags(mainitem.flags() | Qt.ItemIsUserCheckable)
mainitem.setCheckState(0, Qt.Checked)
if i==0:
item0=mainitem
self.treeWidget.addTopLevelItem(mainitem)
self.nestedfill(fold, mainitem, 'top', endswith=None)
mainitem.setExpanded(True)
self.toplevelitems+=[mainitem]
self.treeWidget.setCurrentItem(item0)
def nestedfill(self, fold, parentitem, level, endswith='.run'):
subfolds=[fn for fn in os.listdir(fold) if os.path.isdir(os.path.join(fold, fn))]
if not endswith is None:
subfolds=[fn for fn in subfolds if fn.endswith(endswith)]
for fn in subfolds:
item=QTreeWidgetItem([fn], 0)
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
if level=='top' and fn!='temp':#don't auto check the non-temp folders like eche, uvis, imag
item.setCheckState(0, Qt.Unchecked)
else:
item.setCheckState(0, Qt.Checked)
if level=='top':
p=os.path.join(fold, fn)
#print p
addbool=self.nestedfill(p, item, 'sub', endswith=self.endswith)
addbool=addbool>0
else:
addbool=True
if addbool:
parentitem.addChild(item)
return len(subfolds)
if __name__ == "__main__":
class MainMenu(QMainWindow):
def __init__(self, previousmm, execute=True, **kwargs):#, TreeWidg):
super(MainMenu, self).__init__(None)
#self.setupUi(self)
self.filemanui=filemanDialog(self, title='Delete obsolete .run folders', **kwargs)
#self.expui.importruns(pathlist=['20150422.145113.donex.zip'])
#self.expui.importruns(pathlist=['uvis'])
if execute:
self.filemanui.exec_()
os.chdir('//htejcap.caltech.edu/share/home/users/hte/demo_proto')
mainapp=QApplication(sys.argv)
form=MainMenu(None)
form.show()
form.setFocus()
mainapp.exec_()
```
#### File: JCAPDataProcess/one_off_routines/20181129_ingest_CU_Bcknd.py
```python
import os,shutil
import pandas as pd
import numpy as np
def import_CU_Multi_Bcknd_as_ana_block(srcfolder,anafolder,fom_segment_min_index_spacing=6,anak='ana__2'):
def get_num_segments(arr):
indsarr=np.where((arr[:-1]<=0.5)&(arr[1:]>0.5))[0]
if len(indsarr)==0:
return 0
return ((indsarr[1:]-indsarr[:-1])>fom_segment_min_index_spacing).sum()+int(indsarr[0]>fom_segment_min_index_spacing)
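    #plate_id is parsed from the trailing '_<number>' of the source folder name (e.g. 'rank1_4832' -> plate 4832)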
pid=int(srcfolder.rpartition('_')[2])
keystr='sample_no,runint,plate_id,num_pts_above_bcknd,smooth_num_pts_above_bcknd,num_segments_above_bcknd,smooth_num_segments_above_bcknd,max_signal_prob,max_smooth_signal_prob'
numk=keystr.count(',')+1
indent=' '
paramsfromfile=''
tups=[]
filelists=[[],[],[]]
for fn in os.listdir(srcfolder):
pr=os.path.join(srcfolder,fn)
nfn=anak+'_'+fn
pn=os.path.join(anafolder,nfn)
if fn=='Bcknd_Summary.csv':
with open(pr,mode='r') as f: lines=f.readlines()
orig_summ_keys=lines[0].strip().split(',')
inds=[count for count,k in enumerate(orig_summ_keys) if 'bcknd_weight__' in k]
i0=inds[0]
i1=inds[-1]
if inds!=range(i0,i1+1):
print 'WARNING NON CONSEC KEYS: ',inds,orig_summ_keys
keep_summ_keys=orig_summ_keys[i0:i1+1]
new_key_str=','.join([keystr]+keep_summ_keys)
filelists[0].append('%s: csv_fom_file;%s;19;%d' %(nfn,new_key_str,len(lines)-1))
csvstartstr=('1\t%d\t%d\t17\ncsv_version: 1\nplot_parameters:' %(numk+len(keep_summ_keys),len(lines)-1))+\
'\n plot__1:\n colormap: jet\n colormap_over_color: (0.5,0.,0.)\n colormap_under_color: (0.,0.,0.)\n fom_name: max_smooth_signal_prob' +\
'\n plot__2:\n colormap: jet\n colormap_over_color: (0.5,0.,0.)\n colormap_under_color: (0.,0.,0.)\n fom_name: smooth_num_pts_above_bcknd' +\
'\n plot__3:\n colormap: jet\n colormap_over_color: (0.5,0.,0.)\n colormap_under_color: (0.,0.,0.)\n fom_name: smooth_num_segments_above_bcknd'
summ_smps=[int(s.partition(',')[0]) for s in lines[1:]]
summ_keepstrs=[','.join(s.split(',')[i0:i1+1]) for s in lines[1:]]
p_summ=pn
elif 'Bcknd_Factors' in fn:
shutil.copy(pr,pn)
with open(pn,mode='r') as f: lines=f.readlines()
filelists[1].append('%s: rams_misc_file;%s;1;%d' %(nfn,lines[0].strip(),len(lines)-1))
elif 'Bcknd_Sample_' in fn:
shutil.copy(pr,pn)
d=pd.read_csv(pn)
x=np.array(d.as_matrix())
smp=int(fn.rpartition('_')[2].partition('.')[0])
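            #tuple fields follow keystr above; columns 2 and 3 of the per-sample csv are assumed to hold the raw and smoothed signal probability traces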
tups.append((smp,1,pid,(x[:,2]>0.5).sum(),(x[:,3]>0.5).sum(),get_num_segments(x[:,2]),get_num_segments(x[:,3]),x[:,2].max(),x[:,3].max()))
filelists[2].append('%s: rams_inter_rawlen_file;%s;1;%d;%d' %(nfn,','.join(d.keys()),len(x),smp))
elif fn=='Bcknd_Init.txt':
with open(pr,mode='r') as f: lines=f.readlines()
lines=[indent*2+l.strip() for l in lines]
paramsfromfile='\n'.join(lines)
new_summ_lines=[csvstartstr,new_key_str]
    for t in sorted(tups):#only summary rows whose sample_no has an individual sample file are kept; an individual file whose sample_no is missing from the summary raises an error
i=summ_smps.index(t[0])
new_summ_lines.append(','.join(['%d,%d,%d,%d,%d,%d,%d,%.5f,%.5f' %t]+[summ_keepstrs[i]]))
filestr='\n'.join(new_summ_lines)
with open(p_summ,mode='w') as f: f.write(filestr)
s=anak
s+=':\n plate_ids: %d\n analysis_fcn_version: 1\n technique: rams\n analysis_general_type: analysis_of_ana\n description: multi-rank background identification and subtraction\n name: Analysis__CU_Multi_Bcknd\n parameters:\n select_ana: ana__1\n%s\n fom_segment_min_index_spacing: %d\n plot_parameters:\n plot__1:\n x_axis: wavenumber._cm\n series__1: smooth_signal_probability_pattern' \
%(pid,paramsfromfile,fom_segment_min_index_spacing)
analines=[s]
analines.append(' files_multi_run:\n fom_files:\n'+'\n'.join([indent*3+filedesc for filedesc in filelists[0]]))
analines.append(' misc_files:\n'+'\n'.join([indent*3+filedesc for filedesc in filelists[1]]))
analines.append(' files_run__1:\n inter_rawlen_files:\n'+'\n'.join([indent*3+filedesc for filedesc in filelists[2]]))
pana=os.path.join(anafolder,[fn for fn in os.listdir(anafolder) if fn.endswith('.ana')][0])
with open(pana,mode='r') as f: fs=f.read()
anafilestr='\n'.join([fs.strip()]+analines)
with open(pana,mode='w') as f: f.write(anafilestr)
with open(os.path.join(srcfolder,'anablock.txt'),mode='w') as f: f.write('\n'.join(analines))
#anafolder=r'L:\processes\analysis\rams\20181205.140000.run'
#
#for anaint,rank in [(2,1),(3,2),(4,4),(5,8)]:
# foldname='rank%d_4832' %rank
# anak='ana__%d' %(anaint)
# srcfolder=os.path.join(r'D:\data\201812_MultiBcknd_4832',foldname)
# import_CU_Multi_Bcknd_as_ana_block(srcfolder,anafolder,fom_segment_min_index_spacing=6,anak=anak)
```
#### File: JCAPDataProcess/OtherApps/FileSearchApp.py
```python
import time, shutil, glob
import os, os.path
import sys
import numpy
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import operator
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
try:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
except ImportError:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import numpy.ma as ma
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import pylab
import pickle
#from fcns_math import *
from fcns_io import *
from fcns_ui import *
from FileSearchForm import Ui_filesearchDialog
from DBPaths import *
class filesearchDialog(QDialog, Ui_filesearchDialog):
def __init__(self, parent=None, title='', folderpath=None):
super(filesearchDialog, self).__init__(parent)
self.setupUi(self)
self.parent=parent
QObject.connect(self.treeWidget, SIGNAL('itemDoubleClicked(QTreeWidgetItem*, int)'), self.openpath)
button_fcn=[\
(self.findfoldersButton, self.findfolders), \
]
#(self.UndoExpPushButton, self.undoexpfile), \
# (self.EditParamsPushButton, self.editrunparams), \
#(self.EditExpParamsPushButton, self.editexpparams), \
for button, fcn in button_fcn:
QObject.connect(button, SIGNAL("pressed()"), fcn)
self.toplevelitems=[]
def openpath(self, item, column):
s=str(item.text(column))
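        #walk up the tree prepending each parent; the top-level item stores 'LABEL~rootpath', so rpartition('~')
        #recovers the root path, and chr(47)/chr(92) swap '/' for '\\' to give a Windows-style path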
while not item.parent() is None:
item=item.parent()
s=r'%s\%s' %(str(item.text(0)).rpartition('~')[2], s)
s=s.replace(chr(47),chr(92))
ans=userinputcaller(self, inputs=[('file path', str, s)], title='Path available for copy', cancelallowed=True)
# if ans is None:
# return
# ans=ans[0].strip()
def findfolders(self):
self.treeWidget.clear()
self.toplevelitems=[]
self.withinstr=str(self.withinfileLineEdit.text()).strip()
self.foldersearchstr=str(self.foldernameLineEdit.text()).strip()
        searchtups=[]
        item0=None #the first root folder may be skipped below, so make sure item0 always exists
for i, (cb, lab, fold) in enumerate(zip([self.exp_k_checkBox, self.exp_j_checkBox, self.ana_k_checkBox, self.ana_j_checkBox], ['EXP', 'EXP', 'ANA', 'ANA'], [tryprependpath(EXPFOLDERS_L, ''), tryprependpath(EXPFOLDERS_J, ''), tryprependpath(ANAFOLDERS_L, ''), tryprependpath(ANAFOLDERS_J, '')])):
if len(fold)==0 or not cb.isChecked(): #didn't find exp or ana folder but found other one
self.toplevelitems+=[None]
continue
mainitem=QTreeWidgetItem(['%s~%s' %(lab, fold.rstrip(chr(47)).rstrip(chr(92)))], 0)
# mainitem.setFlags(mainitem.flags() | Qt.ItemIsUserCheckable)
# mainitem.setCheckState(0, Qt.Checked)
if i==0:
item0=mainitem
self.treeWidget.addTopLevelItem(mainitem)
for typefold in [fn for fn in os.listdir(fold) if os.path.isdir(os.path.join(fold, fn))]:
item=QTreeWidgetItem([typefold], 0)
typefoldpath=os.path.join(fold, typefold)
addtypefold=False
for expanafold in [fn for fn in os.listdir(typefoldpath) if self.foldersearchstr in fn and os.path.isdir(os.path.join(typefoldpath, fn))]:
expanafoldpath=os.path.join(typefoldpath, expanafold)
fnstart='.'.join(expanafold.split('.')[:2])
expanafn=fnstart+'.'+lab.lower()
p=os.path.join(expanafoldpath, expanafn)
if not os.path.isfile(p):
continue
if self.withinstr:
with open(p, mode='r') as f:
found=self.withinstr in f.read()
if not found:
continue
subitem=QTreeWidgetItem([r'%s/%s' %(expanafold, expanafn)], 0)
item.addChild(subitem)
addtypefold=True
if addtypefold:
mainitem.addChild(item)
mainitem.setExpanded(True)
self.toplevelitems+=[mainitem]
break
        if item0 is not None:
            self.treeWidget.setCurrentItem(item0)
def nestedfill(self, fold, parentitem, fnendswith, level):
subfolds=[fn for fn in os.listdir(fold) if os.path.isdir(os.path.join(fold, fn))]
addbool=(level=='top')
for fn in subfolds:
item=QTreeWidgetItem([fn], 0)
# item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
# if level=='top' and fn!='temp':#don't auto check the non-temp folders like eche, uvis, imag
# item.setCheckState(0, Qt.Unchecked)
# else:
# item.setCheckState(0, Qt.Checked)
p=os.path.join(fold, fn)
print p
subaddbool=self.nestedfill(p, item, fnendswith,'sub')
if subaddbool:
parentitem.addChild(item)
addbool=addbool or subaddbool
        fnstoadd=[] #initialize so the return statement works even when the folder name does not match the search string
        if self.foldersearchstr in fold:
            fnstoadd=[fn for fn in os.listdir(fold) if os.path.isfile(os.path.join(fold, fn)) and fn.endswith(fnendswith)]
            for fnadd in fnstoadd:
                item=QTreeWidgetItem([fnadd], 0) #use the matched file name rather than the leftover loop variable from the folder loop above
                parentitem.addChild(item)
        return (len(fnstoadd)>0) or addbool
if __name__ == "__main__":
class MainMenu(QMainWindow):
def __init__(self, previousmm, execute=True, **kwargs):#, TreeWidg):
super(MainMenu, self).__init__(None)
#self.setupUi(self)
self.filesearchui=filesearchDialog(self, title='Search for exp/ana files', **kwargs)
#self.expui.importruns(pathlist=['20150422.145113.donex.zip'])
#self.expui.importruns(pathlist=['uvis'])
if execute:
self.filesearchui.exec_()
os.chdir('//htejcap.caltech.edu/share/home/users/hte/demo_proto')
mainapp=QApplication(sys.argv)
form=MainMenu(None)
form.show()
form.setFocus()
mainapp.exec_()
```
#### File: JCAPDataProcess/OtherApps/SaveImagesApp.py
```python
import string
import os, os.path
#import sys, shutil
#import numpy
from PyQt4.QtCore import *
from PyQt4.QtGui import *
#import operator
import matplotlib
#from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
#try:
# from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
#except ImportError:
# from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
#from matplotlib.figure import Figure
#import numpy.ma as ma
#import matplotlib.colors as colors
#import matplotlib.cm as cm
#import matplotlib.mlab as mlab
#import pylab
#import pickle
#from fcns_math import *
from fcns_io import *
from fcns_ui import *
#from VisualizeAuxFcns import *
from SaveImagesForm import Ui_SaveImagesDialog
from SaveImagesBatchForm import Ui_SaveImagesBatchDialog
from fcns_compplots import *
#from quatcomp_plot_options import quatcompplotoptions
matplotlib.rcParams['backend.qt4'] = 'PyQt4'
class saveimagesDialog(QDialog, Ui_SaveImagesDialog):
def __init__(self, parent, anafolder, fomname, plateid_dict_list=[], code_dict_list=[], histplow=None, xyplotw=None, selectsamplebrowser=None, x_y_righty=['x', 'y', ''], repr_anaint_plots=1, filenamesearchlist=None):
        #filenamesearchlist is a nested list of filename substrings: the outer level is OR-ed and each inner list is AND-ed
super(saveimagesDialog, self).__init__(parent)
self.setupUi(self)
self.parent=parent
self.plateid_dict_list=plateid_dict_list
self.code_dict_list=code_dict_list
self.repr_anaint_plots=repr_anaint_plots
if '.zip' in anafolder:
idialog=messageDialog(self, 'Cannot save to ANA because it is in a .zip ')
idialog.exec_()
self.reject()
return
fnl=[fn for fn in os.listdir(anafolder) if fn.endswith('.ana') and not fn.startswith('.')]
if len(fnl)==0:
idialog=messageDialog(self, 'Cannot save to ANA because no .ana in the folder')
idialog.exec_()
self.reject()
return
self.anafn=fnl[0]
self.anafolder=anafolder
QObject.connect(self.FilesTreeWidget, SIGNAL('itemDoubleClicked(QTreeWidgetItem*, int)'), self.editname)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
self.widgetTopLevelItems={}
self.widgetkeys=['plate_id','code', 'xy', 'hist', 'select_samples_text']
for k in self.widgetkeys:
mainitem=QTreeWidgetItem([k], 0)
self.FilesTreeWidget.addTopLevelItem(mainitem)
mainitem.setExpanded(True)
self.widgetTopLevelItems[k]=mainitem
self.xyyname='-'.join([k for k in x_y_righty if len(k)>0])
self.fomname=fomname
if filenamesearchlist is None:
searchchecker=lambda filen:True#not used in this instance
else:
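            #e.g. a hypothetical filenamesearchlist=[['plate_id', 'fom'], ['code']] accepts filenames containing both 'plate_id' and 'fom', or containing 'code'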
            searchchecker=lambda filen: any(all(searchstr in filen for searchstr in searchlist) for searchlist in filenamesearchlist)
self.widget_plow_dlist=[]
for widgk, val_dict_list in zip(self.widgetkeys[0:2], [self.plateid_dict_list, self.code_dict_list]):
mainitem=self.widgetTopLevelItems[widgk]
for (k, d) in val_dict_list:
filen=self.filterchars('%s__%s-%s.png' %(widgk, k, self.fomname))
s=filen+': python_visualizer_png_image'
item=QTreeWidgetItem([s], 0)
item.setFlags(mainitem.flags() | Qt.ItemIsUserCheckable)
if filenamesearchlist is None:
item.setCheckState(0, Qt.Checked if d['checked'] else Qt.Unchecked)
else:
item.setCheckState(0, Qt.Checked if searchchecker(filen) else Qt.Unchecked)
mainitem.addChild(item)
d['item']=item
self.widget_plow_dlist+=[d]
for widgk, plotw, lab in zip(self.widgetkeys[2:4], [xyplotw, histplow], [self.xyyname, self.fomname]):
if plotw is None:
continue
mainitem=self.widgetTopLevelItems[widgk]
d={'plotw':plotw}
filen=self.filterchars('%s__%s.png' %(widgk, lab))
s=filen+': python_visualizer_png_image'
item=QTreeWidgetItem([s], 0)
item.setFlags(mainitem.flags() | Qt.ItemIsUserCheckable)
if filenamesearchlist is None:
item.setCheckState(0, Qt.Unchecked)
else:
item.setCheckState(0, Qt.Checked if searchchecker(filen) else Qt.Unchecked)
mainitem.addChild(item)
d['item']=item
self.widget_plow_dlist+=[d]
self.selectsamplesname=fomname
self.widget_textbrowser_dlist=[]
for widgk, browser, lab in zip(self.widgetkeys[4:5], [selectsamplebrowser], [self.selectsamplesname]):
if browser is None:
continue
mainitem=self.widgetTopLevelItems[widgk]
d={'browser':browser}
filen=self.filterchars('%s__%s.txt' %(widgk, lab))
s=filen+': python_visualizer_txt'
item=QTreeWidgetItem([s], 0)
item.setFlags(mainitem.flags() | Qt.ItemIsUserCheckable)
if filenamesearchlist is None:
item.setCheckState(0, Qt.Unchecked)
else:
item.setCheckState(0, Qt.Checked if searchchecker(filen) else Qt.Unchecked)
mainitem.addChild(item)
d['item']=item
self.widget_textbrowser_dlist+=[d]
self.newanapath=False
def editname(self, item, column):
if item is None:
            item=self.FilesTreeWidget.currentItem() #'widget' was undefined here; use the dialog's tree widget instead
s=str(item.text(column))
st=s.partition('.png: ')
v=st[0]
keepstr=''.join(st[1:])
ans=userinputcaller(self, inputs=[('filename', str, v)], title='Enter new filename', cancelallowed=True)
if ans is None or ans[0].strip()==v:
return
ans=ans[0].strip()
item.setText(column,''.join([ans, keepstr]))
def filterchars(self, s):
valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
return ''.join([c for c in s if c in valid_chars])
def updateoptionsfrombatchidialog(self, batchidialog, lastbatchiteration=False):
prependstr=str(batchidialog.prependfilenameLineEdit.text())
combinedprependstr=self.filterchars(prependstr+str(self.prependfilenameLineEdit.text()))
self.prependfilenameLineEdit.setText(combinedprependstr)
self.overwriteCheckBox.setChecked(batchidialog.overwriteCheckBox.isChecked())
self.epsCheckBox.setChecked(batchidialog.epsCheckBox.isChecked())
if lastbatchiteration:#only want to convert to done on last image being batch-saved
self.doneCheckBox.setChecked(batchidialog.doneCheckBox.isChecked())#for batch save, images saved in place and then box check in the end if convert to .done chosen
def ExitRoutine(self):
overbool=self.overwriteCheckBox.isChecked()
prependstr=self.filterchars(str(self.prependfilenameLineEdit.text()))
oldp=os.path.join(self.anafolder, self.anafn)
anadict=readana(oldp, erroruifcn=None, stringvalues=True, returnzipclass=False)#cannot be a .zip
startingwithcopiedbool='copied' in os.path.split(self.anafolder)[1]
if startingwithcopiedbool or self.doneCheckBox.isChecked():#must convert to .done if starting with .copied. allows .done to be edited which is bad practice
if not os.path.split(self.anafolder)[1].count('.')>1:
idialog=messageDialog(self, 'Cannot save because ANA folder has no extension')
idialog.exec_()
return
            if startingwithcopiedbool:#if modifying a .copied then need a new time stamp
newanafn=timestampname()+'.ana'
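                #the folder name is assumed to end in a 15-character 'YYYYMMDD.HHMMSS' timestamp, which [:-15] strips so the new timestamp can be substituted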
newanafolder=self.anafolder.rpartition('.')[0][:-15]+newanafn[:-4]+'.done'
movebool=False
else:
                newanafolder=self.anafolder.rpartition('.')[0]+'.done'#this replaces .run with .done but more generally .anything with .done
movebool=True
newanafn=self.anafn
saveana_tempfolder(None, self.anafolder, erroruifcn=None, skipana=True, anadict=None, movebool=movebool, savefolder=newanafolder, saveanafile=False)#move files if necessary but don't create .ana or .exp yet. Do this first so image files get put only into new folder
self.newanapath=os.path.join(newanafolder, newanafn)
else:#writing files and new ana into existing folder
newanafn=self.anafn
newanafolder=self.anafolder
#images here
lines=[]
for d in self.widget_plow_dlist:
if not bool(d['item'].checkState(0)):
continue
pngfn, garb, pngattr=str(d['item'].text(0)).partition(': ')
pngfn=self.filterchars(prependstr+pngfn)
existfns=os.listdir(newanafolder)
fn_attr_list=[(pngfn, pngattr)]
if self.epsCheckBox.isChecked():
fn_attr_list+=[(pngfn.replace('png', 'eps'), pngattr.replace('png', 'eps'))]
for fn, a in fn_attr_list:
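                #when not overwriting, append __2, __3, ... before the extension until the filename is unique in the destination folder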
if (fn in existfns) and not overbool:
i=2
fnorig=fn
while fn in existfns:
fn=''.join([fnorig[:-4], '__%d' %i, fnorig[-4:]])
i+=1
savep=os.path.join(newanafolder, fn)
existfns+=[fn]
d['plotw'].fig.savefig(savep)
lines+=[(fn, a)]
#txt here
txtlines=[]
for d in self.widget_textbrowser_dlist:
if not bool(d['item'].checkState(0)):
continue
pngfn, garb, pngattr=str(d['item'].text(0)).partition(': ')
pngfn=prependstr+pngfn
existfns=os.listdir(newanafolder)
fn_attr_list=[(pngfn, pngattr)]
for fn, a in fn_attr_list:
if (fn in existfns) and not overbool:
i=2
fnorig=fn
while fn in existfns:
fn=''.join([fnorig[:-4], '__%d' %i, fnorig[-4:]])
i+=1
savep=os.path.join(newanafolder, fn)
existfns+=[fn]
with open(savep, mode='w') as f:
f.write(str(d['browser'].toPlainText()))
txtlines+=[(fn, a)]
if (len(lines)+len(txtlines))>0:
da=anadict['ana__%d' %self.repr_anaint_plots]
if not 'files_multi_run' in da.keys():
da['files_multi_run']={}
df=da['files_multi_run']
if len(lines)>0:
if not 'image_files' in df.keys():
df['image_files']={}
d=df['image_files']
for fn, a in lines:
                    d[fn]=a#if fn exists and was overwritten this will do nothing or update the attrstr
if len(txtlines)>0:
if not 'txt_files' in df.keys():
df['txt_files']={}
d=df['txt_files']
for fn, a in txtlines:
                    d[fn]=a#if fn exists and was overwritten this will do nothing or update the attrstr
newp=os.path.join(newanafolder, newanafn)
saveanafiles(newp, anadict=anadict, changeananame=True)#need to overwrite the name because may be a new anafolder/timestamp
class saveimagesbatchDialog(QDialog, Ui_SaveImagesBatchDialog):
def __init__(self, parent, comboind_strlist):
super(saveimagesbatchDialog, self).__init__(parent)
self.setupUi(self)
self.parent=parent
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
self.widgetTopLevelItems={}
self.comboind_strlist=comboind_strlist
for comboind, k in self.comboind_strlist:
mainitem=QTreeWidgetItem([k], 0)
mainitem.setFlags(mainitem.flags() | Qt.ItemIsUserCheckable)
mainitem.setCheckState(0, Qt.Checked)
self.FilesTreeWidget.addTopLevelItem(mainitem)
self.widgetTopLevelItems[k]={}
self.widgetTopLevelItems[k]['item']=mainitem
self.widgetTopLevelItems[k]['comboind']=comboind
def ExitRoutine(self):
self.selectcomboboxinds=sorted([d['comboind'] for d in self.widgetTopLevelItems.values() if bool(d['item'].checkState(0))])
```
#### File: JCAPDataProcess/QtForms/CalcFOMForm.py
```python
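# Qt form class for the CalcFOM dialog; this module appears to be auto-generated (pyuic4-style) from a Qt Designer .ui file,
# so edits are normally made to the .ui source rather than here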
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_CalcFOMDialog(object):
def setupUi(self, CalcFOMDialog):
CalcFOMDialog.setObjectName(_fromUtf8("CalcFOMDialog"))
CalcFOMDialog.resize(1142, 882)
self.BatchComboBox = QtGui.QComboBox(CalcFOMDialog)
self.BatchComboBox.setGeometry(QtCore.QRect(10, 80, 271, 22))
self.BatchComboBox.setObjectName(_fromUtf8("BatchComboBox"))
self.BatchPushButton = QtGui.QPushButton(CalcFOMDialog)
self.BatchPushButton.setGeometry(QtCore.QRect(10, 60, 131, 21))
self.BatchPushButton.setObjectName(_fromUtf8("BatchPushButton"))
self.gridLayoutWidget_3 = QtGui.QWidget(CalcFOMDialog)
self.gridLayoutWidget_3.setGeometry(QtCore.QRect(10, 360, 261, 163))
self.gridLayoutWidget_3.setObjectName(_fromUtf8("gridLayoutWidget_3"))
self.gridLayout_2 = QtGui.QGridLayout(self.gridLayoutWidget_3)
self.gridLayout_2.setMargin(0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_11 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout_2.addWidget(self.label_11, 4, 0, 1, 2)
self.label_17 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_17.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_17.setObjectName(_fromUtf8("label_17"))
self.gridLayout_2.addWidget(self.label_17, 1, 0, 1, 1)
self.label_18 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_18.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_18.setObjectName(_fromUtf8("label_18"))
self.gridLayout_2.addWidget(self.label_18, 2, 0, 1, 1)
self.label_19 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_19.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_19.setObjectName(_fromUtf8("label_19"))
self.gridLayout_2.addWidget(self.label_19, 3, 0, 1, 1)
self.UserNameLineEdit = QtGui.QLineEdit(self.gridLayoutWidget_3)
self.UserNameLineEdit.setObjectName(_fromUtf8("UserNameLineEdit"))
self.gridLayout_2.addWidget(self.UserNameLineEdit, 2, 1, 1, 1)
self.AnaTypeLineEdit = QtGui.QLineEdit(self.gridLayoutWidget_3)
self.AnaTypeLineEdit.setObjectName(_fromUtf8("AnaTypeLineEdit"))
self.gridLayout_2.addWidget(self.AnaTypeLineEdit, 0, 1, 1, 1)
self.AnaNameLineEdit = QtGui.QLineEdit(self.gridLayoutWidget_3)
self.AnaNameLineEdit.setEnabled(False)
self.AnaNameLineEdit.setObjectName(_fromUtf8("AnaNameLineEdit"))
self.gridLayout_2.addWidget(self.AnaNameLineEdit, 1, 1, 1, 1)
self.AccessLineEdit = QtGui.QLineEdit(self.gridLayoutWidget_3)
self.AccessLineEdit.setObjectName(_fromUtf8("AccessLineEdit"))
self.gridLayout_2.addWidget(self.AccessLineEdit, 3, 1, 1, 1)
self.label_16 = QtGui.QLabel(self.gridLayoutWidget_3)
self.label_16.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.gridLayout_2.addWidget(self.label_16, 0, 0, 1, 1)
self.AnaDescLineEdit = QtGui.QLineEdit(self.gridLayoutWidget_3)
self.AnaDescLineEdit.setObjectName(_fromUtf8("AnaDescLineEdit"))
self.gridLayout_2.addWidget(self.AnaDescLineEdit, 5, 0, 1, 2)
self.layoutWidget = QtGui.QWidget(CalcFOMDialog)
self.layoutWidget.setGeometry(QtCore.QRect(290, 10, 212, 141))
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.AnalysisGridLayout = QtGui.QGridLayout(self.layoutWidget)
self.AnalysisGridLayout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.AnalysisGridLayout.setMargin(0)
self.AnalysisGridLayout.setObjectName(_fromUtf8("AnalysisGridLayout"))
self.AnalyzeDataPushButton = QtGui.QPushButton(self.layoutWidget)
self.AnalyzeDataPushButton.setObjectName(_fromUtf8("AnalyzeDataPushButton"))
self.AnalysisGridLayout.addWidget(self.AnalyzeDataPushButton, 0, 1, 1, 1)
self.EditDfltVisPushButton = QtGui.QPushButton(self.layoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.EditDfltVisPushButton.sizePolicy().hasHeightForWidth())
self.EditDfltVisPushButton.setSizePolicy(sizePolicy)
self.EditDfltVisPushButton.setObjectName(_fromUtf8("EditDfltVisPushButton"))
self.AnalysisGridLayout.addWidget(self.EditDfltVisPushButton, 1, 1, 1, 1)
self.ClearAnalysisPushButton = QtGui.QPushButton(self.layoutWidget)
self.ClearAnalysisPushButton.setObjectName(_fromUtf8("ClearAnalysisPushButton"))
self.AnalysisGridLayout.addWidget(self.ClearAnalysisPushButton, 2, 1, 1, 1)
self.EditAnalysisParamsPushButton = QtGui.QPushButton(self.layoutWidget)
self.EditAnalysisParamsPushButton.setObjectName(_fromUtf8("EditAnalysisParamsPushButton"))
self.AnalysisGridLayout.addWidget(self.EditAnalysisParamsPushButton, 0, 0, 1, 1)
self.SaveAnaPushButton = QtGui.QPushButton(self.layoutWidget)
self.SaveAnaPushButton.setObjectName(_fromUtf8("SaveAnaPushButton"))
self.AnalysisGridLayout.addWidget(self.SaveAnaPushButton, 2, 0, 1, 1)
self.ImportAnalysisParamsPushButton = QtGui.QPushButton(self.layoutWidget)
self.ImportAnalysisParamsPushButton.setObjectName(_fromUtf8("ImportAnalysisParamsPushButton"))
self.AnalysisGridLayout.addWidget(self.ImportAnalysisParamsPushButton, 1, 0, 1, 1)
self.ViewResultPushButton = QtGui.QPushButton(self.layoutWidget)
self.ViewResultPushButton.setObjectName(_fromUtf8("ViewResultPushButton"))
self.AnalysisGridLayout.addWidget(self.ViewResultPushButton, 3, 0, 1, 1)
self.ClearSingleAnalysisPushButton = QtGui.QPushButton(self.layoutWidget)
self.ClearSingleAnalysisPushButton.setObjectName(_fromUtf8("ClearSingleAnalysisPushButton"))
self.AnalysisGridLayout.addWidget(self.ClearSingleAnalysisPushButton, 3, 1, 1, 1)
self.SaveViewPushButton = QtGui.QPushButton(self.layoutWidget)
self.SaveViewPushButton.setObjectName(_fromUtf8("SaveViewPushButton"))
self.AnalysisGridLayout.addWidget(self.SaveViewPushButton, 4, 0, 1, 1)
self.UpdatePlotPushButton = QtGui.QPushButton(self.layoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.UpdatePlotPushButton.sizePolicy().hasHeightForWidth())
self.UpdatePlotPushButton.setSizePolicy(sizePolicy)
self.UpdatePlotPushButton.setObjectName(_fromUtf8("UpdatePlotPushButton"))
self.AnalysisGridLayout.addWidget(self.UpdatePlotPushButton, 4, 1, 1, 1)
self.ImportExpPushButton = QtGui.QPushButton(CalcFOMDialog)
self.ImportExpPushButton.setGeometry(QtCore.QRect(0, 30, 71, 21))
self.ImportExpPushButton.setObjectName(_fromUtf8("ImportExpPushButton"))
self.ImportAnaPushButton = QtGui.QPushButton(CalcFOMDialog)
self.ImportAnaPushButton.setGeometry(QtCore.QRect(0, 10, 71, 21))
self.ImportAnaPushButton.setObjectName(_fromUtf8("ImportAnaPushButton"))
self.textBrowser_plate = QtGui.QTextBrowser(CalcFOMDialog)
self.textBrowser_plate.setGeometry(QtCore.QRect(570, 530, 561, 341))
self.textBrowser_plate.setObjectName(_fromUtf8("textBrowser_plate"))
self.textBrowser_h = QtGui.QTextBrowser(CalcFOMDialog)
self.textBrowser_h.setGeometry(QtCore.QRect(760, 20, 371, 231))
self.textBrowser_h.setObjectName(_fromUtf8("textBrowser_h"))
self.textBrowser_comp = QtGui.QTextBrowser(CalcFOMDialog)
self.textBrowser_comp.setGeometry(QtCore.QRect(510, 250, 621, 281))
self.textBrowser_comp.setObjectName(_fromUtf8("textBrowser_comp"))
self.line = QtGui.QFrame(CalcFOMDialog)
self.line.setGeometry(QtCore.QRect(276, 10, 20, 511))
self.line.setLineWidth(2)
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.line_2 = QtGui.QFrame(CalcFOMDialog)
self.line_2.setGeometry(QtCore.QRect(500, 0, 20, 521))
self.line_2.setLineWidth(2)
self.line_2.setFrameShape(QtGui.QFrame.VLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.line_3 = QtGui.QFrame(CalcFOMDialog)
self.line_3.setGeometry(QtCore.QRect(0, 350, 281, 20))
self.line_3.setLineWidth(2)
self.line_3.setFrameShape(QtGui.QFrame.HLine)
self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
self.line_3.setObjectName(_fromUtf8("line_3"))
self.line_4 = QtGui.QFrame(CalcFOMDialog)
self.line_4.setGeometry(QtCore.QRect(0, 99, 281, 21))
self.line_4.setLineWidth(2)
self.line_4.setFrameShape(QtGui.QFrame.HLine)
self.line_4.setFrameShadow(QtGui.QFrame.Sunken)
self.line_4.setObjectName(_fromUtf8("line_4"))
self.AnalysisNamesComboBox = QtGui.QComboBox(CalcFOMDialog)
self.AnalysisNamesComboBox.setGeometry(QtCore.QRect(290, 410, 211, 22))
self.AnalysisNamesComboBox.setObjectName(_fromUtf8("AnalysisNamesComboBox"))
self.label_20 = QtGui.QLabel(CalcFOMDialog)
self.label_20.setGeometry(QtCore.QRect(290, 390, 219, 21))
self.label_20.setObjectName(_fromUtf8("label_20"))
self.AnaTreeWidget = QtGui.QTreeWidget(CalcFOMDialog)
self.AnaTreeWidget.setGeometry(QtCore.QRect(10, 530, 551, 341))
self.AnaTreeWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.AnaTreeWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.AnaTreeWidget.setHeaderHidden(True)
self.AnaTreeWidget.setExpandsOnDoubleClick(False)
self.AnaTreeWidget.setObjectName(_fromUtf8("AnaTreeWidget"))
self.AnaTreeWidget.headerItem().setText(0, _fromUtf8("1"))
self.AnaTreeWidget.header().setVisible(False)
self.AnaTreeWidget.header().setCascadingSectionResizes(False)
self.AnaTreeWidget.header().setStretchLastSection(True)
self.getplatemapCheckBox = QtGui.QCheckBox(CalcFOMDialog)
self.getplatemapCheckBox.setGeometry(QtCore.QRect(170, 10, 111, 21))
self.getplatemapCheckBox.setChecked(True)
self.getplatemapCheckBox.setObjectName(_fromUtf8("getplatemapCheckBox"))
self.CompPlotOrderComboBox = QtGui.QComboBox(CalcFOMDialog)
self.CompPlotOrderComboBox.setGeometry(QtCore.QRect(520, 220, 111, 22))
self.CompPlotOrderComboBox.setObjectName(_fromUtf8("CompPlotOrderComboBox"))
self.label_2 = QtGui.QLabel(CalcFOMDialog)
self.label_2.setGeometry(QtCore.QRect(520, 200, 111, 21))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label = QtGui.QLabel(CalcFOMDialog)
self.label.setGeometry(QtCore.QRect(520, 90, 111, 16))
self.label.setObjectName(_fromUtf8("label"))
self.CompPlotTypeComboBox = QtGui.QComboBox(CalcFOMDialog)
self.CompPlotTypeComboBox.setGeometry(QtCore.QRect(520, 110, 111, 31))
self.CompPlotTypeComboBox.setObjectName(_fromUtf8("CompPlotTypeComboBox"))
self.label_4 = QtGui.QLabel(CalcFOMDialog)
self.label_4.setGeometry(QtCore.QRect(520, 150, 111, 21))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.compplotsizeLineEdit = QtGui.QLineEdit(CalcFOMDialog)
self.compplotsizeLineEdit.setGeometry(QtCore.QRect(520, 170, 111, 22))
self.compplotsizeLineEdit.setObjectName(_fromUtf8("compplotsizeLineEdit"))
self.label_3 = QtGui.QLabel(CalcFOMDialog)
self.label_3.setGeometry(QtCore.QRect(520, 40, 119, 21))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.fomplotchoiceComboBox = QtGui.QComboBox(CalcFOMDialog)
self.fomplotchoiceComboBox.setGeometry(QtCore.QRect(520, 60, 111, 22))
self.fomplotchoiceComboBox.setObjectName(_fromUtf8("fomplotchoiceComboBox"))
self.usedaqtimeCheckBox = QtGui.QCheckBox(CalcFOMDialog)
self.usedaqtimeCheckBox.setGeometry(QtCore.QRect(640, 50, 119, 20))
self.usedaqtimeCheckBox.setObjectName(_fromUtf8("usedaqtimeCheckBox"))
self.label_9 = QtGui.QLabel(CalcFOMDialog)
self.label_9.setGeometry(QtCore.QRect(640, 80, 119, 16))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.aboverangecolLineEdit = QtGui.QLineEdit(CalcFOMDialog)
self.aboverangecolLineEdit.setGeometry(QtCore.QRect(640, 100, 119, 22))
self.aboverangecolLineEdit.setObjectName(_fromUtf8("aboverangecolLineEdit"))
self.label_6 = QtGui.QLabel(CalcFOMDialog)
self.label_6.setGeometry(QtCore.QRect(640, 120, 119, 20))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.belowrangecolLineEdit = QtGui.QLineEdit(CalcFOMDialog)
self.belowrangecolLineEdit.setGeometry(QtCore.QRect(640, 140, 119, 22))
self.belowrangecolLineEdit.setObjectName(_fromUtf8("belowrangecolLineEdit"))
self.label_8 = QtGui.QLabel(CalcFOMDialog)
self.label_8.setGeometry(QtCore.QRect(640, 160, 119, 21))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.vminmaxLineEdit = QtGui.QLineEdit(CalcFOMDialog)
self.vminmaxLineEdit.setGeometry(QtCore.QRect(640, 180, 119, 22))
self.vminmaxLineEdit.setObjectName(_fromUtf8("vminmaxLineEdit"))
self.stdcsvplotchoiceComboBox = QtGui.QComboBox(CalcFOMDialog)
self.stdcsvplotchoiceComboBox.setGeometry(QtCore.QRect(520, 20, 111, 22))
self.stdcsvplotchoiceComboBox.setObjectName(_fromUtf8("stdcsvplotchoiceComboBox"))
self.label_5 = QtGui.QLabel(CalcFOMDialog)
self.label_5.setGeometry(QtCore.QRect(520, 0, 119, 21))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.colormapLineEdit = QtGui.QLineEdit(CalcFOMDialog)
self.colormapLineEdit.setGeometry(QtCore.QRect(640, 220, 119, 22))
self.colormapLineEdit.setObjectName(_fromUtf8("colormapLineEdit"))
self.label_10 = QtGui.QLabel(CalcFOMDialog)
self.label_10.setGeometry(QtCore.QRect(640, 200, 119, 21))
self.label_10.setObjectName(_fromUtf8("label_10"))
self.label_13 = QtGui.QLabel(CalcFOMDialog)
self.label_13.setGeometry(QtCore.QRect(290, 150, 219, 21))
self.label_13.setObjectName(_fromUtf8("label_13"))
self.TechTypeRadioButton_0 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_0.setGeometry(QtCore.QRect(290, 170, 219, 16))
self.TechTypeRadioButton_0.setText(_fromUtf8(""))
self.TechTypeRadioButton_0.setObjectName(_fromUtf8("TechTypeRadioButton_0"))
self.TechTypeButtonGroup = QtGui.QButtonGroup(CalcFOMDialog)
self.TechTypeButtonGroup.setObjectName(_fromUtf8("TechTypeButtonGroup"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_0)
self.TechTypeRadioButton_1 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_1.setGeometry(QtCore.QRect(290, 190, 219, 16))
self.TechTypeRadioButton_1.setText(_fromUtf8(""))
self.TechTypeRadioButton_1.setObjectName(_fromUtf8("TechTypeRadioButton_1"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_1)
self.TechTypeRadioButton_2 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_2.setGeometry(QtCore.QRect(290, 210, 219, 16))
self.TechTypeRadioButton_2.setText(_fromUtf8(""))
self.TechTypeRadioButton_2.setObjectName(_fromUtf8("TechTypeRadioButton_2"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_2)
self.TechTypeRadioButton_3 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_3.setGeometry(QtCore.QRect(290, 230, 219, 16))
self.TechTypeRadioButton_3.setText(_fromUtf8(""))
self.TechTypeRadioButton_3.setObjectName(_fromUtf8("TechTypeRadioButton_3"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_3)
self.TechTypeRadioButton_4 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_4.setGeometry(QtCore.QRect(290, 250, 219, 16))
self.TechTypeRadioButton_4.setText(_fromUtf8(""))
self.TechTypeRadioButton_4.setObjectName(_fromUtf8("TechTypeRadioButton_4"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_4)
self.TechTypeRadioButton_5 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_5.setGeometry(QtCore.QRect(290, 270, 219, 16))
self.TechTypeRadioButton_5.setText(_fromUtf8(""))
self.TechTypeRadioButton_5.setObjectName(_fromUtf8("TechTypeRadioButton_5"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_5)
self.TechTypeRadioButton_6 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_6.setGeometry(QtCore.QRect(290, 290, 219, 16))
self.TechTypeRadioButton_6.setText(_fromUtf8(""))
self.TechTypeRadioButton_6.setObjectName(_fromUtf8("TechTypeRadioButton_6"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_6)
self.TechTypeRadioButton_7 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_7.setGeometry(QtCore.QRect(290, 310, 219, 16))
self.TechTypeRadioButton_7.setText(_fromUtf8(""))
self.TechTypeRadioButton_7.setObjectName(_fromUtf8("TechTypeRadioButton_7"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_7)
self.TechTypeRadioButton_8 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_8.setGeometry(QtCore.QRect(290, 330, 219, 16))
self.TechTypeRadioButton_8.setText(_fromUtf8(""))
self.TechTypeRadioButton_8.setObjectName(_fromUtf8("TechTypeRadioButton_8"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_8)
self.TechTypeRadioButton_9 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_9.setGeometry(QtCore.QRect(290, 350, 219, 16))
self.TechTypeRadioButton_9.setText(_fromUtf8(""))
self.TechTypeRadioButton_9.setObjectName(_fromUtf8("TechTypeRadioButton_9"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_9)
self.TechTypeRadioButton_10 = QtGui.QRadioButton(CalcFOMDialog)
self.TechTypeRadioButton_10.setGeometry(QtCore.QRect(290, 370, 219, 16))
self.TechTypeRadioButton_10.setText(_fromUtf8(""))
self.TechTypeRadioButton_10.setObjectName(_fromUtf8("TechTypeRadioButton_10"))
self.TechTypeButtonGroup.addButton(self.TechTypeRadioButton_10)
self.label_7 = QtGui.QLabel(CalcFOMDialog)
self.label_7.setGeometry(QtCore.QRect(10, 110, 267, 21))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.ExpRunUseComboBox = QtGui.QComboBox(CalcFOMDialog)
self.ExpRunUseComboBox.setGeometry(QtCore.QRect(0, 130, 267, 20))
self.ExpRunUseComboBox.setObjectName(_fromUtf8("ExpRunUseComboBox"))
self.label_14 = QtGui.QLabel(CalcFOMDialog)
self.label_14.setGeometry(QtCore.QRect(10, 150, 265, 21))
self.label_14.setObjectName(_fromUtf8("label_14"))
self.RunSelectTreeWidget = QtGui.QTreeWidget(CalcFOMDialog)
self.RunSelectTreeWidget.setGeometry(QtCore.QRect(10, 170, 271, 181))
self.RunSelectTreeWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.RunSelectTreeWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.RunSelectTreeWidget.setHeaderHidden(True)
self.RunSelectTreeWidget.setExpandsOnDoubleClick(False)
self.RunSelectTreeWidget.setObjectName(_fromUtf8("RunSelectTreeWidget"))
self.RunSelectTreeWidget.headerItem().setText(0, _fromUtf8("1"))
self.RunSelectTreeWidget.header().setVisible(False)
self.RunSelectTreeWidget.header().setCascadingSectionResizes(False)
self.RunSelectTreeWidget.header().setStretchLastSection(True)
self.UserFOMLineEdit = QtGui.QLineEdit(CalcFOMDialog)
self.UserFOMLineEdit.setGeometry(QtCore.QRect(290, 500, 211, 20))
self.UserFOMLineEdit.setObjectName(_fromUtf8("UserFOMLineEdit"))
self.line_5 = QtGui.QFrame(CalcFOMDialog)
self.line_5.setGeometry(QtCore.QRect(0, 50, 281, 21))
self.line_5.setLineWidth(2)
self.line_5.setFrameShape(QtGui.QFrame.HLine)
self.line_5.setFrameShadow(QtGui.QFrame.Sunken)
self.line_5.setObjectName(_fromUtf8("line_5"))
self.label_21 = QtGui.QLabel(CalcFOMDialog)
self.label_21.setGeometry(QtCore.QRect(290, 480, 219, 20))
self.label_21.setObjectName(_fromUtf8("label_21"))
self.FOMProcessNamesComboBox = QtGui.QComboBox(CalcFOMDialog)
self.FOMProcessNamesComboBox.setGeometry(QtCore.QRect(290, 450, 211, 22))
self.FOMProcessNamesComboBox.setObjectName(_fromUtf8("FOMProcessNamesComboBox"))
self.label_22 = QtGui.QLabel(CalcFOMDialog)
self.label_22.setGeometry(QtCore.QRect(290, 430, 219, 21))
self.label_22.setObjectName(_fromUtf8("label_22"))
self.autoplotCheckBox = QtGui.QCheckBox(CalcFOMDialog)
self.autoplotCheckBox.setGeometry(QtCore.QRect(640, 20, 119, 20))
self.autoplotCheckBox.setChecked(True)
self.autoplotCheckBox.setObjectName(_fromUtf8("autoplotCheckBox"))
self.RaiseErrorPushButton = QtGui.QPushButton(CalcFOMDialog)
self.RaiseErrorPushButton.setGeometry(QtCore.QRect(1120, 0, 31, 21))
self.RaiseErrorPushButton.setObjectName(_fromUtf8("RaiseErrorPushButton"))
self.OpenInfoPushButton = QtGui.QPushButton(CalcFOMDialog)
self.OpenInfoPushButton.setGeometry(QtCore.QRect(70, 10, 91, 21))
self.OpenInfoPushButton.setObjectName(_fromUtf8("OpenInfoPushButton"))
self.expfilenameLineEdit = QtGui.QLineEdit(CalcFOMDialog)
self.expfilenameLineEdit.setGeometry(QtCore.QRect(70, 30, 211, 21))
self.expfilenameLineEdit.setText(_fromUtf8(""))
self.expfilenameLineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.expfilenameLineEdit.setObjectName(_fromUtf8("expfilenameLineEdit"))
self.retranslateUi(CalcFOMDialog)
QtCore.QMetaObject.connectSlotsByName(CalcFOMDialog)
def retranslateUi(self, CalcFOMDialog):
CalcFOMDialog.setWindowTitle(QtGui.QApplication.translate("CalcFOMDialog", "Process Data, Calc FOM from EXP", None, QtGui.QApplication.UnicodeUTF8))
        self.BatchComboBox.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Apply all other filters in this section to only this run", None, QtGui.QApplication.UnicodeUTF8))
self.BatchPushButton.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Considering the files already in the EXP, keep the files that meet all criteria", None, QtGui.QApplication.UnicodeUTF8))
self.BatchPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "Run Batch Process:", None, QtGui.QApplication.UnicodeUTF8))
self.label_11.setText(QtGui.QApplication.translate("CalcFOMDialog", "Analysis description:", None, QtGui.QApplication.UnicodeUTF8))
self.label_17.setText(QtGui.QApplication.translate("CalcFOMDialog", "Analysis name:", None, QtGui.QApplication.UnicodeUTF8))
self.label_18.setText(QtGui.QApplication.translate("CalcFOMDialog", "created by:", None, QtGui.QApplication.UnicodeUTF8))
self.label_19.setText(QtGui.QApplication.translate("CalcFOMDialog", "access:", None, QtGui.QApplication.UnicodeUTF8))
self.UserNameLineEdit.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Comment string to be included in EXP", None, QtGui.QApplication.UnicodeUTF8))
self.UserNameLineEdit.setText(QtGui.QApplication.translate("CalcFOMDialog", "eche", None, QtGui.QApplication.UnicodeUTF8))
self.AnaTypeLineEdit.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Comment string to be included in EXP", None, QtGui.QApplication.UnicodeUTF8))
self.AnaTypeLineEdit.setText(QtGui.QApplication.translate("CalcFOMDialog", "eche", None, QtGui.QApplication.UnicodeUTF8))
self.AnaNameLineEdit.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Comment string to be included in EXP", None, QtGui.QApplication.UnicodeUTF8))
self.AnaNameLineEdit.setText(QtGui.QApplication.translate("CalcFOMDialog", "eche", None, QtGui.QApplication.UnicodeUTF8))
self.AccessLineEdit.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Comment string to be included in EXP", None, QtGui.QApplication.UnicodeUTF8))
self.AccessLineEdit.setText(QtGui.QApplication.translate("CalcFOMDialog", "hte", None, QtGui.QApplication.UnicodeUTF8))
self.label_16.setText(QtGui.QApplication.translate("CalcFOMDialog", "Analysis type:", None, QtGui.QApplication.UnicodeUTF8))
self.AnaDescLineEdit.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Comment string to be included in EXP.\n"
"If you modify the beginning with a\"<comment>;\" the \n"
"comment will remain as you change analysis options", None, QtGui.QApplication.UnicodeUTF8))
self.AnalyzeDataPushButton.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Perform the selected analysis", None, QtGui.QApplication.UnicodeUTF8))
self.AnalyzeDataPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "Analyze Data", None, QtGui.QApplication.UnicodeUTF8))
self.EditDfltVisPushButton.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Edit the FOM visualization parameters in the .csv\n"
"ONLY WORKS ON MOST RECENT \"Analyze Data\"", None, QtGui.QApplication.UnicodeUTF8))
self.EditDfltVisPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "Update Dflt Vis", None, QtGui.QApplication.UnicodeUTF8))
self.ClearAnalysisPushButton.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Clear the analysis, removing intermediate data and FOMs", None, QtGui.QApplication.UnicodeUTF8))
self.ClearAnalysisPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "Clear Analysis", None, QtGui.QApplication.UnicodeUTF8))
        self.EditAnalysisParamsPushButton.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Edit parameters involved in the analysis", None, QtGui.QApplication.UnicodeUTF8))
self.EditAnalysisParamsPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "Edit Params", None, QtGui.QApplication.UnicodeUTF8))
self.SaveAnaPushButton.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Save .fom, FOR THE SELECTED ANALYSIS TYPE ONLY.\n"
" Intermediate data will also be saved", None, QtGui.QApplication.UnicodeUTF8))
self.SaveAnaPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "Save ANA", None, QtGui.QApplication.UnicodeUTF8))
self.ImportAnalysisParamsPushButton.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Import a .par file", None, QtGui.QApplication.UnicodeUTF8))
self.ImportAnalysisParamsPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "Import Params", None, QtGui.QApplication.UnicodeUTF8))
self.ViewResultPushButton.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Send Raw, Intermediate and FOM data to the Visualize window", None, QtGui.QApplication.UnicodeUTF8))
self.ViewResultPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "View Result", None, QtGui.QApplication.UnicodeUTF8))
self.ClearSingleAnalysisPushButton.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Clear the analysis, removing intermediate data and FOMs", None, QtGui.QApplication.UnicodeUTF8))
self.ClearSingleAnalysisPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "Del 1 ana__x", None, QtGui.QApplication.UnicodeUTF8))
self.SaveViewPushButton.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Send Raw, Intermediate and FOM data to the Visualize window", None, QtGui.QApplication.UnicodeUTF8))
self.SaveViewPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "Save+View", None, QtGui.QApplication.UnicodeUTF8))
self.UpdatePlotPushButton.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Edit the FOM visualization parameters in the .csv\n"
"ONLY WORKS ON MOST RECENT \"Analyze Data\"", None, QtGui.QApplication.UnicodeUTF8))
self.UpdatePlotPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "Update Plots", None, QtGui.QApplication.UnicodeUTF8))
self.ImportExpPushButton.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Import a .exp file, which will provide options for the data type, RUNs and analysis type", None, QtGui.QApplication.UnicodeUTF8))
self.ImportExpPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "Import EXP", None, QtGui.QApplication.UnicodeUTF8))
self.ImportAnaPushButton.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Grab the EXP from the \"Create EXP\" window", None, QtGui.QApplication.UnicodeUTF8))
self.ImportAnaPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "Open ANA", None, QtGui.QApplication.UnicodeUTF8))
self.AnalysisNamesComboBox.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "The name of the functions that will be applied to data\n"
"to generate Intermediate and FOM results", None, QtGui.QApplication.UnicodeUTF8))
self.label_20.setText(QtGui.QApplication.translate("CalcFOMDialog", "Choose analysis function:", None, QtGui.QApplication.UnicodeUTF8))
self.getplatemapCheckBox.setText(QtGui.QApplication.translate("CalcFOMDialog", "Get platemaps", None, QtGui.QApplication.UnicodeUTF8))
        self.CompPlotOrderComboBox.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Apply all other filters in this section to only this run", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("CalcFOMDialog", "Element plot order:", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("CalcFOMDialog", "Comp. plot type:", None, QtGui.QApplication.UnicodeUTF8))
        self.CompPlotTypeComboBox.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Apply all other filters in this section to only this run", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("CalcFOMDialog", "Comp. point size:", None, QtGui.QApplication.UnicodeUTF8))
self.compplotsizeLineEdit.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Comment string to be included in EXP", None, QtGui.QApplication.UnicodeUTF8))
self.compplotsizeLineEdit.setText(QtGui.QApplication.translate("CalcFOMDialog", "patch", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("CalcFOMDialog", "fom to plot", None, QtGui.QApplication.UnicodeUTF8))
        self.fomplotchoiceComboBox.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Apply all other filters in this section to only this run", None, QtGui.QApplication.UnicodeUTF8))
self.usedaqtimeCheckBox.setText(QtGui.QApplication.translate("CalcFOMDialog", "Use DAQ time", None, QtGui.QApplication.UnicodeUTF8))
self.label_9.setText(QtGui.QApplication.translate("CalcFOMDialog", "above color", None, QtGui.QApplication.UnicodeUTF8))
self.aboverangecolLineEdit.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Comment string to be included in EXP", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("CalcFOMDialog", "below color", None, QtGui.QApplication.UnicodeUTF8))
self.belowrangecolLineEdit.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Comment string to be included in EXP", None, QtGui.QApplication.UnicodeUTF8))
self.label_8.setText(QtGui.QApplication.translate("CalcFOMDialog", "fom range min,max", None, QtGui.QApplication.UnicodeUTF8))
self.vminmaxLineEdit.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Comment string to be included in EXP", None, QtGui.QApplication.UnicodeUTF8))
        self.stdcsvplotchoiceComboBox.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Apply all other filters in this section to only this run", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("CalcFOMDialog", "standard plot", None, QtGui.QApplication.UnicodeUTF8))
self.colormapLineEdit.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Comment string to be included in EXP", None, QtGui.QApplication.UnicodeUTF8))
self.colormapLineEdit.setText(QtGui.QApplication.translate("CalcFOMDialog", "jet", None, QtGui.QApplication.UnicodeUTF8))
self.label_10.setText(QtGui.QApplication.translate("CalcFOMDialog", "colormap", None, QtGui.QApplication.UnicodeUTF8))
self.label_13.setText(QtGui.QApplication.translate("CalcFOMDialog", "Choose analysis scope:", None, QtGui.QApplication.UnicodeUTF8))
self.label_7.setText(QtGui.QApplication.translate("CalcFOMDialog", "Primary data type (run_use)", None, QtGui.QApplication.UnicodeUTF8))
self.ExpRunUseComboBox.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "This \"use\" is specified in the EXP \n"
"and determines what types of analysis \n"
"can be performed", None, QtGui.QApplication.UnicodeUTF8))
self.label_14.setText(QtGui.QApplication.translate("CalcFOMDialog", "Choose RUNs to include:", None, QtGui.QApplication.UnicodeUTF8))
self.UserFOMLineEdit.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "enter comma-delimited list of string or\n"
"number FOMS that will become a constant column in the .csv generated by \"Analyze Data\".\n"
"After entry complete, you will be prompted for fom names", None, QtGui.QApplication.UnicodeUTF8))
self.label_21.setText(QtGui.QApplication.translate("CalcFOMDialog", "User-defined FOMs", None, QtGui.QApplication.UnicodeUTF8))
self.FOMProcessNamesComboBox.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "The name of the functions that will be applied to data\n"
"to generate Intermediate and FOM results", None, QtGui.QApplication.UnicodeUTF8))
self.label_22.setText(QtGui.QApplication.translate("CalcFOMDialog", "Choose FOM post-process function:", None, QtGui.QApplication.UnicodeUTF8))
self.autoplotCheckBox.setText(QtGui.QApplication.translate("CalcFOMDialog", "Auto plot ana__x", None, QtGui.QApplication.UnicodeUTF8))
self.RaiseErrorPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "err", None, QtGui.QApplication.UnicodeUTF8))
self.OpenInfoPushButton.setText(QtGui.QApplication.translate("CalcFOMDialog", "Open via Search", None, QtGui.QApplication.UnicodeUTF8))
self.expfilenameLineEdit.setToolTip(QtGui.QApplication.translate("CalcFOMDialog", "Comment string to be included in EXP", None, QtGui.QApplication.UnicodeUTF8))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
CalcFOMDialog = QtGui.QDialog()
ui = Ui_CalcFOMDialog()
ui.setupUi(CalcFOMDialog)
CalcFOMDialog.show()
sys.exit(app.exec_())
```
#### File: JCAPDataProcess/QtForms/SaveButtonForm.py
```python
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_SaveOptionsDialog(object):
def setupUi(self, SaveOptionsDialog):
SaveOptionsDialog.setObjectName(_fromUtf8("SaveOptionsDialog"))
SaveOptionsDialog.resize(353, 63)
self.dfltButton = QtGui.QPushButton(SaveOptionsDialog)
self.dfltButton.setGeometry(QtCore.QRect(10, 20, 75, 23))
self.dfltButton.setObjectName(_fromUtf8("dfltButton"))
self.tempButton = QtGui.QPushButton(SaveOptionsDialog)
self.tempButton.setGeometry(QtCore.QRect(90, 20, 75, 23))
self.tempButton.setObjectName(_fromUtf8("tempButton"))
self.browseButton = QtGui.QPushButton(SaveOptionsDialog)
self.browseButton.setGeometry(QtCore.QRect(170, 20, 75, 23))
self.browseButton.setObjectName(_fromUtf8("browseButton"))
self.cancelButton = QtGui.QPushButton(SaveOptionsDialog)
self.cancelButton.setGeometry(QtCore.QRect(250, 20, 75, 23))
self.cancelButton.setObjectName(_fromUtf8("cancelButton"))
self.retranslateUi(SaveOptionsDialog)
QtCore.QMetaObject.connectSlotsByName(SaveOptionsDialog)
def retranslateUi(self, SaveOptionsDialog):
SaveOptionsDialog.setWindowTitle(QtGui.QApplication.translate("SaveOptionsDialog", "Choose K: folder", None, QtGui.QApplication.UnicodeUTF8))
self.dfltButton.setText(QtGui.QApplication.translate("SaveOptionsDialog", "x", None, QtGui.QApplication.UnicodeUTF8))
self.tempButton.setText(QtGui.QApplication.translate("SaveOptionsDialog", "TEMP", None, QtGui.QApplication.UnicodeUTF8))
self.browseButton.setText(QtGui.QApplication.translate("SaveOptionsDialog", "Browse", None, QtGui.QApplication.UnicodeUTF8))
self.cancelButton.setText(QtGui.QApplication.translate("SaveOptionsDialog", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
SaveOptionsDialog = QtGui.QDialog()
ui = Ui_SaveOptionsDialog()
ui.setupUi(SaveOptionsDialog)
SaveOptionsDialog.show()
sys.exit(app.exec_())
```
#### File: JCAPDataProcess/QtForms/SaveImagesForm.py
```python
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_SaveImagesDialog(object):
def setupUi(self, SaveImagesDialog):
SaveImagesDialog.setObjectName(_fromUtf8("SaveImagesDialog"))
SaveImagesDialog.resize(579, 408)
self.buttonBox = QtGui.QDialogButtonBox(SaveImagesDialog)
self.buttonBox.setGeometry(QtCore.QRect(410, 360, 161, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.FilesTreeWidget = QtGui.QTreeWidget(SaveImagesDialog)
self.FilesTreeWidget.setGeometry(QtCore.QRect(10, 10, 561, 341))
self.FilesTreeWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.FilesTreeWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.FilesTreeWidget.setHeaderHidden(True)
self.FilesTreeWidget.setExpandsOnDoubleClick(False)
self.FilesTreeWidget.setObjectName(_fromUtf8("FilesTreeWidget"))
self.FilesTreeWidget.headerItem().setText(0, _fromUtf8("1"))
self.FilesTreeWidget.header().setVisible(False)
self.FilesTreeWidget.header().setCascadingSectionResizes(False)
self.FilesTreeWidget.header().setStretchLastSection(True)
self.overwriteCheckBox = QtGui.QCheckBox(SaveImagesDialog)
self.overwriteCheckBox.setGeometry(QtCore.QRect(10, 370, 101, 31))
self.overwriteCheckBox.setChecked(True)
self.overwriteCheckBox.setObjectName(_fromUtf8("overwriteCheckBox"))
self.doneCheckBox = QtGui.QCheckBox(SaveImagesDialog)
self.doneCheckBox.setGeometry(QtCore.QRect(120, 370, 81, 31))
self.doneCheckBox.setChecked(False)
self.doneCheckBox.setObjectName(_fromUtf8("doneCheckBox"))
self.epsCheckBox = QtGui.QCheckBox(SaveImagesDialog)
self.epsCheckBox.setGeometry(QtCore.QRect(215, 370, 71, 31))
self.epsCheckBox.setChecked(True)
self.epsCheckBox.setObjectName(_fromUtf8("epsCheckBox"))
self.prependfilenameLineEdit = QtGui.QLineEdit(SaveImagesDialog)
self.prependfilenameLineEdit.setGeometry(QtCore.QRect(285, 380, 113, 20))
self.prependfilenameLineEdit.setObjectName(_fromUtf8("prependfilenameLineEdit"))
self.label = QtGui.QLabel(SaveImagesDialog)
self.label.setGeometry(QtCore.QRect(290, 360, 101, 20))
self.label.setObjectName(_fromUtf8("label"))
self.retranslateUi(SaveImagesDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), SaveImagesDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), SaveImagesDialog.reject)
QtCore.QMetaObject.connectSlotsByName(SaveImagesDialog)
def retranslateUi(self, SaveImagesDialog):
SaveImagesDialog.setWindowTitle(QtGui.QApplication.translate("SaveImagesDialog", "ChooseImagesToSave", None, QtGui.QApplication.UnicodeUTF8))
self.overwriteCheckBox.setText(QtGui.QApplication.translate("SaveImagesDialog", "overwrite files\n"
"with same name", None, QtGui.QApplication.UnicodeUTF8))
self.doneCheckBox.setText(QtGui.QApplication.translate("SaveImagesDialog", "convert\n"
"to .done", None, QtGui.QApplication.UnicodeUTF8))
self.epsCheckBox.setText(QtGui.QApplication.translate("SaveImagesDialog", "also save\n"
".eps", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("SaveImagesDialog", "Prepend to filename:", None, QtGui.QApplication.UnicodeUTF8))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
SaveImagesDialog = QtGui.QDialog()
ui = Ui_SaveImagesDialog()
ui.setupUi(SaveImagesDialog)
SaveImagesDialog.show()
sys.exit(app.exec_())
``` |
{
"source": "johnmgregoire/JCAPdatavis",
"score": 2
} |
#### File: johnmgregoire/JCAPdatavis/custom_photocurrent_single_measurement.py
```python
import time
import os, os.path
import sys
import numpy
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import operator
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
import numpy.ma as ma
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import pylab
import pickle
from echem_plate_math import *
from echem_plate_fcns import *
PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
from matplotlib.ticker import FuncFormatter
from matplotlib.ticker import ScalarFormatter
matplotlib.rcParams['backend.qt4'] = 'PyQt4'
def myexpformat_2digs(x, pos):
return '%.2e' %x
ExpTickLabels=FuncFormatter(myexpformat_2digs)
RegTickLabels=matplotlib.ticker.ScalarFormatter()
def autotickformat(ax, x=False, y=False, ndec=3):
for bl, xax, lims in zip([x, y], [ax.xaxis, ax.yaxis], [ax.get_xlim(), ax.get_ylim()]):
if bl:
try:
doit=numpy.max(numpy.log10(numpy.abs(numpy.array(lims))))<(-ndec)
doit=doit or numpy.min(numpy.log10(numpy.abs(numpy.array(lims))))>ndec
except:
print 'error on axis formatter for lims ', lims
continue
if doit:
xax.set_major_formatter(ExpTickLabels)
else:
xax.set_major_formatter(RegTickLabels)
class messageDialog(QDialog):
def __init__(self, parent=None, title=''):
super(messageDialog, self).__init__(parent)
self.setWindowTitle(title)
mainlayout=QGridLayout()
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
QObject.connect(self.buttonBox, SIGNAL("rejected()"), self.reject)
mainlayout.addWidget(self.buttonBox, 0, 0)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
def ExitRoutine(self):
return
def mygetopenfile(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfn = unicode(QFileDialog.getOpenFileName(xparent,''.join(['Select file to open:', markstr]),os.path.join(xpath, filename).replace('\\','/')))
xparent.destroy()
xapp.quit()
return returnfn
return unicode(QFileDialog.getOpenFileName(parent,''.join(['Select file to open: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
def mygetsavefile(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfn = unicode(QFileDialog.getSaveFileName(xparent,''.join(['Select file for save: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
xparent.destroy()
xapp.quit()
return returnfn
return unicode(QFileDialog.getSaveFileName(parent,''.join(['Select file for save: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
def mygetdir(parent=None, xpath="%s" % os.getcwd(),markstr='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfn = unicode(QFileDialog.getExistingDirectory(xparent,''.join(['Select directory:', markstr]), xpath))
xparent.destroy()
xapp.quit()
return returnfn
return unicode(QFileDialog.getExistingDirectory(parent,''.join(['Select directory:', markstr]), xpath))
def userinputcaller(parent, inputs=[('testnumber', int)], title='Enter values', cancelallowed=True):
problem=True
while problem:
idialog=userinputDialog(parent, inputs, title)
idialog.exec_()
problem=idialog.problem
if not idialog.ok and cancelallowed:
return None
inputs=[(tup[0], tup[1], s) for tup, s in zip(inputs, idialog.inputstrlist)]
return idialog.ans
class userinputDialog(QDialog):
def __init__(self, parent, inputs=[('testnumber', int, '')], title='Enter values'):
super(userinputDialog, self).__init__(parent)
self.setWindowTitle(title)
mainlayout=QGridLayout()
self.parent=parent
self.inputs=inputs
self.lelist=[]
for i, tup in enumerate(self.inputs):
lab=QLabel()
lab.setText(tup[0])
le=QLineEdit()
if len(tup)>2:
le.setText(tup[2])
self.lelist+=[le]
mainlayout.addWidget(lab, 0, i, 1, 1)
mainlayout.addWidget(le, 1, i, 1, 1)
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
mainlayout.addWidget(self.buttonBox, 2, 0, len(inputs), 1)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
self.setLayout(mainlayout)
QMetaObject.connectSlotsByName(self)
self.problem=False
self.ok=False
def ExitRoutine(self):
self.ok=True
self.problem=False
self.ans=[]
self.inputstrlist=[str(le.text()).strip() for le in self.lelist]
for s, tup in zip(self.inputstrlist, self.inputs):
if tup[1]==str:
try:
self.ans+=[s]
except:
self.problem=True
break
else:
try:
n=myeval(s)
self.ans+=[tup[1](n)]
except:
self.problem=True
break
if self.problem:
idialog=messageDialog(self, 'problem with conversion of ' + tup[0])
idialog.exec_()
class tempDialog(QDialog):
def __init__(self, parent=None, title='', folderpath=None):
super(tempDialog, self).__init__(parent)
self.parent=parent
# self.echem30=echem30axesWidget()
# self.echem30.show()
self.plotillumkey=None
self.techniquedictlist=[]
self.plotw_0=plotwidget(self)
self.plotw_1=plotwidget(self)
self.plotw_2=plotwidget(self)
self.plotw_3=plotwidget(self)
self.plotw_0.fig.subplots_adjust(left=.2)
self.plotw_1.fig.subplots_adjust(left=.2)
self.plotw_2.fig.subplots_adjust(left=.2)
self.plotw_3.fig.subplots_adjust(left=.2)
#in options, always make an option that does not require user input at index 0
CVops=[\
['Imax', ['I(A)'], []], \
['Imin', ['I(A)'], []], \
['E_Ithresh', ['I(A)','Ewe(V)'], [['Ithresh(A)', float, '1e-5'], ['Num consec points', int, '20'], ['0 for below, 1 for above', int, '1'], ['Thresh not reached value', float, '1']]], \
['Eh in I=Io Exp(E/Eh)', ['I(A)', 'Ewe(V)'], []], \
['Io in I=Io Exp(E/Eh)', ['I(A)', 'Ewe(V)'], []], \
['Iphoto_max', ['Illum', 'I(A)', 'Ewe(V)', 't(s)'], [['frac of Illum segment start', float, '0.4'], ['frac of Illum segment end', float, '0.95'], ['frac of Dark segment start', float, '0.4'], ['frac of Dark segment end', float, '0.95'], ['Illum signal key', str, 'Toggle'], ['Illum signal time shift (s)', float, '0.'], ['Illum Threshold', float, '0.5'], ['Illum Invert', int, '0'], ['num illum cycles', int, '2'], ['0 from beginning, 1 from end', int, '1']]], \
['Iphoto_min', ['Illum', 'I(A)', 'Ewe(V)', 't(s)'], [['frac of Illum segment start', float, '0.4'], ['frac of Illum segment end', float, '0.95'], ['frac of Dark segment start', float, '0.4'], ['frac of Dark segment end', float, '0.95'], ['Illum signal key', str, 'Toggle'], ['Illum signal time shift (s)', float, '0.'], ['Illum Threshold', float, '0.5'], ['Illum Invert', int, '0'], ['num illum cycles', int, '2'], ['0 from beginning, 1 from end', int, '1']]], \
['None', ['I(A)', 'Ewe(V)'], []], \
]
OCVops=[\
['Efin', ['Ewe(V)'], []], \
['Eave', ['Ewe(V)', 't(s)'], [['Interval(s)', float, '2.'], ['Num StdDev outlier', float, '2.'], ['Num Pts in Window', int, '999999'], ['0 from beginning, 1 from end', int, '1']]], \
['Ess', ['Ewe(V)'], [['Weight Exponent for NumPts', float, '1.'], ['NumPts test interval', int, '10']]], \
['Ephoto', ['Illum', 'Ewe(V)', 'I(A)', 't(s)'], [['frac of Illum segment start', float, '0.4'], ['frac of Illum segment end', float, '0.95'], ['frac of Dark segment start', float, '0.4'], ['frac of Dark segment end', float, '0.95'], ['Illum signal key', str, 'Toggle'], ['Illum signal time shift (s)', float, '0.'], ['Illum Threshold', float, '0.5'], ['Illum Invert', int, '0'], ['num illum cycles', int, '2'], ['0 from beginning, 1 from end', int, '1']]], \
]
CPops=[\
['Efin', ['Ewe(V)'], []], \
['Eave', ['Ewe(V)', 't(s)'], [['Interval(s)', float, '2.'], ['Num StdDev outlier', float, '2.'], ['Num Pts in Window', int, '999999'], ['0 from beginning, 1 from end', int, '1']]], \
['Ess', ['Ewe(V)'], [['Weight Exponent for NumPts', float, '1.'], ['NumPts test interval', int, '10']]], \
['Ephoto', ['Illum', 'Ewe(V)', 'I(A)', 't(s)'], [['frac of Illum segment start', float, '0.4'], ['frac of Illum segment end', float, '0.95'], ['frac of Dark segment start', float, '0.4'], ['frac of Dark segment end', float, '0.95'], ['Illum signal key', str, 'Toggle'], ['Illum signal time shift (s)', float, '0.'], ['Illum Threshold', float, '0.5'], ['Illum Invert', int, '0'], ['num illum cycles', int, '2'], ['0 from beginning, 1 from end', int, '1']]], \
]
CAops=[\
['Ifin', ['I(A)'], []], \
['Iave', ['I(A)', 't(s)'], [['Interval(s)', float, '2.'], ['Num StdDev outlier', float, '2.'], ['Num Pts in Window', int, '999999'], ['0 from beginning, 1 from end', int, '1']]], \
['Iss', ['I(A)'], [['Weight Exponent for NumPts', float, '1.'], ['NumPts test interval', int, '10']]], \
['Iphoto', ['Illum', 'I(A)', 'Ewe(V)', 't(s)'], [['frac of Illum segment start', float, '0.4'], ['frac of Illum segment end', float, '0.95'], ['frac of Dark segment start', float, '0.4'], ['frac of Dark segment end', float, '0.95'], ['Illum signal key', str, 'Toggle'], ['Illum signal time shift (s)', float, '0.'], ['Illum Threshold', float, '0.5'], ['Illum Invert', int, '0'], ['num illum cycles', int, '2'], ['0 from beginning, 1 from end', int, '1']]], \
]
Bubbleops=[\
['slopefin', ['Maxslope'], []], \
['Intfin', ['Intensity'], []], \
]
self.expmnt_calc_options=[['OCV', OCVops], ['CP', CPops], ['CA', CAops], ['CV', CVops], ['Bubble', Bubbleops]]
self.expmnt_calc_lastusedvals=[[[] for calcopt in opslist] for opname, opslist in self.expmnt_calc_options]
folderButton=QPushButton()
folderButton.setText("select\nfile")
QObject.connect(folderButton, SIGNAL("pressed()"), self.selectfile)
updateButton=QPushButton()
updateButton.setText("update\ndata")
QObject.connect(updateButton, SIGNAL("pressed()"), self.calcandplot)
saveButton=QPushButton()
saveButton.setText("save\nspreadhseet")
QObject.connect(saveButton, SIGNAL("pressed()"), self.writeillumtxt)
paramsButton=QPushButton()
paramsButton.setText("edit params")
QObject.connect(paramsButton, SIGNAL("pressed()"), self.getcalcparams)
addButton=QPushButton()
addButton.setText("add to\nfig 4")
QObject.connect(addButton, SIGNAL("pressed()"), self.addplot)
self.labelLineEdit=QLineEdit()
self.overlayselectCheckBox=QCheckBox()
self.overlayselectCheckBox.setText("overlay")
savebuttonlayout=QHBoxLayout()
savebuttonlayout.addWidget(folderButton)
savebuttonlayout.addWidget(paramsButton)
savebuttonlayout.addWidget(updateButton)
savebuttonlayout.addWidget(saveButton)
savebuttonlayout.addWidget(addButton)
savebuttonlayout.addWidget(self.labelLineEdit)
savebuttonlayout.addWidget(self.overlayselectCheckBox)
mainlayout=QGridLayout()
mainlayout.addLayout(savebuttonlayout, 0, 0, 1, 2)
mainlayout.addWidget(self.plotw_0, 1, 0)
mainlayout.addWidget(self.plotw_1, 1,1)
mainlayout.addWidget(self.plotw_2, 2, 0)
mainlayout.addWidget(self.plotw_3, 2, 1)
self.setLayout(mainlayout)
self.resize(1100, 850)
self.selectfile()
self.getcalcparams()
self.calcandplot()
def selectfile(self, plate_id=None, selectexids=None, folder=None):
self.p=mygetopenfile(self, markstr='select CA photo .txt file')
self.techdict=readechemtxt(self.p)
def getcalcparams(self):
i=2
j=3
tup=self.expmnt_calc_options[i][1][j]
inputs=tup[2]
if len(self.expmnt_calc_lastusedvals[i][j])==len(inputs):
for count, v in enumerate(self.expmnt_calc_lastusedvals[i][j]):
inputs[count][2]=(isinstance(v, str) and (v,) or (`v`,))[0]
if len(inputs)>0:
self.CalcParams=userinputcaller(self, inputs=inputs, title='Enter Calculation Parameters')
self.expmnt_calc_lastusedvals[i][j]=self.CalcParams
print type(self.expmnt_calc_lastusedvals[i][j]), self.expmnt_calc_lastusedvals[i][j]
#self.CalcAllFOM()
#self.plot()
def CalcFOM(self):
i=2
j=3
tup=self.expmnt_calc_options[i][1][j]
fcnnam=tup[0]
self.calckeys=tup[1]
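#CalcParams follows the 'Iphoto' input list defined above:
#[0:4] illum/dark fractional ranges, [4] illum signal key, [5] time shift (s),
#[6] illum threshold, [7] invert flag, [8] num illum cycles to use, [9] 0=from beginning, 1=from end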
ikey=self.CalcParams[4]
tshift=self.CalcParams[5]
if tshift!=0:
newikey='IllumMod'
self.techdict[newikey]=illumtimeshift(self.techdict, ikey, self.calckeys[3], tshift)
ikey=newikey
if self.CalcParams[7]!=0:
self.techdict[ikey]*=-1
elif self.CalcParams[7]!=0:
newikey='IllumMod'
self.techdict[newikey]=-1*self.techdict[ikey]
ikey=newikey
illkey=self.calckeys[1]+'_illdiff'
err=calcdiff_ill_caller(self.techdict, ikey=ikey, thresh=self.CalcParams[6], ykeys=[self.calckeys[1]], xkeys=list(self.calckeys[2:]), illfracrange=(self.CalcParams[0], self.CalcParams[1]), darkfracrange=(self.CalcParams[2], self.CalcParams[3]))
try:
if err or len(self.techdict[illkey])==0:
return 0
self.plotillumkey='IllumBool'
ncycs=self.CalcParams[8]
fromend=self.CalcParams[9]
if fromend:
arr=self.techdict[illkey][::-1]
else:
arr=self.techdict[illkey]
arr=arr[:ncycs]
if 'min' in fcnnam:
returnval=min(arr)
elif 'max' in fcnnam:
returnval=max(arr)
else:
returnval=numpy.mean(arr)
except:
print 'ERROR PHOTOCURRENT CALCULATION'
return None
return returnval
def calcandplot(self, ext='.txt', dbupdate=False):
fom=self.CalcFOM()
if fom is None:
return
print 'FOM:', fom
self.plotw_0.axes.cla()
self.plotw_0.axes.plot(self.techdict['t(s)'], self.techdict['I(A)'],'b.-')
self.plotw_0.axes.set_xlabel('t(s)')
self.plotw_0.axes.set_ylabel('I(A)')
autotickformat(self.plotw_0.axes, x=0, y=1)
self.plotw_0.fig.canvas.draw()
self.plotw_1.axes.cla()
self.plotw_1.axes.plot(self.techdict['t(s)'], self.techdict['I(A)'],'b-')
self.plotw_1.axes.plot(self.techdict['t(s)_dark'], self.techdict['I(A)_dark'],'g.')
self.plotw_1.axes.plot(self.techdict['t(s)_ill'], self.techdict['I(A)_ill'],'k.')
self.plotw_1.axes.set_xlabel('t(s)')
self.plotw_1.axes.set_ylabel('I(A)')
autotickformat(self.plotw_1.axes, x=0, y=1)
self.plotw_1.fig.canvas.draw()
self.plotw_2.axes.cla()
self.plotw_2.axes.plot(self.techdict['t(s)_ill'], self.techdict['I(A)_illdiff'],'k.-')
self.plotw_2.axes.set_xlabel('t(s)')
self.plotw_2.axes.set_ylabel('Iphoto(A)')
autotickformat(self.plotw_2.axes, x=0, y=1)
self.plotw_2.fig.canvas.draw()
def addplot(self):
lab=str(self.labelLineEdit.text())
if not self.overlayselectCheckBox.isChecked():
self.plotw_3.axes.cla()
self.plotw_3.axes.plot(self.techdict['t(s)_ill'], self.techdict['I(A)_illdiff'],'.-', label=lab)
self.plotw_3.axes.set_xlabel('t(s)')
self.plotw_3.axes.set_ylabel('Iphoto(A)')
autotickformat(self.plotw_3.axes, x=0, y=1)
self.plotw_3.axes.legend(loc=0).draggable()
self.plotw_3.fig.canvas.draw()
def writeillumtxt(self, p=None, explab=None, saved=False):
if p is None:
p=mygetsavefile(parent=self, markstr='save spreadsheet of dark and illum photocurrent', xpath=self.p)
if not p:
print 'save aborted'
return
labels=['t(s)_dark', 'I(A)_dark', 't(s)_ill', 'I(A)_ill', 'I(A)_illdiff']
lines=['%column_headings='+'\t'.join(labels)]
lines+=['\t'.join(tup) for tup in zip(*[['%.3e' %v for v in self.techdict[k]] for k in labels])]
s='\n'.join(lines)
f=open(p, mode='w')
f.write(s)
f.close()
if saved:
f=open(p[:-4]+'.pck', mode='w')
pickle.dump(self.techdict, f)
f.close()
class messageDialog(QDialog):
def __init__(self, parent=None, title=''):
super(messageDialog, self).__init__(parent)
self.setWindowTitle(title)
mainlayout=QGridLayout()
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
QObject.connect(self.buttonBox, SIGNAL("rejected()"), self.reject)
mainlayout.addWidget(self.buttonBox, 0, 0)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
def ExitRoutine(self):
return
class plotwidget(FigureCanvas):
def __init__(self, parent, width=12, height=6, dpi=72, projection3d=False):
#plotdata can be a 2D array for an image plot, a list of two 1D arrays for an x-y plot, or a list of lists of two 1D arrays
self.fig=Figure(figsize=(width, height), dpi=dpi)
if projection3d:
self.axes=self.fig.add_subplot(111, navigate=True, projection='3d')
else:
self.axes=self.fig.add_subplot(111, navigate=True)
self.axes.hold(True)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
#self.parent=parent
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
#NavigationToolbar(self, parent)
NavigationToolbar(self, self)
self.mpl_connect('button_press_event', self.myclick)
self.clicklist=[]
def myclick(self, event):
if not (event.xdata is None or event.ydata is None):
arrayxy=[event.xdata, event.ydata]
print 'clicked on image: array indices ', arrayxy, ' using button', event.button
self.clicklist+=[arrayxy]
self.emit(SIGNAL("genericclickonplot"), [event.xdata, event.ydata, event.button])
if __name__ == "__main__":
class MainMenu(QMainWindow):
def __init__(self, previousmm, execute=True, **kwargs):#, TreeWidg):
super(MainMenu, self).__init__(None)
#self.setupUi(self)
self.expui=tempDialog(self, title='Custom Photocurrent Calculator', **kwargs)
#self.expui.importruns(pathlist=['20150422.145113.donex.zip'])
#self.expui.importruns(pathlist=['uvis'])
if execute:
self.expui.exec_()
#os.chdir('//htejcap.caltech.edu/share/home/users/hte/demo_proto')
mainapp=QApplication(sys.argv)
form=MainMenu(None)
form.show()
form.setFocus()
#form.expui.exec_()
mainapp.exec_()
```
#### File: johnmgregoire/JCAPdatavis/cvprocess_tempTafel_pickledlist.py
```python
import time, copy
import os, os.path
import sys
import numpy
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from scipy import optimize
from echem_plate_ui import *
from echem_plate_math import *
import pickle
import pylab
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20120728NiFeCoTiplate1_test21Aug2012'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate1_dlist.dat'
#os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate1_LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate1'
#vshift=-.2
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9FeCoNiTi_500C_fast_CPCV_plate3_dlist.dat'
##os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate3_LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate3'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9FeCoNiTi_500C_fast_CPCV_plate2_dlist.dat'
##os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate3_LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate2'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCPCV_plate1_dlist.dat'
#os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate1_LinSubPlots2')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate1'
#
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastrep2_plate1_dlist.dat'
##os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate3_LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastrep2_plate1'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastrep3_plate1_dlist.dat'
##os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate3_LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastrep3_plate1'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9FeCoNiTi_500C_CPCV_Plate3-rerun_dlist.dat'
##os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fastCV_plate3_LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/2012-9_FeCoNiTi/results/2012-9_FeCoNiTi_500C_fast_plate3'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121108NiFeCoAl_F/results/NiFeCoAl_F_plate3_dlist.dat'
#os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121108NiFeCoAl_F/results/plate3/LinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121108NiFeCoAl_F/results/'
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results/20121031NiFeCoTi_P_plate1_dlist.dat'#20121101NiFeCoTi_P_plate3_dlist.dat'
#os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results/plate1/LogLinSubPlots')
#savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20121031NiFeCoTi_P/results/plate1'
pl=2
os.chdir('C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/results/plate%d/LogLinSubPlots'%pl)
savefolder='C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/results/plate%d' %pl
if pl==1:
p='C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/results/20130402NiFeCoCe_Plate1_5500_dlist.dat';vshift=-(.187-.0)
elif pl==2:
p='C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/results/20130403NiFeCoCe_Plate2_5498_dlist.dat';vshift=-(.187-0)
elif pl==3:
p='C:/Users/gregoire/Documents/EchemDropRawData/NiFeCoCe/results/20130403NiFeCoCe_Plate3_4835_dlist.dat';vshift=-(.187-0)
#vshift=0.#-.177#-.24
f=open(p, mode='r')
dlist=pickle.load(f)
f.close()
##filter dlist
dlist=[d for d in dlist if 'I(A)_LinSub' in d.keys()]
SGpts=10
booldev_frac=.5#this is for a binary array so anything between 0 and 1 is legit
booldev_nout=3
booldn_segstart=3*booldev_nout
dx=d['dE']
dydev_frac=.2
dydev_nout=5
dn_segstart=3*dydev_nout
dydev_abs=0.
plotbool=0
SegSG_dlist(dlist, SGpts=SGpts, order=1, k='I(A)_LinSub')
##save csv of FOM
##calculate V for critical I, etc
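#for each sample: take the first CV segment, find the longest stretch of positive
#(>5e-8 A) background-subtracted current, then locate the longest linear region of
#log10(I) vs E within it; the fitted slope/intercept are stored as the Tafel slope
#and the Tafel value at the segment start potential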
for count, d in enumerate(dlist):
inds=d['segprops_dlist'][0]['inds']
d['CV6fwdImax']=numpy.max(d['I(A)'][inds])
i=d['I(A)_LinSub_SG'][inds]
v=d['Ewe(V)'][inds]
posinds=numpy.where(i>5e-8)
invboolarr=numpy.float32(i<=5.e-8)
istart_segs, len_segs, fitdy_segs, fitinterc_segs=findzerosegs(invboolarr, booldev_frac, booldev_nout, booldn_segstart, SGnpts=10, plotbool=False, dx=1., maxfracoutliers=.5)
if len(istart_segs)==0:
print 'no positive segments found for ', count, ', sample ', d['Sample']
continue
ind=numpy.argmax(len_segs)
i0=istart_segs[ind]
i1=i0+len_segs[ind]
taffitinds=numpy.arange(i0, i1)
d['segprops_dlist'][0]['TafelFitInds']=inds[taffitinds]
i=i[i0:i1]
i[i<5e-8]=5e-8 #needed due to outliers
v=v[i0:i1]
il=numpy.log10(i)
istart_segs, len_segs, fitdy_segs, fitinterc_segs, dy=findlinearsegs(il, dydev_frac, dydev_nout, dn_segstart, dydev_abs=dydev_abs, plotbool=plotbool, dx=dx, critdy_fracmaxdy=None)
if len(istart_segs)==0:
print 'no Tafel segments found for ', count, ', sample ', d['Sample']
continue
ind=numpy.argmax(len_segs)
i0=istart_segs[ind]
i1=i0+len_segs[ind]
tafinds=numpy.arange(i0, i1)
it=il[tafinds]
vt=v[tafinds]
d['segprops_dlist'][0]['TafelInds']=inds[taffitinds][tafinds]
d['segprops_dlist'][0]['TafelSlope']=fitdy_segs[ind]
d['segprops_dlist'][0]['TafelEstart_TafelValue']=(v[0], fitinterc_segs[ind])
##making 10-sample plots of linear subtraction
cols=['k','b', 'g', 'r', 'c', 'm', 'y', 'brown', 'purple', 'grey']
smpall=numpy.array([d['Sample'] for d in dlist])
dinds=numpy.argsort(smpall)
plotcount=0
smpl=[]
pylab.figure()
for di in dinds:
d=dlist[di]
if plotcount==10:
s='_'.join([`smp` for smp in smpl])
pylab.title(s)
pylab.savefig(s)
plotcount=0
smpl=[]
pylab.figure()
for segd in d['segprops_dlist']:#[2:3]:
for st, k in zip([':', '--', '-'], ['inds', 'TafelFitInds', 'TafelInds']):
if not k in segd.keys():
continue
x=d['Ewe(V)'][segd[k]]
y=d['I(A)_LinSub'][segd[k]]
posinds=numpy.where(y>5e-8)
x=x[posinds]
y=numpy.log10(y[posinds])
pylab.plot(x, y, st, color=cols[plotcount])
break
smpl+=[d['Sample']]
plotcount+=1
#savekeys=['SegIndStart_LinSub','LinLen_LinSub','Intercept_LinSub','dIdt_LinSub', 'ImaxCVLinSub', 'V_IthreshCVLinSub', 'I650mVLinSub', 'CV6fwdImax']
#
#
#mainapp=QApplication(sys.argv)
#form=MainMenu(None, execute=False, folderpath=savefolder)
#echemvis=form.echem
#echemvis.techniquedictlist=dlist
#
#
#def savefom(dlist, savefolder, key):
# for d in dlist:
# d['FOM']=d[key]
# echemvis.writefile(p=savefolder, explab=key)
#
#for skey in savekeys:
# savefom(echemvis.techniquedictlist, savefolder, skey)
if 0:
f=open(p, mode='w')
pickle.dump(dlist, f)
f.close()
```
#### File: johnmgregoire/JCAPdatavis/echem_cvprocess_temp.py
```python
import matplotlib.cm as cm
import numpy
import pylab
import h5py, operator, copy, os, csv, sys
from echem_plate_fcns import *
PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(os.path.join(PyCodePath,'ternaryplot'))
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
from quaternary_FOM_stackedtern import *
#os.chdir(cwd)
def myeval(c):
if c=='None':
c=None
else:
temp=c.lstrip('0')
if (temp=='' or temp=='.') and '0' in c:
c=0
else:
c=eval(temp)
return c
ellabels=['Fe', 'Co', 'Ni', 'Ti']
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop')
rootstr='20120728NiFeCoTiplate1'
#expstr='CV2V_Ithresh'
#fomlabel='Potential for 0.1mA (V vs H$_2$0/O$_2$)'
#fomshift=-.2
#vmin=.3
#vmax=.6
expstr='CV2'
fomlabel='Potential for 0.02mA (V vs H$_2$0/O$_2$)'
fomshift=-.2
vmin=.21
vmax=.45
cmap=cm.jet_r
aboverangecolstr='k'
belowrangecolstr=''
dpl=['', '', '']
for root, dirs, files in os.walk(os.getcwd()):
testfn=[fn for fn in files if (rootstr in fn) and (expstr in fn)]
for fn in testfn:
for count in range(3):
if ('plate%d' %(count+1)) in fn:
dpl[count]=os.path.join(root, fn)
print 'FOM file paths:'
for dp in dpl:
print dp
savefolder='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20120728NiFeCoTi_allplateresults'
dropdl=[]
for dp in dpl:
f=open(dp, mode='r')
dr=csv.DictReader(f, delimiter='\t')
dropd={}
for l in dr:
for kr in l.keys():
k=kr.strip()
if not k in dropd.keys():
dropd[k]=[]
dropd[k]+=[myeval(l[kr].strip())]
for k in dropd.keys():
dropd[k]=numpy.array(dropd[k])
f.close()
dropdl+=[dropd]
#pylab.show()
```
#### File: johnmgregoire/JCAPdatavis/echem_plate_CAill_test3.py
```python
import time, copy
import os, os.path
import sys, operator
import numpy
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from scipy import optimize
from echem_plate_ui import *
from echem_plate_math import *
import pylab
p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/201212_BiVNiFe/201212_BiVNiFe_plate1_4026'
os.chdir('C:/Users/Gregoire/Documents/CaltechWork/echemdrop/201212_BiVNiFe/results/')
mainapp=QApplication(sys.argv)
form=MainMenu(None, execute=False, folderpath=p)
echemvis=form.echem
echemvis.expmntLineEdit.setText('CA5')
echemvis.get_techniquedictlist(nfiles=2)
dlist=echemvis.techniquedictlist
d=dlist[-1]
ikey=[.5, 9999, .5, 1]
illfracrange=(.4, 1.)#(.7, .95)
darkfracrange=(.4, 1.)#(.7, .95)
ykeys=['I(A)']
xkeys=['t(s)', 'Ewe(V)']
#def calc_choppedill(d, ikey='Illum', ykeys=['I(A)'], xkeys=['t(s)', 'Ewe(V)'], illfracrange=(.4, .95), darkfracrange=(.4, .95)):
if isinstance(ikey, list) or isinstance(ikey, numpy.ndarray):
lv_dark, lv_ill, lv_duty, lv_period=ikey
illum=numpy.zeros(len(d['t(s)']), dtype='bool')
indsill=numpy.where((d['t(s)']>lv_dark)&(d['t(s)']<=lv_ill))[0]
till=d['t(s)'][indsill]
till-=till[0]
cycfrac=(till%lv_period)/lv_period
illum[indsill[cycfrac<=lv_duty]]=1
d['Illumcalc']=illum
else:
illum=d[ikey]!=0
istart_len_calc=lambda startind, endind, fracrange: (startind+numpy.floor(fracrange[0]*(endind-startind)), numpy.ceil((fracrange[1]-fracrange[0])*(endind-startind)))
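#returns (start index, length) of the fractional window fracrange within each
#[startind, endind) interval, used to trim transients at the edges of illum/dark segments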
riseinds=numpy.where(illum[1:]&numpy.logical_not(illum[:-1]))[0]+2
fallinds=numpy.where(numpy.logical_not(illum[1:])&illum[:-1])[0]+2
if len(fallinds)<2 or len(riseinds)==0:
print 'insufficient light cycles'
riseinds=riseinds[riseinds<fallinds[-1]]#only consider illum if there is a dark before and after
fallinds=fallinds[fallinds>riseinds[0]]
if len(fallinds)<2 or len(riseinds)==0:
print 'insufficient light cycles'
print '***'
ill_istart, ill_len=istart_len_calc(riseinds, fallinds, illfracrange)
darkstart, darkend=numpy.where(numpy.logical_not(illum))[0][[0, -1]]
dark_istart, dark_len=istart_len_calc(numpy.concatenate([[darkstart], fallinds]), numpy.concatenate([riseinds, [darkend]]), darkfracrange)
#inds_ill=[range(int(i0), int(i0+ilen)) for i0, ilen in zip(ill_istart, ill_len)]
#inds_dark=[range(int(i0), int(i0+ilen)) for i0, ilen in zip(dark_istart, dark_len)]
indstemp=[(range(int(i0ill), int(i0ill+ilenill)), range(int(i0dark), int(i0dark+ilendark))) for i0ill, ilenill, i0dark, ilendark in zip(ill_istart, ill_len, dark_istart, dark_len) if ilenill>0 and ilendark>0]
inds_ill=map(operator.itemgetter(0), indstemp)
inds_dark=map(operator.itemgetter(1), indstemp)
if dark_len[-1]>0:
inds_dark+=[range(int(dark_istart[-1]), int(dark_istart[-1]+dark_len[-1]))]
else:
inds_ill=inds_ill[:-1]
d['inds_ill']=inds_ill
d['inds_dark']=inds_dark
getillvals=lambda arr:numpy.array([arr[inds].mean() for inds in inds_ill])
getdarkvals=lambda arr:numpy.array([arr[inds].mean() for inds in inds_dark])
for k in xkeys+ykeys:
d[k+'_ill']=getillvals(d[k])
d[k+'_dark']=getdarkvals(d[k])
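#photocurrent for each illuminated segment: its mean value minus the average of the
#dark-segment means immediately before and after it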
for k in ykeys:
d[k+'_illdiff']=d[k+'_ill']-0.5*(d[k+'_dark'][:-1]+d[k+'_dark'][1:])
d[k+'_illdiffmean']=numpy.mean(d[k+'_illdiff'])
d[k+'_illdiffstd']=numpy.std(d[k+'_illdiff'])
#calc_choppedill(d, ikey=[.5, 9999, .5, 1], illfracrange=(.7, .95), darkfracrange=(.7, .95))
k='I(A)'
darkbaseline=d[k+'_dark'].mean()
illwrtdark=d[k+'_illdiff']+darkbaseline
pylab.plot(d['t(s)'], d['I(A)'], 'g-')
pylab.plot(d['t(s)'], d['I(A)'], 'g.')
pylab.plot(d['t(s)_ill'], d[k+'_ill'], 'c_')
pylab.plot(d['t(s)_dark'], d[k+'_dark'], 'b_')
for inds in d['inds_ill']:
pylab.plot(d['t(s)'][inds], d['I(A)'][inds], 'm.')
for inds in d['inds_dark']:
pylab.plot(d['t(s)'][inds], d['I(A)'][inds], 'k.')
pylab.plot([min(d['t(s)']), max(d['t(s)'])], [darkbaseline, darkbaseline], '-', color=(.5, .5, .5))
pylab.plot(d['t(s)_ill'], illwrtdark, 'ro')
pylab.twinx()
#pylab.plot(d['t(s)'], d['Illum'], 'y-')
pylab.plot(d['t(s)'], d['Illumcalc'], 'y-')
pylab.plot(d['t(s)'], d['Illumcalc'], 'y.')
#pylab.show()
#pylab.figure()
##d={}
##d['t(s)']=numpy.arange(100)/10.
#ikey=[1, 9999, .5, 2]
#lv_dark, lv_ill, lv_duty, lv_period=ikey
#illum=numpy.zeros(len(d['t(s)']), dtype='bool')
#indsill=numpy.where((d['t(s)']>lv_dark)&(d['t(s)']<=lv_ill))[0]
#till=d['t(s)'][indsill]
#till-=till[0]
#cycfrac=(till%lv_period)/lv_period
#illum[indsill[cycfrac<=lv_duty]]=1
#
#pylab.plot(d['t(s)'], illum, 'k-')
#pylab.ylim([-.1, 1.1])
pylab.show()
```
#### File: johnmgregoire/JCAPdatavis/echem_plate_fcns.py
```python
import numpy
from matplotlib.ticker import FuncFormatter
import matplotlib.colors as colors
from echem_plate_math import *
import time
import os
def myexpformat(x, pos):
for ndigs in range(5):
lab=(('%.'+'%d' %ndigs+'e') %x).replace('e+0','e').replace('e+','e').replace('e0','').replace('e-0','e-')
if eval(lab)==x:
return lab
return lab
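#e.g. myexpformat(0.00012, None) returns '1.2e-4', the shortest exponential label
#that evaluates back to the input value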
ExpTickLabels=FuncFormatter(myexpformat)
are_paths_equivalent=lambda path1, path2:os.path.normcase(os.path.abspath(path1))==os.path.normcase(os.path.abspath(path2))
def attemptnumericconversion(s):
if (s.replace('.', '', 1).replace('e', '', 1).replace('+', '', 1).replace('-', '', 1)).isalnum():
try:
return myeval(s)
except:
pass
return s
def readechemtxtold(path):
f=open(path, mode='r')
lines=f.readlines()
f.close()
d={}
z=[]
for l in lines:
if l.startswith('%'):
a, b, c=l.strip('%').strip().partition('=')
a=a.strip()
c=c.strip()
if a=='elements' or a=='column_headings' or a=='compositions':
val=[]
while len(c)>0:
b, garb, c=c.strip().partition('\t')
val+=[b]
if a=='compositions':
val=[attemptnumericconversion(v) for v in val]
try:
val=numpy.float32(val)
print val
if numpy.any(numpy.isnan(val)):
val=numpy.ones(len(val), dtype='float32')/len(val)
print val
break
except:
pass
elif a=='x' or a=='y':
val=attemptnumericconversion(c.replace('mm', '').strip())
else:
val=attemptnumericconversion(c)
d[a]=val
else:
a=[]
c=l.strip()
while len(c)>0:
b, garb, c=c.strip().partition('\t')
a+=[myeval(b)]
if len(z)==0 or len(a)==len(z[-1]):
z+=[a]
for k, arr in zip(d['column_headings'], numpy.float32(z).T):
d[k]=arr
return d
def readechemtxt(path, mtime_path_fcn=None):
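#parse a JCAP echem .txt file: '%key=value' header lines followed by tab-delimited
#numeric columns named by the 'column_headings' header; returns a dict of header
#metadata plus one numpy array per data column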
try:#need to sometimes try twice so might as well try 3 times
f=open(path, mode='r')
except:
try:
f=open(path, mode='r')
except:
f=open(path, mode='r')
lines=f.readlines()
f.close()
d={}
z=[]
for count, l in enumerate(lines):
if l.startswith('%'):
a, b, c=l.strip('%').strip().partition('=')
a=a.strip()
c=c.strip()
if a=='elements' or a=='column_headings' or a=='compositions':
val=[]
while len(c)>0:
b, garb, c=c.strip().replace('\\t', '\t').partition('\t')
val+=[b]
if a=='compositions':
val=[attemptnumericconversion(v) for v in val]
try:
val=numpy.float32(val)
if numpy.any(numpy.isnan(val)):
raise
except:
val=numpy.ones(len(val), dtype='float32')/len(val)
pass
elif a=='x' or a=='y':
val=attemptnumericconversion(c.replace('mm', '').strip())
else:
val=attemptnumericconversion(c)
d[a]=val
else:
break
if len(lines[count:])==0:
return {}
try:
z=[map(float, l.strip().replace('\\t', '\t').split('\t')) for l in lines[count:] if len(l.strip())>0]
except:
print l
print '\t' in l
print l.split('\t')
print map(float, l.split('\t'))
raise
for k, arr in zip(d['column_headings'], numpy.float32(z).T):
d[k]=arr
d['path']=path
if not mtime_path_fcn is None:
d['mtime']=mtime_path_fcn(path)
return d
def getarrfromkey(dlist, key):
return numpy.array([d[key] for d in dlist])
def col_string(s):
s=s.strip()
if ('(' in s) and (')' in s):
try:
s=eval(s)
except:
return None
cc=colors.ColorConverter()
return cc.to_rgb(s)
def unroundcompositions(dropd):
c=dropd['compositions']
dropd['compositions']=numpy.round(c*30.)/30.
return
def addcodetoplatemapgen1dlist(dlist=None, dropd=None):
code2lims=[(2081, 2109), (4193, 4221), (6305, 6333)]
code2inds=[]
for a, b in code2lims:
code2inds+=range(a, b)
if not dlist is None:
for d in dlist:
s=d['Sample']
cs=d['compositions'].sum()
if s in code2inds and cs>0.:
d['code']=2
elif cs==0.:
d['code']=1
else:
d['code']=0
else:
s=dropd['Sample']
cs=dropd['compositions'].sum(axis=1)
code=numpy.zeros(len(s), dtype='int32')
code2bool=numpy.array([sv in code2inds for sv in s])
code[code2bool&(cs>0.0)]=2
code[cs==0.0]=1
dropd['code']=code
return
#p='C:/Users/Gregoire/Documents/CaltechWork/echemdrop/20120728NiFeCoTiplate1_test21Aug2012/Sample63_x131_y45_A67B33C0D0_CV2.txt'
#c=time.time()
#d=readechemtxt(p)
#print time.time()-c
#c=time.time()
#dold=readechemtxtold(p)
#print time.time()-c
```
#### File: johnmgregoire/JCAPdatavis/plotcustom_selectsamples.py
```python
import numpy, pylab, os, sys, csv, pickle
from echem_plate_fcns import *
from echem_plate_math import *
PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(os.path.join(PyCodePath,'ternaryplot'))
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
#dp='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results/combinedfom.txt'
#savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/parsedresults/allfom'
SYSTEM=21
cabools=[0, 0, 0, 0, 0, 0]
bmcpavebool=True
if SYSTEM==-1:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summarytemp'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[1, 0, 0, 0, 1, 1]
cpbools=[1, 0, 0, 0, 1, 1]
bmcpavebool=False
elif SYSTEM==0:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[1, 1, 1, 1, 1, 0]
cpbools=[1, 1, 1, 1, 1, 0]
cabools=[0, 0, 0, 0, 1, 0]
elif SYSTEM==1:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys1345'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[1, 0, 1, 1, 1, 0]
cpbools=[1, 0, 1, 1, 1, 0]
elif SYSTEM==2:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys15'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[1, 0, 0, 0, 1, 0]
cpbools=[1, 0, 0, 0, 1, 0]
elif SYSTEM==21:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys15indiv6'
xlims=(250, 460)
ylims=(-.8, 2.3)
cvbools=[1, 0, 0, 0, 1, 1]
cpbools=[1, 0, 0, 0, 1, 1]
cabools=[0, 0, 0, 0, 1, 0]
bmcpavebool=False
elif SYSTEM==3:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys3CP5'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[0, 0, 0, 0, 1, 0]
cpbools=[0, 0, 1, 0, 1, 0]
elif SYSTEM==4:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys1CV3CP5'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[1, 0, 0, 0, 1, 0]
cpbools=[0, 0, 1, 0, 1, 0]
elif SYSTEM==41:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys1CV3CP5indiv'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[1, 0, 0, 0, 1, 0]
cpbools=[0, 0, 1, 0, 1, 0]
bmcpavebool=False
elif SYSTEM==5:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys1345indiv'
xlims=(250, 460)
ylims=(-.8, 1.8)
cvbools=[1, 0, 1, 1, 1, 0]
cpbools=[1, 0, 1, 1, 1, 0]
bmcpavebool=False
elif SYSTEM==6:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys1345indiv6'
xlims=(220, 460)
ylims=(-.8, 2.3)
cvbools=[1, 0, 1, 1, 1, 1]
cpbools=[1, 0, 1, 1, 1, 1]
bmcpavebool=False
elif SYSTEM==7:
savefolder='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/summary_sys15indiv6'
xlims=(220, 460)
ylims=(-.8, 2.3)
cvbools=[1, 0, 0, 0, 1, 1]
cpbools=[1, 0, 0, 0, 1, 1]
bmcpavebool=False
p1='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/results/selectsamplesnesteddlist.pck'
p2='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130528NiFeCoCe3platerescan/results/selectsamplesnesteddlist.pck'
p3='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130604NiFeCoCe/results/selectsamplesnesteddlist.pck'
p4='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/20130610NiFeCoCesingle_6321/results/selectsamplesnesteddlist.pck'
p5='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/benchmarking/selectsamplesnesteddlist.pck'
p6='C:/Users/Public/Documents/EchemDropRawData/NiFeCoCe/yunsamples/selectsamplesnesteddlist.pck'
dallsamples=[[693, 693, 170, 170, 170, 170], [3022, 3022, 725, 725, 725, 725], [5047, 5047, 1326, 1326, 1326, 1326], [5050, 5050, 1329, 1329, 1329, 1329], [692, 692, 169, 169, 169, 169]]# list of "compositions" in terms of sample number; for each composition there should be a corresponding sample number for each of the dall
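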
if not os.path.exists(savefolder):
os.mkdir(savefolder)
os.chdir(savefolder)
#BMCP102010_dallindex=[\
#[numpy.array([0.355, 0.389, 0.374]), numpy.array([0.007, 0.014, 0.011])], \
#[numpy.array([0.376, 0.425, 0.380]), numpy.array([0.017, 0.033, 0.017])], \
#[numpy.array([0.377, 0.419, 0.379]), numpy.array([0.017, 0.034, 0.021])], \
#numpy.nan, numpy.nan]#indexed same as dall and then within is a list of 2 arrays, 0th is vs OER and 1st is STD from 3 repeat measurements
f=open(p1, mode='r')
dall1=pickle.load(f)
f.close()
f=open(p2, mode='r')
dall2=pickle.load(f)
f.close()
f=open(p3, mode='r')
dall3=pickle.load(f)
f.close()
f=open(p4, mode='r')
dall4=pickle.load(f)
f.close()
f=open(p5, mode='r')
dall5=pickle.load(f)
f.close()
f=open(p6, mode='r')
dall6=pickle.load(f)
f.close()
dallinds1={}
dallinds2={}
dallinds3={}
dallinds4={}
dallinds5={}
dallinds6={}
for sl in dallsamples:
il=[]
for s, da, di in zip(sl, [dall1, dall2, dall3, dall4, dall5, dall6], [dallinds1, dallinds2, dallinds3, dallinds4, dallinds5, dallinds6]):
for k, dl in da.iteritems():
stemp=[d['Sample'] for d in dl]
if not k in di.keys():
di[k]=[]
if s in stemp:
di[k]+=[stemp.index(s)]
else:
di[k]+=[numpy.nan]
print 'no data found for sample ', s, k
def CPTafel_sampleind(dallsamplei, cvbools=[1, 1, 1, 1, 1, 1], cpbools=[1, 1, 1, 1, 1, 1]):
if cpbools[2]:
d=dall3['Tafel'][dallinds3['Tafel'][dallsamplei]]
dydx=1./(d['TafelCPSlopeVperdec']*1000.)
y0=d['TafelCPLogExCurrent']+5.
x=numpy.array(xlims)
y=x*dydx+y0
pylab.plot(x, y, 'r--', label='CP3fit')
def allbmcvfig_sampleind(dallsamplei):
d1=dall5['bmcv'][dallinds5['bmcv'][dallsamplei]]
for count, k in enumerate(['complete02', 'complete03', 'complete04']):
d=d1[k]
# x=d['Ewe(VOER)']*1000.
# i=numpy.argmax(x)
# x=x[:i]
# y=d['I(mAcm2)'][:i]
x=d['Ewe(VOER)_LinSub']*1000.
y=d['I(mAcm2)_LinSub']
posinds=numpy.where(y>1e-1)[0][1:]
x=x[posinds]
y=numpy.log10(y[posinds])
if count==0:
pylab.plot(x, y, '-', color='c', label='bmCVs')
else:
pylab.plot(x, y, '-', color='c')
def allbmcpfig_sampleind(dallsamplei, avebool=True, plot2hr=True):#booleans not implemented yet
d1=dall5['bmstepcp'][dallinds5['bmstepcp'][dallsamplei]]
xarr=[]
yarr=[]
for k in ['complete02', 'complete03', 'complete04']:
d=d1[k]
xarr+=[d['Ewe(VOER)']*1000.]
yarr+=[d['I(mAcm2)']]
xarr=numpy.array(xarr)
yarr=numpy.array(yarr)
if avebool:
x=xarr.mean(axis=0)
xe=xarr.std(axis=0)
y=numpy.log10(yarr.mean(axis=0))
pylab.errorbar(x, y, xerr=xe, ls='None', marker='s', mec='m', mfc='m', mew=.9, label='bmCP')
else:
for count, (x, y) in enumerate(zip(xarr, yarr)):
y=numpy.log10(y)
if count==0:
pylab.plot(x, y, ls='None', mec='m', mfc='m', marker=r'$'+`count+2`+'$', label='bmCP')
else:
pylab.plot(x, y, ls='None', mec='m', mfc='m', marker=r'$'+`count+2`+'$')
if plot2hr:
d1=dall5['bm2hrcp'][dallinds5['bm2hrcp'][dallsamplei]]
xarr=[]
yarr=[]
for k in ['complete02', 'complete03', 'complete04']:
d=d1[k]
xarr+=[d['Ewe(VOER)']*1000.]
yarr+=[d['I(mAcm2)']]
xarr=numpy.array(xarr)
yarr=numpy.array(yarr)
x2=xarr.mean()
xe2=xarr.std()
y2=numpy.log10(yarr.mean())
if avebool:
pylab.errorbar(x2, y2, xerr=xe2, ls='None', marker='s', mec='m', mfc='m', mew=.9, label='bmCP 2hr')
else:
for count, (x, y) in enumerate(zip(xarr, yarr)):
y=numpy.log10(y)
if count==0:
pylab.plot(x, y, ls='None', mec='m', mfc='m', marker=r'$'+`count+2`+"'$", label='bmCP 2hr')
else:
pylab.plot(x, y, ls='None', mec='m', mfc='m', marker=r'$'+`count+2`+"'$")
def allbmcafig_sampleind(dallsamplei, avebool=True):#booleans not implemented yet
d1=dall5['bmstepca'][dallinds5['bmstepca'][dallsamplei]]
xarr=[]
yarr=[]
for k in ['complete02', 'complete03', 'complete04']:
d=d1[k]
xarr+=[d['Ewe(VOER)']*1000.]
yarr+=[d['I(mAcm2)']]
xarr=numpy.array(xarr)
yarr=numpy.array(yarr)
if avebool:
x=xarr.mean(axis=0)
xe=xarr.std(axis=0)
y=numpy.log10(yarr.mean(axis=0))
pylab.errorbar(x, y, xerr=xe, ls='None', marker='s', mec='m', mfc='m', mew=.9, label='bmCA')
else:
for count, (x, y) in enumerate(zip(xarr, yarr)):
y=numpy.log10(y)
if count==0:
pylab.plot(x, y, ls='None', mec='pink', mfc='pink', marker=r'$'+`count+2`+'$', label='bmCA')
else:
pylab.plot(x, y, ls='None', mec='pink', mfc='pink', marker=r'$'+`count+2`+'$')
def allLogIvsVfig_sampleind(dallsamplei, cvsmoothpts=8, cvbools=[1, 1, 1, 1, 1, 1], cpbools=[1, 1, 1, 1, 1, 1]):
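#vsh shifts the as-measured Ewe(V) onto the 'vs OER' potential scale used for plotting;
#the +5. added to log10(I in A) is a factor of 1e5, i.e. a conversion to mA/cm^2
#assuming a sample (drop) area of roughly 0.01 cm^2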
d=dall1['CV3'][dallinds1['CV3'][dallsamplei]]
vsh=-(.187-0.045)
d1=d
if cvbools[0]:
segd=d['segprops_dlist'][0]
x=(d['Ewe(V)'][segd['inds']]+vsh)*1000.
x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(A)_LinSub'][segd['inds']]
y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
posinds=numpy.where(y>1e-6)[0][5:]
x=x[posinds]
y=numpy.log10(y[posinds])+5.
pylab.plot(x, y, '-', color='k', label='CVv1')
if cvbools[1]:
d=dall2['CV3'][dallinds2['CV3'][dallsamplei]]
vsh=-(.187-0.045)
segd=d['segprops_dlist'][0]
x=(d['Ewe(V)'][segd['inds']]+vsh)*1000.
x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(A)_LinSub'][segd['inds']]
y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
posinds=numpy.where(y>1e-6)[0][5:]
x=x[posinds]
y=numpy.log10(y[posinds])+5.
pylab.plot(x, y, '-', color='b', label='CVv2')
if cvbools[2]:
d=dall3['CV3'][dallinds3['CV3'][dallsamplei]]
vsh=-(.187-0.045)
segd=d['segprops_dlist'][0]
x=(d['Ewe(V)'][segd['inds']]+vsh)*1000.
x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(A)_LinSub'][segd['inds']]
y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
posinds=numpy.where(y>1e-6)[0][5:]
x=x[posinds]
y=numpy.log10(y[posinds])+5.
pylab.plot(x, y, '-', color='r', label='CVv3')
if cvbools[3]:
d=dall4['CV3'][dallinds4['CV3'][dallsamplei]]
vsh=-(.187-0.045)
segd=d['segprops_dlist'][0]
x=(d['Ewe(V)'][segd['inds']]+vsh)*1000.
x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(A)_LinSub'][segd['inds']]
y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
posinds=numpy.where(y>1e-6)[0][5:]
x=x[posinds]
y=numpy.log10(y[posinds])+5.
pylab.plot(x, y, '-', color='y', label='CVv4')
if cvbools[3]:
d=dall4['CV3postCP'][dallinds4['CV3postCP'][dallsamplei]]
vsh=-(.187-0.045)
segd=d['segprops_dlist'][0]
x=(d['Ewe(V)'][segd['inds']]+vsh)*1000.
x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(A)_LinSub'][segd['inds']]
y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
posinds=numpy.where(y>1e-6)[0][5:]
x=x[posinds]
y=numpy.log10(y[posinds])+5.
pylab.plot(x, y, '-', color='g', label='CVv4postCP')
if cpbools[0]:
d=dall1['CP1'][dallinds1['CP1'][dallsamplei]]
vsh=-(.187-0.045)
x=(d['FOM']+vsh)*1000.
y=d['I(A)'].mean()
y=numpy.log10(y)+5.
pylab.plot(x, y, 'o', color='k', label='CPv1')
if cpbools[2]:
d=dall3['CP4'][dallinds3['CP4'][dallsamplei]]
vsh=-(.187-0.043)
x=(d['FOM']+vsh)*1000.
y=d['I(A)'].mean()
y=numpy.log10(y)+5.
pylab.plot(x, y, 'o', color='r', label='CPv3')
d=dall3['CP5'][dallinds3['CP5'][dallsamplei]]
vsh=-(.187-0.043)
x=(d['FOM']+vsh)*1000.
y=d['I(A)'].mean()
y=numpy.log10(y)+5.
pylab.plot(x, y, 'o', color='r')
d=dall3['CP6'][dallinds3['CP6'][dallsamplei]]
vsh=-(.187-0.045)
x=(d['FOM']+vsh)*1000.
y=d['I(A)'].mean()
y=numpy.log10(y)+5.
pylab.plot(x, y, 'o', color='r')
if cpbools[3]:
d=dall3['CP4'][dallinds3['CP4'][dallsamplei]]
vsh=-(.187-0.045)
x=(d['FOM']+vsh)*1000.
y=d['I(A)'].mean()
y=numpy.log10(y)+5.
pylab.plot(x, y, 'o', color='g', label='CPv4')
d=dall3['CP5'][dallinds3['CP5'][dallsamplei]]
vsh=-(.187-0.045)
x=(d['FOM']+vsh)*1000.
y=d['I(A)'].mean()
y=numpy.log10(y)+5.
pylab.plot(x, y, 'o', color='g')
d=dall3['CP6'][dallinds3['CP6'][dallsamplei]]
vsh=-(.187-0.045)
x=(d['FOM']+vsh)*1000.
y=d['I(A)'].mean()
y=numpy.log10(y)+5.
pylab.plot(x, y, 'o', color='g')
#
#pylab.legend(loc=4)
# pylab.ylabel('Log(J / mA cm$^{-2}$)')
# pylab.xlabel('Potential (mV vs OER)')
#
# t='Sample%d,%d:' %(d1['Sample'], d['Sample'])
# t+=''.join([el+'%d' %(100*v) for el, v in zip(d['elements'], d['compositions'])])
# pylab.title(t)
def yuncvplot(dallsamplei):
if cvbools[5]:
d=dall6['CV'][dallinds6['CV'][dallsamplei]]['sample1cv']
x=d['Ewe(VOER)_LinSub']*1000.
#x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(mAcm2)_LinSub']
#y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
posinds=numpy.where(y>1e-1)[0]
x=x[posinds]
y=numpy.log10(y[posinds])
pylab.plot(x, y, '-', color='brown', label='CVv6')
def LinIvsVfig_sampleind(dallsamplei, cvsmoothpts=8, cvbools=[1, 1, 1, 1, 1, 1], cpbools=[1, 1, 1, 1, 1, 1]):
if cvbools[0]:
d=dall1['CV3'][dallinds1['CV3'][dallsamplei]]
vsh=-(.187-0.045)
d1=d
if cvbools[0]:
segd=d['segprops_dlist'][0]
x=(d['Ewe(V)']+vsh)*1000.
x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(A)']*1.e5
y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
x=x[15:-15]
y=y[15:-15]
pylab.plot(x, y, '-', color='k', label='CVv1')
if cvbools[4]:
d1=dall5['bmcv'][dallinds5['bmcv'][dallsamplei]]
for count, k in enumerate(['complete02', 'complete03', 'complete04']):
d=d1[k]
x=d['Ewe(VOER)']*1000.
y=d['I(mAcm2)']
if count==0:
pylab.plot(x, y, '-', color='c', label='bmCVs')
else:
pylab.plot(x, y, '-', color='c')
if cvbools[5]:
d=dall6['CV'][dallinds6['CV'][dallsamplei]]['sample1cv']
x=d['Ewe(VOER)']*1000.
#x=savgolsmooth(x, nptsoneside=cvsmoothpts, order=1)
y=d['I(mAcm2)']
#y=savgolsmooth(y, nptsoneside=cvsmoothpts, order=2)
posinds=numpy.where(y>1e-1)[0]
pylab.plot(x, y, '-', color='brown', label='CVv6')
for dallsamplei in range(5):
pylab.figure(num=dallsamplei)
allLogIvsVfig_sampleind(dallsamplei, cvbools=cvbools, cpbools=cpbools)
CPTafel_sampleind(dallsamplei, cvbools=cvbools, cpbools=cpbools)
if cpbools[4]:
for dallsamplei in range(3):
pylab.figure(num=dallsamplei)
allbmcpfig_sampleind(dallsamplei, avebool=bmcpavebool, plot2hr=True)
if cabools[4]:
for dallsamplei in range(3):
pylab.figure(num=dallsamplei)
allbmcafig_sampleind(dallsamplei, avebool=bmcpavebool)
if cvbools[4]:
for dallsamplei in range(3):
pylab.figure(num=dallsamplei)
allbmcvfig_sampleind(dallsamplei)
if cvbools[5]:
for dallsamplei in [2]:
pylab.figure(num=dallsamplei)
yuncvplot(dallsamplei)
for dallsamplei in range(5):
pylab.figure(num=dallsamplei)
pylab.legend(loc = 'lower right', bbox_to_anchor = (1.12, 0.))
pylab.ylabel('Log(J / mA cm$^{-2}$)')
pylab.xlabel('Potential (mV vs OER)')
d1=dall1['CV3'][dallinds1['CV3'][dallsamplei]]
d=dall3['CV3'][dallinds3['CV3'][dallsamplei]]
t='Sample%d-%d_' %(d1['Sample'], d['Sample'])
t+=''.join([el+'%d' %(int(round(100*v))) for el, v in zip(d['elements'], d['compositions'])])
pylab.title(t)
pylab.xlim(xlims)
pylab.ylim(ylims)
pylab.savefig(t+'.png')
pylab.savefig(t+'.eps')
for dallsamplei in [2]:
pylab.figure()
LinIvsVfig_sampleind(dallsamplei)
pylab.legend(loc = 'upper left')
pylab.ylabel('J / mA cm$^{-2}$')
pylab.xlabel('Potential (mV vs OER)')
d1=dall1['CV3'][dallinds1['CV3'][dallsamplei]]
d=dall3['CV3'][dallinds3['CV3'][dallsamplei]]
t='Sample%d-%d_' %(d1['Sample'], d['Sample'])
t+=''.join([el+'%d' %(int(round(100*v))) for el, v in zip(d['elements'], d['compositions'])])
pylab.title(t)
pylab.xlim(-100, 460)
pylab.ylim(-8, 180)
t+='LinCV'
pylab.savefig(t+'.png')
pylab.savefig(t+'.eps')
pylab.show()
``` |
{
"source": "johnmgregoire/JCAPdepositionmonitor",
"score": 2
} |
#### File: johnmgregoire/JCAPdepositionmonitor/Conversion.py
```python
from PyQt4 import QtCore, QtGui
from PyQt4 import Qt
from elements import ELEMENTS
#from fractions import Fraction
from math import *
import re
#import sys
#LmtngElmnt=Elmnt3 #Set Rate-limiting Element
#Prompt the user or record the crystal monitor reading for
#the deposition rate of the slowest-depositing element.
#This value is usually given in units of 10^-8 g/sec-cm2
LmtngDepRateXM=1.94
Elmnt1Conc=0.5
Elmnt2Conc=0.25
Elmnt3Conc=0.25
def getElement(chemFormula):
reg0 = '^'
reg1 = '[A-Z]'
reg2 = '[a-z]?'
reg3 = '([ONBC])'
reg4 = '[\d]*'
reg5 = '(\.\d+)?'
reg6 = '(' + reg3 + '(' + reg4 + reg5 + ')' +')?'
reg7 = '$'
totalReg = reg0 +'(' + reg1+reg2 + ')' + reg6 + reg7
regEX = re.compile(totalReg)
matchedReg = re.match(regEX,str(chemFormula))
if matchedReg:
metalName, otherElmntName = matchedReg.group(1), matchedReg.group(3)
otherElmntStoich = matchedReg.group(4)
try:
            metalElmntObject = ELEMENTS[metalName]
            secondElmntObject = ELEMENTS[otherElmntName] if otherElmntName else None
            if not otherElmntStoich:
                otherElmntStoich = 1.
            secondElmntStoich = float(otherElmntStoich)
            elementMass = metalElmntObject.mass
            # pure metals (no O/N/B/C suffix) have no second-element mass to add
            totalmass = elementMass + (secondElmntObject.mass * secondElmntStoich if secondElmntObject else 0.)
return totalmass #in gram/mol
except KeyError:
pass
return False
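# Illustrative usage of getElement (a sketch; assumes the ELEMENTS table from the
# `elements` package provides .mass for 'Fe' and 'O'):
#   getElement('Fe')   -> ~55.85 g/mol (metal only, no O/N/B/C suffix)
#   getElement('FeO2') -> mass(Fe) + 2*mass(O), ~87.84 g/mol
#   getElement('xyz')  -> False (does not match the formula regex)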
# self.Lmnts["Metal Name"] = metalElmntObject
# self.Lmnts["Second Element"] = secondElmntObject
# self.Lmnts["Second Element Stoich"] = float(otherElmntStoich)
# divide using the molar mass to get this
#def conversion(elementMass, secondElmntMass, secondElmntStoich):
# print type(elementMass)
# scaledMass = elementMass + (secondElmntMass * secondElmntStoich)
# convertedMass = 10./scaledMass
# return convertedMass
## units = 'nmol/s cm'+r'$^2$'
#
#def unconvert(elementMass):
# unScaledMass = elementMass + (secondElmntObject.mass * secondElmntStoich)
# unconvertedMass = unScaledMass/10.
# return unconvertedMass
#Set deposition-estimates rates (factors for # of cations per molecule)
def Rates(element1, element2, element3):
Elmnt1Mass = getElement(element1)
Elmnt2Mass = getElement(element2)
Elmnt3Mass = getElement(element3)
LmtngElmnt=Elmnt1Mass
# if LmtngElmnt==Elmnt1Mass:
Elmnt1RateNMol=LmtngDepRateXM*10/Elmnt1Mass
Elmnt1RateGrams=LmtngDepRateXM
print 'Element1 = Limiting Rate Element (in nmol/sec*cm^2):%0.9f' % (Elmnt1RateNMol)
Elmnt2RateNMol=Elmnt1RateNMol*(Elmnt2Conc/Elmnt1Conc)
Elmnt2RateGrams=Elmnt2RateNMol*(Elmnt2Mass/10)
print 'Element2 Calculated Deposition Rate (in 10^-8 g/sec*cm^2):%0.9f' % (Elmnt2RateGrams)
Elmnt3RateNMol=Elmnt1RateNMol*(Elmnt3Conc/Elmnt1Conc)
Elmnt3RateGrams=Elmnt3RateNMol*(Elmnt3Mass/10)
print 'Element3 Calculated Deposition Rate (in 10^-8 g/sec*cm^2):%0.9f' % (Elmnt3RateGrams)
# elif LmtngElmnt==Elmnt2Mass:
## LEcatrate=E2catrate
# Elmnt2RateNMol=LmtngDepRateXM*Elmnt2Mass*(10**8)
# Elmnt2RateGrams=LmtngDepRateXM*10**8
# print 'Element2 = Limiting Rate Element (in nmol/sec*cm^2):%0.9f' % (Elmnt2RateNMol)
# Elmnt3RateNMol=Elmnt2RateNMol*(Elmnt3Conc/Elmnt2Conc)
# Elmnt3RateGrams=Elmnt3RateNMol/(Elmnt3Mass/10)
# print 'Element3 Calculated Deposition Rate (in 10^-8 g/sec*cm^2):%0.9f' % (Elmnt3RateGrams)
# Elmnt1RateNMol=Elmnt2RateNMol*(Elmnt1Conc/Elmnt2Conc)
# Elmnt1RateGrams=Elmnt1RateNMol/(Elmnt1Mass/10)
# print 'Element1 Calculated Deposition Rate (in 10^-8 g/sec*cm^2):%0.9f' % (Elmnt1RateGrams)
# elif LmtngElmnt==Elmnt3Mass:
## LEcatrate=E3catrate
# Elmnt3RateNMol=LmtngDepRateXM*Elmnt3Mass*(10**8)
# Elmnt3RateGrams=LmtngDepRateXM*10**8
# print 'Element3 = Limiting Rate Element (in nmol/sec*cm^2):%0.9f' % (Elmnt3RateNMol)
# Elmnt1RateNMol=Elmnt3RateNMol*(Elmnt1Conc/Elmnt3Conc)
# Elmnt1RateGrams=Elmnt1RateNMol/(Elmnt1Mass/10)
# print 'Element1 Calculated Deposition Rate (in 10^-8 g/sec*cm^2):%0.9f' % (Elmnt1RateGrams)
# Elmnt2RateNMol=Elmnt3RateNMol*(Elmnt2Conc/Elmnt3Conc)
# Elmnt2RateGrams=Elmnt2RateNMol/(Elmnt2Mass/10)
# print 'Element2 Calculated Deposition Rate (in 10^-8 g/sec*cm^2):%0.9f' % (Elmnt2RateGrams)
Elmnt1DepRateReal=Elmnt1RateNMol
Elmnt2DepRateReal=Elmnt2RateNMol
Elmnt3DepRateReal=Elmnt3RateNMol
    ##Set(create look-up table?) for density values of appropriate oxides
    ElmntOxide1dnsty=7.215 #g/cm^3
    ElmntOxide2dnsty=6.67 #g/cm3 (monoclinic, occurs at <70 degreesC)
    ElmntOxide3dnsty=6.11 #g/cm3 (α-form/cubic form, occurs at <570 degreesC)
#Set *real* deposition rates (apply limiting-rate)
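    # Unit bookkeeping (inferred from the constants, not stated by the author):
    # each conc*density/mass term is roughly mol of cations per cm^3 of oxide film,
    # and the denominator is the total cation deposition rate in nmol/(s*cm^2);
    # 1 (mol/cm^3)/(nmol/(s*cm^2)) = 1e9 s/cm = 100 s/nm, hence the leading factor of 100.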
Rate=100*((Elmnt1Conc*ElmntOxide1dnsty/Elmnt1Mass)+\
(Elmnt2Conc*ElmntOxide2dnsty/Elmnt2Mass)+\
(Elmnt3Conc*ElmntOxide3dnsty/Elmnt3Mass))/\
(Elmnt1DepRateReal+Elmnt2DepRateReal+Elmnt3DepRateReal)
print 'Time/Thickness Rate (in sec/nm):%0.9F' % (Rate)
##Set *real* deposition rates (apply limiting-rate)
#def RateCalculator():
# Rate=(Elmnt1conc*(E1catrate*ElmntOxide1dnsty/Elmnt1mw)+Elmnt2conc*(E2catrate*ElmntOxide2dnsty/Elmnt2mw)+Elmnt3conc*(E3catrate*ElmntOxide3dnsty/Elmnt3mw))/(Elmnt1DepRateReal+Elmnt2DepRateReal+Elmnt3DepRateReal)
# print Rate
```
#### File: johnmgregoire/JCAPdepositionmonitor/graphwindow_data.py
```python
from PyQt4 import QtGui, QtCore
from datareader import DATA_HEADINGS
import graph
import profilecreator
import date_helpers
import time
""" window that displays a single graph area and various
customization options """
class GraphWindow(QtGui.QWidget):
def __init__(self):
super(GraphWindow, self).__init__()
self.updating = False
self.initUI()
""" draws the user interface of the window """
def initUI(self):
# set window size and position on screen
self.setGeometry(300, 200, 1000, 600)
# get variables from spreadsheet
global DATA_HEADINGS
self.vars = []
for index in range(3, len(DATA_HEADINGS)):
self.vars += [DATA_HEADINGS.get(index)]
# initialize default graph
self.graph = graph.Graph(self, xvarname="Time",
yvarname=self.vars[0])
self.toolbar = graph.NavigationToolbar(self.graph, self)
self.updating = True
self.setWindowTitle(self.vars[0])
self.plotOptionMenu = QtGui.QComboBox()
self.plotOptionMenu.addItem('Switch graph')
self.plotOptionMenu.addItem('Add to left axis')
# make drop-down menu for selecting graphs
self.selectVar = QtGui.QComboBox()
for var in self.vars:
self.selectVar.addItem(var)
self.selectVar.activated[str].connect(self.selectGraph)
# set up layout and sub-layouts
self.layout = QtGui.QVBoxLayout(self)
self.optionslayout = QtGui.QGridLayout()
self.gridlayout = QtGui.QGridLayout()
self.axeslayout = QtGui.QGridLayout()
self.timelayout = QtGui.QGridLayout()
# this exists so auto axis buttons can move if necessary
self.autowidget = QtGui.QWidget(self)
self.autolayout = QtGui.QGridLayout()
# first column holds graph, second column holds graph options
# set the column stretches - 0 is the default
# set minimum column widths
self.gridlayout.setColumnStretch(0, 4)
self.gridlayout.setColumnStretch(1, 0)
self.gridlayout.setColumnMinimumWidth(0,300)
self.gridlayout.setRowMinimumHeight(0,375)
# add drop-down menus and MPL toolbar to top of window
self.layout.addLayout(self.optionslayout)
self.optionslayout.addWidget(self.plotOptionMenu, 0, 0)
self.optionslayout.addWidget(self.selectVar, 0, 1, 1, 3)
self.optionslayout.addWidget(self.toolbar, 1, 0, 1, 4)
# initialize checkbox that acts as pause button
self.hold_cb = QtGui.QCheckBox('Hold', self)
self.hold_cb.stateChanged.connect(self.hold)
# initialize input boxes for axis limits
self.minutes = QtGui.QLineEdit(self)
self.minutes.setFixedWidth(40)
self.hours = QtGui.QLineEdit(self)
self.hours.setFixedWidth(40)
self.days = QtGui.QLineEdit(self)
self.days.setFixedWidth(40)
self.Ymin = QtGui.QLineEdit(self)
self.Ymax = QtGui.QLineEdit(self)
self.YminR = QtGui.QLineEdit(self)
self.YmaxR = QtGui.QLineEdit(self)
# create labels for the input boxes
self.label_time = QtGui.QLabel('Show data from the last:')
self.label_minutes = QtGui.QLabel('minutes')
self.label_hours = QtGui.QLabel('hours')
self.label_days = QtGui.QLabel('days')
self.label_Ymin = QtGui.QLabel('Y Min (left):')
self.label_Ymax = QtGui.QLabel('Y Max (left):')
self.label_YminR = QtGui.QLabel('Y Min (right):')
self.label_YmaxR = QtGui.QLabel('Y Max (right):')
# initialize buttons and their connections
self.set_axes = QtGui.QPushButton('Enter')
self.auto_xaxes = QtGui.QPushButton('Auto X')
self.auto_yaxes = QtGui.QPushButton('Auto Y (left)')
self.auto_yraxes = QtGui.QPushButton('Auto Y (right)')
self.set_axes.clicked.connect(self.setAxes)
self.auto_xaxes.clicked.connect(self.autoXAxes)
self.auto_yaxes.clicked.connect(self.autoYAxes)
self.auto_yraxes.clicked.connect(self.autoYRAxes)
# set the possible streches of input boxes
self.Ymin.setSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
self.Ymax.setSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
self.YminR.setSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
self.YmaxR.setSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
# initialize menu to choose variable for right-hand axis
self.label_raxis = QtGui.QLabel('Choose a variable to plot on the right-hand axis:')
self.choose_var = QtGui.QComboBox()
for var in self.vars:
self.choose_var.addItem(var)
self.set_raxis = QtGui.QPushButton('Plot')
self.set_raxis.clicked.connect(self.addRAxis)
# place the main grid layout inside layout for window
self.layout.addLayout(self.gridlayout)
# add graph and options to main grid layout
self.gridlayout.addWidget(self.graph, 0, 0)
self.gridlayout.addLayout(self.axeslayout, 0, 1)
# set alignment for the widgets
self.axeslayout.setAlignment(QtCore.Qt.AlignTop)
# create spacers to separate fields in graph options layout
self.spacer1 = QtGui.QSpacerItem(1, 20)
self.spacer2 = QtGui.QSpacerItem(1, 20)
self.spacer3 = QtGui.QSpacerItem(1, 20)
self.spacer4 = QtGui.QSpacerItem(1, 20)
# add items to the graph options layout
self.axeslayout.addItem(self.spacer1, 0, 0)
self.axeslayout.addWidget(self.hold_cb, 1, 0)
self.axeslayout.addItem(self.spacer2, 2, 0)
self.axeslayout.addItem(self.spacer3, 3, 0)
self.axeslayout.addWidget(self.label_raxis, 4, 0)
self.axeslayout.addWidget(self.choose_var, 5, 0)
self.axeslayout.addWidget(self.set_raxis, 6, 0)
self.axeslayout.addItem(self.spacer4, 7, 0)
self.axeslayout.addWidget(self.label_time, 8, 0)
self.axeslayout.addLayout(self.timelayout, 9, 0)
# add options for time axis to a sub-grid
self.timelayout.addWidget(self.minutes, 0, 0)
self.timelayout.addWidget(self.label_minutes, 0, 1)
self.timelayout.addWidget(self.hours, 1, 0)
self.timelayout.addWidget(self.label_hours, 1, 1)
self.timelayout.addWidget(self.days, 2, 0)
self.timelayout.addWidget(self.label_days, 2, 1)
# add more items to graph options layout
self.axeslayout.addWidget(self.label_Ymin, 13, 0)
self.axeslayout.addWidget(self.Ymin, 14, 0)
self.axeslayout.addWidget(self.label_Ymax, 15, 0)
self.axeslayout.addWidget(self.Ymax, 16, 0)
self.axeslayout.addWidget(self.label_YminR, 17, 0)
self.axeslayout.addWidget(self.YminR, 18, 0)
self.axeslayout.addWidget(self.label_YmaxR, 19, 0)
self.axeslayout.addWidget(self.YmaxR, 20, 0)
self.axeslayout.addWidget(self.set_axes, 21, 0)
# hide options for second axis initially
self.label_YminR.hide()
self.YminR.hide()
self.label_YmaxR.hide()
self.YmaxR.hide()
# add widget that holds auto axis buttons
self.axeslayout.addWidget(self.autowidget, 22, 0, 1, 2)
self.autowidget.setLayout(self.autolayout)
# add auto axis buttons
self.autolayout.addWidget(self.auto_xaxes, 0 , 0)
self.autolayout.addWidget(self.auto_yaxes, 0 , 1)
self.autolayout.addWidget(self.auto_yraxes, 0 , 2)
# hide option for auto right axis initially
self.auto_yraxes.hide()
self.show()
""" called when variable to plot is selected """
def selectGraph(self, varName):
# convert QString to string
varString = str(varName)
if self.plotOptionMenu.currentText() == 'Switch graph':
# clear previous plot and set parent to None so it can be deleted
self.graph.clearPlot()
self.graph.setParent(None)
self.gridlayout.removeWidget(self.graph)
            self.graph = None
self.graph = graph.Graph(self, xvarname = "Time",
yvarname = varString)
self.gridlayout.addWidget(self.graph, 0, 0)
self.setWindowTitle(varString)
# remove all options for right-hand axis because plot is initialized
# without it
self.label_YminR.hide()
self.YminR.hide()
self.label_YmaxR.hide()
self.YmaxR.hide()
self.auto_yraxes.hide()
# remove the "add to right axis" option from plotOptionMenu if
# it is currently displayed
self.plotOptionMenu.removeItem(2)
# clear axis label fields
self.minutes.clear()
self.hours.clear()
self.days.clear()
self.Ymin.clear()
self.Ymax.clear()
self.YminR.clear()
self.YmaxR.clear()
elif self.plotOptionMenu.currentText() == 'Add to left axis':
self.graph.addVarToAxis(varString)
return
else:
self.graph.addVarToAxis(varString, "right")
return
""" called when request to add plot to right-hand axis is made """
def addRAxis(self):
# get name of variable from selection menu
varName = self.choose_var.currentText()
# convert QString to string
varString = str(varName)
self.graph.addRightAxis(varString)
# reset right y-axis limit fields
self.YminR.clear()
self.YmaxR.clear()
# remove the "add to right axis" option from plotOptionMenu if
# it is currently displayed
self.plotOptionMenu.removeItem(2)
# show all options for right-hand axis
self.plotOptionMenu.addItem('Add to right axis')
self.label_YminR.show()
self.YminR.show()
self.label_YmaxR.show()
self.YmaxR.show()
self.auto_yraxes.show()
""" called whenever new data is ready to be plotted """
def updateWindow(self, newRow):
self.graph.updatePlot(newRow)
""" called by MainMenu every second """
def redrawWindow(self):
if self.updating:
self.graph.timeFrame()
self.graph.draw()
""" toggles auto-updating property of graphs in window """
def hold(self):
if self.updating == True:
self.updating = False
else:
self.updating = True
""" called when user gives input for axis limits """
def setAxes(self):
# [Are axes changing?, new min, new max]
setXAxes = [False, None, None]
setYAxes = [False, None, None]
# dealing with the current time and the time that we have to
# go back for x-axis limits
currTime = time.time()
# measured in seconds
timeBack = 0
# x-axis maximum is current time
setXAxes[2] = date_helpers.dateObj(currTime)
# get and save input from all fields
min_input = self.minutes.text()
hour_input = self.hours.text()
day_input = self.days.text()
Ymin_input = self.Ymin.text()
Ymax_input = self.Ymax.text()
axes_input = [('min', min_input), ('hour', hour_input),
('day', day_input), ('Ymin', Ymin_input),
('Ymax', Ymax_input)]
for axis_tuple in axes_input:
try:
value = float(axis_tuple[1])
if axis_tuple[0] == 'Ymin':
setYAxes[0] = True
setYAxes[1] = value
elif axis_tuple[0] == 'Ymax':
setYAxes[0] = True
setYAxes[2] = value
elif axis_tuple[0] == 'min':
setXAxes[0] = True
timeBack += value*60
elif axis_tuple[0] == 'hour':
setXAxes[0] = True
timeBack += value*60*60
elif axis_tuple[0] == 'day':
setXAxes[0] = True
timeBack += value*60*60*24
# if no input was given to field, ignore it
except ValueError:
pass
# set x-axis minimum to current time minus specified time window
setXAxes[1] = date_helpers.dateObj(currTime - timeBack)
# if y-axis limits have been changed
if setYAxes[0]:
self.graph.setYlim(amin=setYAxes[1], amax=setYAxes[2])
# if x-axis limits have been changed
if setXAxes[0]:
self.graph.auto = False
self.graph.timeWindow = timeBack
self.graph.setXlim(amin=setXAxes[1], amax=setXAxes[2])
if self.graph.hasRightAxis:
self.setRAxis()
""" called when user gives input for right-hand axis limits """
def setRAxis(self):
# [Are axes changing?, new min, new max]
setAxes = [False, None, None]
YminR_input = self.YminR.text()
YmaxR_input = self.YmaxR.text()
        try:
            setAxes[1] = float(YminR_input)
            setAxes[0] = True
        except ValueError:
            pass
        try:
            setAxes[2] = float(YmaxR_input)
            setAxes[0] = True
        except ValueError:
            pass
        # only rescale if at least one right-hand limit parsed successfully
        if setAxes[0]:
            self.graph.setRYlim(amin=setAxes[1], amax=setAxes[2])
""" called when 'Auto X' button is clicked
sets x axis limits automatically to fit all data """
def autoXAxes(self):
self.graph.auto = True
self.graph.axes.autoscale(axis ='x')
self.minutes.clear()
self.hours.clear()
self.days.clear()
""" called when 'Auto Y (left)' button is clicked
sets y axis limits automatically to fit all data """
def autoYAxes(self):
self.graph.axes.autoscale(axis ='y')
self.Ymin.clear()
self.Ymax.clear()
""" called when 'Auto Y (right)' button is clicked
sets right-hand y axis limits automatically to fit all data """
def autoYRAxes(self):
self.graph.rightAxes.autoscale(axis ='y')
self.YminR.clear()
self.YmaxR.clear()
``` |
{
"source": "johnmgregoire/JCAPGeneratePrintCode",
"score": 2
} |
#### File: johnmgregoire/JCAPGeneratePrintCode/fcns_generatecompositions.py
```python
import time, copy, pickle, itertools
import os, os.path
import sys
import numpy, pylab
def genbinarycomps(intervs, elind1, elind2, ndim=4):
aa=numpy.linspace(0.,1.,intervs+1)
c=numpy.zeros((len(aa), ndim), dtype='float64')
c[:, elind1]=aa
c[:, elind2]=1.-aa
return c
def inner_4(intervs):
return numpy.float32([(b, c, intervs-a-b-c, a) for a in range(1,intervs)[::-1] for b in range(1,intervs-a) for c in range(1,intervs-a-b) if (intervs-a-b-c)>0 and (intervs-a-b-c)<intervs][::-1])/intervs
def all_4(intervs):
return numpy.float32([(b, c, intervs-a-b-c, a) for a in numpy.arange(0,intervs+1)[::-1] for b in numpy.arange(0,intervs+1-a) for c in numpy.arange(0, intervs+1-a-b)][::-1])/intervs
def inner_3(intervs):
return numpy.float32([(b, intervs-a-b, a) for a in range(1,intervs) for b in range(1,intervs-a) if (intervs-a-b)>0 and (intervs-a-b)<intervs][::-1])/intervs
def all_3(intervs):
return numpy.float32([(b, intervs-a-b, a) for a in numpy.arange(0,intervs+1)[::-1] for b in numpy.arange(0,intervs+1-a)][::-1])/intervs
def inner_2(intervs):
return numpy.float32([(intervs-a, a) for a in range(1,intervs)][::-1])/intervs
def all_2(intervs):
return numpy.float32([(intervs-a, a) for a in numpy.arange(0,intervs+1)[::-1]][::-1])/intervs
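# Hand-checkable examples of the simplex generators above (illustrative only):
#   inner_2(4) -> [[0.25, 0.75], [0.5, 0.5], [0.75, 0.25]]   (binary interior points)
#   inner_3(3) -> [[1/3, 1/3, 1/3]]                          (single interior ternary point)
#   all_2(2)   -> [[1., 0.], [0.5, 0.5], [0., 1.]]           (binary line including endpoints)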
def combi_6el_inner4(intervs, codes=None):
combs4=list(itertools.combinations(range(6),4))
c4=inner_4(intervs)
nper=len(c4)
c6=numpy.zeros((len(combs4)*nper, 6), dtype='float32')
for count, inds4 in enumerate(combs4):
c6[count*nper:(count+1)*nper, inds4]=c4[:, :]
if not codes:
return c6
if codes=='blocks as duplicates':
co=[]
for count in range(len(c6)//nper):
co+=[count*100]*nper
return c6, numpy.array(co)
def combi_6el_inner3(intervs, codes=None):
combs3=list(itertools.combinations(range(6),3))
c3=inner_3(intervs)
nper=len(c3)
c6=numpy.zeros((len(combs3)*nper, 6), dtype='float32')
for count, inds3 in enumerate(combs3):
c6[count*nper:(count+1)*nper, inds3]=c3[::-1, :]
if not codes:
return c6
if codes=='blocks as duplicates':
co=[]
for count in range(len(c6)//nper):
co+=[count*100]*nper
return c6, numpy.array(co)
def combi_6el_inner2(intervs, codes=None):
combs2=list(itertools.combinations(range(6),2))
c2=inner_2(intervs)
nper=len(c2)
c6=numpy.zeros((len(combs2)*nper, 6), dtype='float32')
for count, inds2 in enumerate(combs2):
c6[count*nper:(count+1)*nper, inds2]=c2[::-1, :]
if not codes:
return c6
if codes=='blocks as duplicates':
co=[]
for count in range(len(c6)//nper):
co+=[count*100]*nper
return c6, numpy.array(co)
def combi_6el_inner1(intervs, codes=None):
ctemp=numpy.zeros(11, dtype='float32')
ctemp[5]=1.
c6=numpy.float32([ctemp[i:i+6] for i in range(5, -1, -1)])
if not codes:
return c6
return c6, numpy.array([0]*len(c6))
def combi_6el_all3(intervs, codes=None):
combs3=list(itertools.combinations(range(6),3))
c3=all_3(intervs)
nper=len(c3)
c6=numpy.zeros((len(combs3)*nper, 6), dtype='float32')
for count, inds3 in enumerate(combs3):
c6[count*nper:(count+1)*nper, inds3]=c3[:, :]
if not codes:
return c6
# co=numpy.zeros(len(combs2)*nper, dtype='int32')
if codes=='blocks as duplicates':
co=[]
for count in range(len(c6)//nper):
co+=[count*100]*nper
return c6, numpy.array(co)
def combi_6el_all2(intervs, codes=None):
combs2=list(itertools.combinations(range(6),2))
c2=all_2(intervs)
nper=len(c2)
c6=numpy.zeros((len(combs2)*nper, 6), dtype='float32')
for count, inds2 in enumerate(combs2):
c6[count*nper:(count+1)*nper, inds2]=c2[:, :]
if not codes:
return c6
# co=numpy.zeros(len(combs2)*nper, dtype='int32')
if codes=='blocks as duplicates':
co=[]
for count in range(len(c6)//nper):
co+=[count*100]*nper
return c6, numpy.array(co)
def get_custom_select_comps_6els_5intervs():
#unary
comps=[[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0][i:i+6] for i in [5, 4, 3, 2, 1, 0]]
#binary
perm2=list(itertools.combinations(range(6),2))
for count, tup in enumerate(perm2):
tup=list(tup)
for i in tup:
c0=numpy.zeros(6, dtype='float32')
c0[tup]=.2
c0[i]=.8
comps+=[c0]
#tern
perm3=list(itertools.combinations(range(6),3))
for count, tup in enumerate(perm3):
tup=list(tup)
for i in tup:
c0=numpy.zeros(6, dtype='float32')
c0[tup]=.2#set all 3 of ternary space to 0.2
c0[i]=.6#then change 1 of them to 0.6
comps+=[c0]
#quat
perm4=list(itertools.combinations(range(6),4))
for count, tup in enumerate(perm4):
tup=list(tup)
for i in tup:
c0=numpy.zeros(6, dtype='float32')
            c0[tup]=.2#set all 4 of quaternary space to 0.2
            c0[i]=.4#then change 1 of them to 0.4
comps+=[c0]
return numpy.array(comps)
sys.path.append('C:/Users/Gregoire/Documents/PythonCode/ternaryplot')
#from myquaternaryutility import QuaternaryPlot
#from myternaryutility import TernaryPlot
#
#for d in dlist:
# c=numpy.array([d[el] for el in ['A', 'B', 'C', 'D']])
# if c.sum()>0:
# c/=c.sum()
# d['compositions']=c
#
#carr=numpy.array([d['compositions'] for d in dlist])
#stpq=QuaternaryPlot(111)
#stpq.scatter(carr)
#
#pylab.figure()
#for count, tv in enumerate(comps_d):
# stpq=TernaryPlot((4, 2, count+1))
# tvnorm=[tvv/tvv.sum() for tvv in tv]
# stpq.scatter(tvnorm, marker='.', c='r', edgecolor='none')
# if count<5:
# ttt=tc
# else:
# ttt=tc19
# stpq.scatter(ttt, marker='.', c='g', edgecolor='none')
#
#pylab.show()
#####################
#indsnonz=[[i,j] for i in range(6) for j in range(i,6)]
#crotate2=[]
#for inds in indsnonz:
# z=numpy.zeros(6, dtype='float32')
# z[inds]=1.
# z/=z.sum()
# crotate2+=[z]
#
#indsnonz=[i for i in range(-1, 6)]
#indsnonz=indsnonz*5
#indsnonz=indsnonz[1:]
#ccompref=[]
#for inds in indsnonz:
# z=numpy.zeros(6, dtype='float32')
# if inds>=0:
# z[inds]=1.
# ccompref+=[z]
```
#### File: johnmgregoire/JCAPGeneratePrintCode/gen_9cellfootprint_co2rr_platemaps_v2_NaNcomps.py
```python
import numpy,pylab
def rgb_comp(terncoordlist):
# if numpy.array(terncoordlist).sum()==0:
# return numpy.array([1, 1, 1.])
cmy_cmyk=lambda a:a[:3]*(1.-a[3])+a[3]
rgb_cmy=lambda a:1.-a
rgb_cmyk=lambda a:rgb_cmy(cmy_cmyk(a))
return numpy.array([rgb_cmyk(numpy.array(a)) for a in terncoordlist])
sample_diameter=100*25.4/300.#
x_pitch__cell_od=112*25.4/300.
y_pitch__cell_pitch=128*25.4/300.
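# the geometry above is specified in 1/300-inch printer steps and converted to mm
# via the 25.4/300 factor (an inference from the constants, not stated in the source)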
xsub, ysub=150., 100.
xvals=numpy.arange(14)*x_pitch__cell_od
xvals+=(150.-(xvals.max()-xvals.min()))/2.
yvals=numpy.arange(9)*y_pitch__cell_pitch
yvals+=(100.-(yvals.max()-yvals.min()))/2.
#xyf=numpy.array([[xv, yv] for yv in yvals[::-1] for xv in xvals])
xyf=numpy.array([[xv, yv] for xv in xvals for yv in yvals[::-1]])
xyfid=numpy.array([[]])
xypureref=[]
xyspecref=[]
#dfid=22*sc
xyfid=[]#numpy.array([[-dfid, 0], [dfid, 0], [0, dfid], [-7*sc, -21*sc]])
#xyfid=numpy.array([(0, y) for y in [-dfid, dfid]]+[(x, 0) for x in [-dfid, dfid]])
intervs=8
binarylist_compslist=[]
for i, j in [(0, 1), (0, 2), (1, 2), (1, 0), (2, 0), (2, 1), (0, 1), (0, 2), (1, 2), (1, 0), (2, 0), (2, 1)]:
tc=numpy.zeros((intervs+1, 4), dtype='float64')
tc[:, j]=1.*numpy.arange(intervs+1)/intervs
tc[:, i]=1.-tc[:, j]
binarylist_compslist+=[list(tc)]
comps1=[(1., 0., 0., 0.), (0., 1., 0., 0.), (0., 0., 1., 0.)]
compnone=[0., 0., 0., 0.]
colnone=[1., 1., 1.]
spaces=[0]*999
compsf=[]
colsf=[]
codef=[]
num_code_segment=12+3+3#12 for the binary lines and then 3 repeats of elementals in 2 different columns
comps1=numpy.array(comps1)
for sp, cd, compsx in zip(spaces, numpy.arange(num_code_segment, dtype='int32')*100, binarylist_compslist+[comps1, comps1, comps1]+[comps1, comps1, comps1]):
compsf+=list(compsx)+[compnone]*sp
colsf+=list(rgb_comp(compsx))+[colnone]*sp
codef+=[cd]*len(compsx)+[4]*sp
print len(compsf)
pylab.figure()
comps=compsf[:len(xyf)]
cols=colsf[:len(xyf)]
code=codef[:len(xyf)]
cols=numpy.array(cols)
code=numpy.array(code)
xy=xyf
print len(comps), len(xy)
#temp=numpy.zeros(16)
#temp[8]=1.
#comps8pure=numpy.array([temp[i:i+8] for i in [0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 6, 5]])
#comps8pureinspec=numpy.array([temp[i:i+8] for i in [0, 8, 0, 7, 0, 6, 0, 5]])
#codeinspec=[1, 2, 1, 2, 1, 2, 1, 2]
#colinspec=rgb_comp(comps8pureinspec[:, :4])
#colinpure=rgb_comp(comps8pure[:, :4])
#codeinpure=[4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2]
#
#comps8pure=numpy.concatenate([comps8pure])
#codepure=numpy.concatenate([codeinpure])
#colpure=numpy.concatenate([colinpure])
#
#
#rgb_comp(comps1)
#sortmetricspec=numpy.array([x*100-y for x, y in xyspecref])
#sortindsspec=numpy.argsort(sortmetricspec)
#xyspecref=xyspecref[sortindsspec]
#comps8spec=[]
#codespec=[]
#colspec=[]
#jshift=0
#for i in range(len(xyspecref)):
## if i in [29, 68]:
## jshift+=1
# j=(i-jshift)%len(comps8pureinspec)
# comps8spec+=[comps8pureinspec[j]]
# codespec+=[codeinspec[j]]
# colspec+=[colinspec[j]]
temp=numpy.zeros(4)
comps8=numpy.array([numpy.concatenate([c, temp]) for c in comps])
temp=numpy.zeros(8)
#comps8spec=numpy.array([temp for q in xyspecref])
xyall=numpy.concatenate([xy])
compall=numpy.concatenate([comps8])
colall=numpy.concatenate([cols])
codeall=numpy.concatenate([code])
#xyall=numpy.concatenate([xy, xyspecref])
#compall=numpy.concatenate([comps8, comps8spec])
#colall=numpy.concatenate([cols, colspec])
#codeall=numpy.concatenate([code, codespec])
sortmetric=numpy.array([-y*1000+x for x, y in xyall])
sortinds=numpy.argsort(sortmetric)
xyallsort=xyall[sortinds]
codeallsort=codeall[sortinds]
compallsort=compall[sortinds]
colallsort=colall[sortinds]
startsmp=1
intfmt=lambda x:'%d' %x
floatarrfmt=lambda x:', '.join(['%.3f' %xx for xx in x])
linestr=lambda smp, xy, cmp, cd:', '.join((intfmt(smp), floatarrfmt(xy), floatarrfmt([sample_diameter, sample_diameter]), floatarrfmt(cmp), intfmt(cd)))
datastr='\n'.join([linestr(i+startsmp, xy, cmp, cd) for i, (xy, cmp, cd) in enumerate(zip(xyallsort, compallsort, codeallsort))])
fidstr=', '.join(['(%.3f, %.3f)' %tuple(xyv) for xyv in xyfid])
a='% Xfiducial, Yfiducial='
b=' mm'
c='% Sample, x(mm), y(mm), dx(mm), dy(mm), A(fraction), B(fraction), C(fraction), D(fraction), E(fraction), F(fraction), G(fraction), H(fraction), code(0=sample; 1=spectral reference; 2=ABCD control; 3=EFGH control; 4=half thickness; 5=doublethickness; 10=empty)'
mainstr='%s%s%s\n%s\n%s' %(a, fidstr, b, c, datastr)
fn=r'D:\Google Drive\Documents\caltechwork\platemaps\201701co2rr_v1\co2rr_10x14_v1'
f=open(fn+'.txt', mode='w')
f.write(mainstr)
f.close()
#marks=['s', 's', 's', 'o', 'v', '^', '','','','','.']
def marks(cd):
return 'o'
# dup=cd//100
# cd2=cd%100
# if cd>0 and cd<5:
# return ['', 's', 's', 'o', '.'][cd]
# return (['s']*6+['^', '>', 'v', '<', 's'])[dup]
print set(codeallsort)
for x, y, col, cd in zip(xyallsort.T[0],xyallsort.T[1], colallsort, codeallsort):
if cd in [4]:
pylab.scatter([x], [y],color='k',s=2,marker=marks(cd), edgecolor='k')
elif cd in [1]:
pylab.scatter([x], [y],color='w',s=2,marker=marks(cd), edgecolor='k')
else:
pylab.scatter([x], [y],color=col,s=500,marker=marks(cd), edgecolor='none')
#pylab.scatter(xypureref.T[0],xypureref.T[1],color='w', edgecolor='r',s=10,marker='s', lw=1)
if len(xyfid)>0:
pylab.plot(xyfid.T[0],xyfid.T[1],'k+', ms=6)
pylab.xlim(0, 150)
pylab.ylim(0, 100)
pylab.gca().set_aspect(1)
lablist=['%d:%d' %(cd, (codeallsort==cd).sum()) for cd in [0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1, 2, 3, 4]]
pylab.title(','.join(lablist[:6])+'\n'+','.join(lablist[6:]))
pylab.savefig(fn+'.png', dpi=300)
pylab.show()
```
#### File: johnmgregoire/JCAPGeneratePrintCode/genplatemap_100mm_4copies10interv.py
```python
import numpy,pylab
def rgb_comp(terncoordlist):
cmy_cmyk=lambda a:a[:3]*(1.-a[3])+a[3]
rgb_cmy=lambda a:1.-a
rgb_cmyk=lambda a:rgb_cmy(cmy_cmyk(a))
return numpy.array([rgb_cmyk(numpy.array(a)) for a in terncoordlist])
sc=2.032
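# sc: grid pitch in mm (2.032 mm = 0.080 in); the xy lattice below is defined in
# units of sc and scaled to mm where needed (an inference, not stated in the source)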
#xy=numpy.array([(x,y) for x in numpy.linspace(-25,25,51) for y in numpy.linspace(-25,25,51) if (x**2+y**2)<(46/sc)**2])
xyfid=numpy.array([[]])
refl=[-15, -7, 1, 9, 17]
#refl=[-13, -5, 3, 11, 19]
rcrit=42.5
ncrit=20
xyf=numpy.array([(x,y) for y in numpy.linspace(25,-25,51) for x in numpy.linspace(-25,25,51) if (x**2+y**2)<(rcrit/sc)**2 and x>=-ncrit and x<=ncrit and y>=-ncrit and y<=ncrit and (y!=-ncrit) and not (x in refl)])
xypureref=numpy.array([(x,y) for y in numpy.linspace(25,-25,51) for x in numpy.linspace(-25,25,51) if (x**2+y**2)<(rcrit/sc)**2 and x>=-ncrit and x<=ncrit and y>=-ncrit and y<=ncrit and (y==-ncrit ) and not (x in refl)])#and x in [2, 3, 4, 5]
xyspecref=numpy.array([(x,y) for y in numpy.linspace(25,-25,51) for x in numpy.linspace(-25,25,51) if (x**2+y**2)<(rcrit/sc)**2 and x>=-ncrit and x<=ncrit and y>=-ncrit and y<=ncrit and (x in refl)])
xypureref*=sc
xyspecref*=sc
dfid=22*sc
xyfid=numpy.array([[-dfid, 0], [dfid, 0], [0, dfid], [-7*sc, -21*sc]])
#xyfid=numpy.array([(0, y) for y in [-dfid, dfid]]+[(x, 0) for x in [-dfid, dfid]])
intervs=10
comps1=[(1.0*b/intervs, 1.0*c/intervs, 1.0*(intervs-a-b-c)/intervs, 1.0*a/intervs) for a in numpy.arange(0,intervs+1)[::-1] for b in numpy.arange(0,intervs+1-a) for c in numpy.arange(0, intervs+1-a-b)][::-1]
comps2=[]
cols1=list(rgb_comp(comps1))
cols2=list(rgb_comp(comps2))
compnone=[0., 0., 0., 0.]
colnone=[1., 1., 1.]
spaces=[11, 10, 20, 1]
compsf=[]
colsf=[]
codef=[]
comps1=numpy.array(comps1)
for sp, cd, th in zip(spaces, [0, 20, 30, 130], [1., .2, .5, .5]):
compsf+=list(comps1*th)+[compnone]*sp
colsf+=cols1+[colnone]*sp
codef+=[cd]*len(comps1)+[4]*sp
for PLATE in [1]:
pylab.figure()
if PLATE==1:
comps=compsf[:len(xyf)]
cols=colsf[:len(xyf)]
code=codef[:len(xyf)]
else:
comps=compsf[len(xyf):]
cols=colsf[len(xyf):]
code=codef[len(xyf):]
cols=numpy.array(cols)
code=numpy.array(code)
xy=xyf[:len(comps)]*sc
if PLATE==2:
fct=1.-.5*(code==20)+1.*(code==50)
comps=[numpy.array(c)*fc for c, fc in zip(comps, fct)]
print len(comps), len(xy)
temp=numpy.zeros(16)
temp[8]=1.
comps8pure=numpy.array([temp[i:i+8] for i in [8, 7, 6, 5]])
comps8pureinspec=numpy.array([temp[i:i+8] for i in [0, 4, 0, 3, 0, 2, 0, 8, 0, 7, 0, 6, 0, 5]])
codeinspec=[1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1, 2, 1, 2]
colinspec=rgb_comp(comps8pureinspec[:, :4]+comps8pureinspec[:, 4:])
colinpure=rgb_comp(comps8pure[:, :4])
codeinpure=[2]*len(colinpure)
if PLATE==1:
comps8pure=numpy.concatenate([comps8pure, comps8pure*.5, comps8pure*.2])
codepure=numpy.concatenate([codeinpure, [32]*len(codeinpure), [22]*len(codeinpure)])
colpure=numpy.concatenate([colinpure, colinpure, colinpure])
elif PLATE==2:
comps8pure=numpy.concatenate([comps8pure, comps8pure*.5, comps8pure*2.])
codepure=numpy.concatenate([codeinpure, [32]*len(codeinpure), [22]*len(codeinpure)])
colpure=numpy.concatenate([colinpure, colinpure, colinpure])
rgb_comp(comps1)
sortmetricspec=numpy.array([x*100-y for x, y in xyspecref])
sortindsspec=numpy.argsort(sortmetricspec)
xyspecref=xyspecref[sortindsspec]
comps8spec=[]
codespec=[]
colspec=[]
jshift=0
for i in range(len(xyspecref)):
if i in [29, 68]:
jshift+=1
j=(i-jshift)%len(comps8pureinspec)
comps8spec+=[comps8pureinspec[j]]
codespec+=[codeinspec[j]]
colspec+=[colinspec[j]]
temp=numpy.zeros(4)
comps8=numpy.array([numpy.concatenate([c, temp]) for c in comps])
temp=numpy.zeros(8)
#comps8spec=numpy.array([temp for q in xyspecref])
xyall=numpy.concatenate([xy, xypureref, xyspecref])
compall=numpy.concatenate([comps8, comps8pure, comps8spec])
colall=numpy.concatenate([cols, colpure, colspec])
codeall=numpy.concatenate([code, codepure, codespec])
sortmetric=numpy.array([-y*100+x for x, y in xyall])
sortinds=numpy.argsort(sortmetric)
xyallsort=xyall[sortinds]
codeallsort=codeall[sortinds]
compallsort=compall[sortinds]
colallsort=colall[sortinds]
xyallsortshift=numpy.array([[x+50., y+50-2.71] for x, y in xyallsort])
xyfidshift=numpy.array([[x+50., y+50-2.71] for x, y in xyfid])
if PLATE==1:
startsmp=1
else:
startsmp=1370
intfmt=lambda x:'%d' %x
floatarrfmt=lambda x:', '.join(['%.3f' %xx for xx in x])
linestr=lambda smp, xy, cmp, cd:', '.join((intfmt(smp), floatarrfmt(xy), floatarrfmt([1.016, 1.016]), floatarrfmt(cmp), intfmt(cd)))
datastr='\n'.join([linestr(i+startsmp, xy, cmp, cd) for i, (xy, cmp, cd) in enumerate(zip(xyallsortshift, compallsort, codeallsort))])
fidstr=', '.join(['(%.3f, %.3f)' %tuple(xyv) for xyv in xyfidshift])
a='% Xfiducial, Yfiducial='
b=' mm'
c='% Sample, x(mm), y(mm), dx(mm), dy(mm), A(fraction), B(fraction), C(fraction), D(fraction), E(fraction), F(fraction), G(fraction), H(fraction), code(0=sample; 1=spectral reference; 2=ABCD control; 3=EFGH control; 4=half thickness; 5=doublethickness; 10=empty)'
mainstr='%s%s%s\n%s\n%s' %(a, fidstr, b, c, datastr)
fn='C:/Users/Gregoire/Documents/CaltechWork/platemaps/100mm_4copies10interv/100mm_4copies10interv'
if PLATE==1:
fn+='1'
elif PLATE==2:
fn+='2'
f=open(fn+'.txt', mode='w')
f.write(mainstr)
f.close()
marks=['s', 's', 's', 'o', 'v', '^', '','','','','.']
def marks(cd):
dup=cd//100
cd=cd%100
if cd<5:
return ['s', 's', 's', 'o', '.'][cd]
if (cd//10)<4:
return 'v'
else:
return '^'
circ=pylab.Circle((50.,50.-2.71),radius=50,edgecolor='k',lw=1,facecolor='none')
ax=pylab.subplot(111)
ax.add_patch(circ)
pylab.plot([-16.25+50, 16.24+50], [0, 0], 'k-', lw=2)
for x, y, col, cd in zip(xyallsortshift.T[0],xyallsortshift.T[1], colallsort, codeallsort):
if cd%10 in [1, 2, 3, 9]:
pylab.scatter(x, y,color=col,s=14,marker=marks(cd), edgecolor='r')
elif cd in [4]:
pylab.scatter(x, y,color='k',s=2,marker=marks(cd), edgecolor='k')
else:
pylab.scatter(x, y,color=col,s=14,marker=marks(cd), edgecolor='none')
#pylab.scatter(xypureref.T[0],xypureref.T[1],color='w', edgecolor='r',s=10,marker='s', lw=1)
if len(xyfidshift)>0:
pylab.plot(xyfidshift.T[0],xyfidshift.T[1],'k+', ms=6)
pylab.xlim(0, 100)
pylab.ylim(0, 100)
ax.set_aspect(1)
lablist=['%d:%d' %(cd, (codeallsort==cd).sum()) for cd in [0, 20, 30, 130, 1, 2, 3, 4, 22, 32]]
pylab.title(','.join(lablist))
pylab.savefig(fn+'.png', dpi=300)
pylab.show()
```
#### File: johnmgregoire/JCAPGeneratePrintCode/platemap_randomizebycode_tempshift7.py
```python
import time, copy, pickle
import os, os.path
import sys, random
import numpy, pylab
sys.path.append('C:/Users/Gregoire/Documents/PythonCode/JCAP')
from readplatemap import *
def writerandomizedplatemap(modelpath, newpath, randcodes):
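    # for each code in randcodes, shuffle which composition/code data is assigned
    # to which printed position, keeping the Sample number and x, y coordinates
    # of every position fixed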
writelines=[]
f=open(modelpath, mode='r')
ls=f.readlines()[:2]
writelines+=[l.strip() for l in ls]
f.close()
dlist=readsingleplatemaptxt(modelpath, returnfiducials=False)
rdlist=copy.deepcopy(dlist)
ks=dlist[0].keys()
for rc in randcodes:
inds=[i for i, d in enumerate(dlist) if d['code']==rc]
smps=[d['Sample'] for i, d in enumerate(dlist) if d['code']==rc]
rinds=copy.copy(inds)
random.shuffle(rinds)
for i, j, smp in zip(inds, rinds, smps):
for k in ks:
if not k in ['Sample', 'x', 'y']:
rdlist[i][k]=dlist[j][k]
#SHIFT Y VALUES
for d in rdlist:
d['y']-=2.71
print set([d['code'] for d in rdlist])
print len([d['Sample'] for d in rdlist if d['code']==0])
k_f=[\
('Sample','%04d'),\
('x','%.2f'),\
('y','%.2f'),\
('dx','%.2f'),\
('dx','%.2f'),\
('A','%.3f'),\
('B','%.3f'),\
('C','%.3f'),\
('D','%.3f'),\
('E','%.3f'),\
('F','%.3f'),\
('G','%.3f'),\
('H','%.3f'),\
('code','%d'),\
]
writelines+=[', '.join([f %d[k] for k, f in k_f]) for d in rdlist]
f=open(newpath, mode='w')
f.write('\n'.join(writelines))
f.close()
fold='C:/Users/Gregoire/Documents/CaltechWork/platemaps/'
#for p1 in ['v8/plate500_100mm_v8_pl1.txt', 'v8/plate500_100mm_v8_pl2.txt']:
# p2=p1.replace('v8', 'v8.2')
# writerandomizedplatemap(os.path.join(fold, p1), os.path.join(fold, p2), [0, 20, 50])
for p1 in ['v6.2_6.3/plate555_1map_tempfiducialshifted.txt']:
p2='v6.2_6.3/plate555_randomized.txt'
writerandomizedplatemap(os.path.join(fold, p1), os.path.join(fold, p2), [0])
#for p1, p2 in zip(['v5.5/0020-04-0541-mp_newcodes.txt', 'v5.5/0021-04-0541-mp_newcodes.txt', 'v5.5/0022-04-0541-mp_newcodes.txt'], \
# ['v5.5/plate333_1_v5_5.txt', 'v5.5/plate333_2_v5_5.txt', 'v5.5/plate333_3_v5_5.txt']):
# writerandomizedplatemap(os.path.join(fold, p1), os.path.join(fold, p2), [0])
``` |
{
"source": "johnmgregoire/JCAPPlatemapVisualize",
"score": 2
} |
#### File: johnmgregoire/JCAPPlatemapVisualize/platemap_align_to_image.py
```python
import time
import os, os.path
import sys
import numpy
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from plate_image_align_Dialog import *
def start(previousmm=None):
mainapp=QApplication(sys.argv)
form=MainMenu(previousmm)
form.show()
form.setFocus()
global PARENT
PARENT=form
mainapp.exec_()
return form
mm=None
mm=start()
```
#### File: johnmgregoire/JCAPPlatemapVisualize/platemap_ui.py
```python
import time
import time
import sys
import numpy
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import operator
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
try:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
except ImportError:
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import numpy.ma as ma
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import pylab
import pickle
from readmap import *
PyCodePath=os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
print PyCodePath
from matplotlib.ticker import FuncFormatter
from matplotlib.ticker import ScalarFormatter
def myexpformat_4digs(x, pos):
return '%.3e' %x
# for ndigs in range(4):
# lab=(('%.'+'%d' %ndigs+'e') %x).replace('e+0','e').replace('e+','e').replace('e0','').replace('e-0','e')
# if eval(lab)==x:
# return lab
# return lab
ExpTickLabels=FuncFormatter(myexpformat_4digs)
RegTickLabels=matplotlib.ticker.ScalarFormatter()
def autotickformat(ax, x=False, y=False, ndec=3):
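    # switch an axis to exponential tick labels when its limits are more than
    # ~ndec decades above or below 1; otherwise keep plain scalar labels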
for bl, xax, lims in zip([x, y], [ax.xaxis, ax.yaxis], [ax.get_xlim(), ax.get_ylim()]):
if bl:
try:
doit=numpy.max(numpy.log10(numpy.abs(numpy.array(lims))))<(-ndec)
doit=doit or numpy.min(numpy.log10(numpy.abs(numpy.array(lims))))>ndec
except:
print 'error on axis formatter for lims ', lims
continue
if doit:
xax.set_major_formatter(ExpTickLabels)
else:
xax.set_major_formatter(RegTickLabels)
def autocolorbarformat(lims, ndec=3):
try:
doit=numpy.max(numpy.log10(numpy.abs(numpy.array(lims))))<(-ndec)
doit=doit or numpy.min(numpy.log10(numpy.abs(numpy.array(lims))))>ndec
except:
print 'error on axis formatter for lims ', lims
return
if doit:
return ExpTickLabels
else:
return RegTickLabels
wd=os.getcwd()
sys.path.append(os.path.join(PyCodePath,'PythonCompositionPlots'))
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
from quaternary_FOM_stackedtern5 import *
from quaternary_FOM_stackedtern10 import *
from quaternary_FOM_stackedtern20 import *
from quaternary_FOM_stackedtern30 import *
#sys.path.append(os.path.join(PyCodePath,'JCAPPyDBComm'))
#from mysql_dbcommlib import *
#
#sys.path.append(os.path.join(PyCodePath, 'PythonCodeSecureFiles'))
#from paths import *
#if os.path.isdir(EchemSavePath):
# os.chdir(EchemSavePath)
try:
wd=os.path.split(os.path.split(os.path.split(os.path.split(os.path.realpath(__file__))[0])[0])[0])[0]
os.chdir(wd)
except:
pass
class messageDialog(QDialog):
def __init__(self, parent=None, title=''):
super(messageDialog, self).__init__(parent)
self.setWindowTitle(title)
mainlayout=QGridLayout()
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
QObject.connect(self.buttonBox, SIGNAL("rejected()"), self.reject)
mainlayout.addWidget(self.buttonBox, 0, 0)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
def ExitRoutine(self):
return
def mygetopenfile(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfn = unicode(QFileDialog.getOpenFileName(xparent,''.join(['Select file to open:', markstr]),os.path.join(xpath, filename).replace('\\','/')))
xparent.destroy()
xapp.quit()
return returnfn
return unicode(QFileDialog.getOpenFileName(parent,''.join(['Select file to open: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
def mygetopenfiles(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfns=QFileDialog.getOpenFileNames(xparent,''.join(['Select file to open:', markstr]),os.path.join(xpath, filename).replace('\\','/'))
xparent.destroy()
xapp.quit()
else:
returnfns=QFileDialog.getOpenFileNames(parent,''.join(['Select file to open: ', markstr]),os.path.join(xpath, filename).replace('\\','/'))
return [str(s) for s in returnfns]
def mygetsavefile(parent=None, xpath="%s" % os.getcwd(),markstr='', filename='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfn = unicode(QFileDialog.getSaveFileName(xparent,''.join(['Select file for save: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
xparent.destroy()
xapp.quit()
return returnfn
return unicode(QFileDialog.getSaveFileName(parent,''.join(['Select file for save: ', markstr]),os.path.join(xpath, filename).replace('\\','/')))
def mygetdir(parent=None, xpath="%s" % os.getcwd(),markstr='' ):
if parent is None:
xapp = QApplication(sys.argv)
xparent = QWidget()
returnfn = unicode(QFileDialog.getExistingDirectory(xparent,''.join(['Select directory:', markstr]), xpath))
xparent.destroy()
xapp.quit()
return returnfn
return unicode(QFileDialog.getExistingDirectory(parent,''.join(['Select directory:', markstr]), xpath))
def userinputcaller(parent, inputs=[('testnumber', int)], title='Enter values', cancelallowed=True):
problem=True
while problem:
idialog=userinputDialog(parent, inputs, title)
idialog.exec_()
problem=idialog.problem
if not idialog.ok and cancelallowed:
return None
inputs=[(tup[0], tup[1], s) for tup, s in zip(inputs, idialog.inputstrlist)]
return idialog.ans
class userinputDialog(QDialog):
def __init__(self, parent, inputs=[('testnumber', int, '')], title='Enter values'):
super(userinputDialog, self).__init__(parent)
self.setWindowTitle(title)
mainlayout=QGridLayout()
self.parent=parent
self.inputs=inputs
self.lelist=[]
for i, tup in enumerate(self.inputs):
lab=QLabel()
lab.setText(tup[0])
le=QLineEdit()
if len(tup)>2:
le.setText(tup[2])
self.lelist+=[le]
mainlayout.addWidget(lab, 0, i, 1, 1)
mainlayout.addWidget(le, 1, i, 1, 1)
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
mainlayout.addWidget(self.buttonBox, 2, 0, len(inputs), 1)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
self.setLayout(mainlayout)
QMetaObject.connectSlotsByName(self)
self.problem=False
self.ok=False
def ExitRoutine(self):
self.ok=True
self.problem=False
self.ans=[]
self.inputstrlist=[str(le.text()).strip() for le in self.lelist]
for s, tup in zip(self.inputstrlist, self.inputs):
if tup[1]==str:
try:
self.ans+=[s]
except:
self.problem=True
break
else:
try:
n=myeval(s)
self.ans+=[tup[1](n)]
except:
self.problem=True
break
if self.problem:
idialog=messageDialog(self, 'problem with conversion of ' + tup[0])
idialog.exec_()
#class selectdbsessionsDialog(QDialog):
# def __init__(self, parent, ex_trange_techl, maxsessions=15, title='Select DB experiment sessions to analyze'):
# super(selectdbsessionsDialog, self).__init__(parent)
# self.setWindowTitle(title)
# mainlayout=QVBoxLayout()
#
# self.cblist=[]
# self.cbinds=[]
# for count, (ex, (t0, t1), techl) in enumerate(ex_trange_techl[:maxsessions]):
# cb=QCheckBox()
# cb.setText('exp %d: %s to %s, %s' %(ex, str(t0), str(t1), ','.join(techl)))
# cb.setChecked(False)
# mainlayout.addWidget(cb)
# self.cblist+=[cb]
# self.cbinds+=[[count]]
# if len(ex_trange_techl)>maxsessions:
# cb=QCheckBox()
# ex, (t0, t1), techl=ex_trange_techl[maxsessions]
# ex2, (t02, t12), techl2=ex_trange_techl[-1]
# techl=list(set(techl+techl2))
# cb.setText('exp %d-%d: %s to %s, %s' %(ex, ex2, str(t0), str(t12), ','.join(techl)))
# cb.setChecked(True)
# mainlayout.addWidget(cb)
# self.cblist+=[cb]
# self.cbinds+=[range(maxsessions, len(ex_trange_techl))]
# cb.setChecked(True)
#
# self.buttonBox = QDialogButtonBox(self)
# self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
# self.buttonBox.setOrientation(Qt.Horizontal)
# self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
# QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
# mainlayout.addWidget(self.buttonBox)
#
# QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
# self.setLayout(mainlayout)
# QMetaObject.connectSlotsByName(self)
# def ExitRoutine(self):
# self.selectinds=[]
# for cb, l in zip(self.cblist, self.cbinds):
# if cb.isChecked():
# self.selectinds+=l
#
class echem10axesWidget(QDialog):
def __init__(self, parent=None, ellabels=['A', 'B', 'C', 'D']):
super(echem10axesWidget, self).__init__(parent)
mainlayout=QVBoxLayout()
self.plotw=plotwidget(self)
self.plotw.fig.clf()
self.axl, self.stpl=make10ternaxes(fig=self.plotw.fig, ellabels=ellabels)
        mainlayout.addWidget(self.plotw)
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
mainlayout.addWidget(self.buttonBox)
self.setLayout(mainlayout)
# def plot(self, d, cb=True):
# if 'fomlabel' in d.keys():
# cblabel=d['fomlabel']
# else:
# cblabel=''
# scatter_10axes(d['comps'], d['fom'], self.stpl, s=18, edgecolors='none', cb=cb, cblabel=cblabel, cmap=d['cmap'], norm=d['norm'])
class echem30axesWidget(QDialog):
def __init__(self, parent=None, ellabels=['A', 'B', 'C', 'D']):
super(echem30axesWidget, self).__init__(parent)
mainlayout=QVBoxLayout()
self.plotw=plotwidget(self)
self.plotw.fig.clf()
self.axl, self.stpl=make30ternaxes(fig=self.plotw.fig, ellabels=ellabels)
mainlayout.addWidget(self.plotw)
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
mainlayout.addWidget(self.buttonBox)
self.setLayout(mainlayout)
# def plot(self, d, cb=True):
# if 'fomlabel' in d.keys():
# cblabel=d['fomlabel']
# else:
# cblabel=''
# scatter_30axes(d['comps'], d['fom'], self.stpl, s=18, edgecolors='none', cb=cb, cblabel=cblabel, cmap=d['cmap'], norm=d['norm'])
class messageDialog(QDialog):
def __init__(self, parent=None, title=''):
super(messageDialog, self).__init__(parent)
self.setWindowTitle(title)
mainlayout=QGridLayout()
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
QObject.connect(self.buttonBox, SIGNAL("rejected()"), self.reject)
## mainlayout.addWidget(self.buttonBox, 0, 0)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
def ExitRoutine(self):
return
class plotwidget(FigureCanvas):
def __init__(self, parent, width=12, height=6, dpi=72, projection3d=False):
        #plotdata can be a 2d array for an image plot, a list of two 1d arrays for an x-y plot, or a list of lists of two 1d arrays
self.fig=Figure(figsize=(width, height), dpi=dpi)
if projection3d:
self.axes=self.fig.add_subplot(111, navigate=True, projection='3d')
else:
self.axes=self.fig.add_subplot(111, navigate=True)
self.axes.hold(True)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
#self.parent=parent
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
#NavigationToolbar(self, parent)
NavigationToolbar(self, self)
self.mpl_connect('button_press_event', self.myclick)
self.clicklist=[]
def myclick(self, event):
if not (event.xdata is None or event.ydata is None):
arrayxy=[event.xdata, event.ydata]
            print 'clicked on image: array indices ', arrayxy, ' using button', event.button
self.clicklist+=[arrayxy]
self.emit(SIGNAL("genericclickonplot"), [event.xdata, event.ydata, event.button, event.inaxes])
``` |
{
"source": "johnmgregoire/JCAPRamanDataProcess",
"score": 2
} |
#### File: johnmgregoire/JCAPRamanDataProcess/raman_agilefd_setup.py
```python
import sys,os, pickle, numpy, pylab, operator, itertools,numpy as np
import cv2
from shutil import copy as copyfile
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib.pyplot as plt
plt.ion()
from DataParseApp import dataparseDialog
from sklearn.decomposition import NMF
from PlateAlignViaEdge_v8 import MainMenu,save_raman_udi
projectpath=os.path.split(os.path.abspath(__file__))[0]
sys.path.append(os.path.join(projectpath,'ui'))
pythoncodepath=os.path.split(projectpath)[0]
jcapdataprocesspath=os.path.join(pythoncodepath, 'JCAPDataProcess')
sys.path.append(jcapdataprocesspath)
from VisualizeDataApp import visdataDialog
sys.path.append(os.path.join(jcapdataprocesspath,'AuxPrograms'))
from fcns_ui import *
from fcns_io import *
platemapvisprocesspath=os.path.join(pythoncodepath, 'JCAPPlatemapVisualize')
sys.path.append(platemapvisprocesspath)
from plate_image_align_Dialog import plateimagealignDialog
avefiles=[]
parentfold=r'K:\users\hte\Raman\39664\for AgileFD'
sys.path.append(parentfold)
from parameters_agilefd import *
print plateidstr
smp_fold=os.path.join(parentfold,'samples')
smp_spect_fold=os.path.join(parentfold,'samples','sample_spectra')
substrate_fold=os.path.join(parentfold,'substrate')
substrate_spect_fold=os.path.join(parentfold,'substrate','sample_spectra')
if gen_smp_substrate_spect:
if not 'sample_spectra' in os.listdir(parentfold):
subfolders=map(lambda x:os.path.join(parentfold,x),os.listdir(parentfold))
subfolders=[x for x in subfolders if os.path.isdir(x)]
for subf in subfolders:
if not 'sample_spectra' in os.listdir(subf):
continue
else:
temp_avefiles=filter(lambda x:x.endswith('_ave.rmn'),os.listdir(os.path.join(subf,'sample_spectra')))
avefiles+=map(lambda x:os.path.join(subf,'sample_spectra',x),temp_avefiles)
if not os.path.exists(smp_spect_fold):
os.makedirs(smp_spect_fold)
if not os.path.exists(substrate_spect_fold):
os.makedirs(substrate_spect_fold)
for x in avefiles:
if os.path.basename(x).split('Sample')[-1][0]=='-':
newf=x.replace('Sample-','Sample')
copyfile(x,os.path.join(substrate_spect_fold,os.path.basename(newf)))
else:
copyfile(x,os.path.join(smp_spect_fold,os.path.basename(x)))
if gen_udis:
mainapp=QApplication(sys.argv)
form=MainMenu(None, execute=False)
visui=visdataDialog(form, title='Visualize ANA, EXP, RUN data')
smp_pathd={'spectrafolder':smp_spect_fold,'udibasepath':os.path.join(smp_fold,'ave_rmn_')}
substrate_pathd={'spectrafolder':substrate_spect_fold,'udibasepath':os.path.join(substrate_fold,'ave_rmn_')}
for pathd in [smp_pathd,substrate_pathd]:
# visui.exec_()
save_raman_udi(visui,pathd,udi_ternary_projection_inds,plateidstr,saveall=False)
def combineudis(smp_fn,substrate_fn):
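    # merge a sample UDI file with the matching substrate UDI file; substrate
    # sample numbers are negated so the two populations remain distinguishable
    # in the combined file written below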
smp_udid=readudi(smp_fn)
substrate_udid=readudi(substrate_fn)
substrate_udid['sample_no']=-np.array(substrate_udid['sample_no'])
combine_udi_d=dict([(k,smp_udid[k] if k in ['ellabels'] or not isinstance(smp_udid[k],(list,np.ndarray)) else np.array(list(smp_udid[k])+list(substrate_udid[k]))) for k in smp_udid.keys()])
writeudifile(os.path.join(parentfold,'smp_substrate_'+os.path.basename(smp_fn)),combine_udi_d)
[combineudis(os.path.join(smp_fold,x),os.path.join(substrate_fold,x)) for x in os.listdir(smp_fold) if os.path.splitext(x)[-1]=='.udi']
``` |
{
"source": "johnmgregoire/JCAPVisualizeData",
"score": 2
} |
#### File: johnmgregoire/JCAPVisualizeData/csvfilewriter.py
```python
import numpy
from PyQt4.QtCore import *
from PyQt4.QtGui import *
def createcsvfilstr(datadlist, fomkeys, fmt='%.5e'):#for each sample, if fom not available inserts NaN
smparr=[d['sample_no'] for d in datadlist]
    fomarr_smps=numpy.array([[d['fomd'].get(k, numpy.nan) for k in fomkeys] for d in datadlist])
lines=[','.join(['sample_no']+fomkeys)]
for smp, fomarr in zip(smparr, fomarr_smps):
lines+=[','.join(['%d' %smp]+[fmt %n for n in fomarr])]
s='\n'.join(lines)
return s
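# Example of the returned string (illustrative, with made-up sample numbers and FOM values):
#   sample_no,fom1,fom2
#   1,1.23000e+00,4.56000e-01
#   2,nan,7.89000e-02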
class selectexportfom(QDialog):
def __init__(self, parent, fomkeys, title='select FOMs to export. sample_no will be automatically included'):
super(selectexportfom, self).__init__(parent)
self.setWindowTitle(title)
self.parent=parent
self.fomkeys=fomkeys
vlayouts=[]
self.checkboxes=[]
for count, k in enumerate(fomkeys):
if count%10==0:
vlayout=QVBoxLayout()
vlayouts+=[vlayout]
cb=QCheckBox()
cb.setText(k)
if len(k)>2 and not ('ample' in k or 'x(mm)' in k or 'y(mm)' in k):
cb.setChecked(True)
vlayout.addWidget(cb)
self.checkboxes+=[cb]
mainlayout=QGridLayout()
for count, l in enumerate(vlayouts):
mainlayout.addLayout(l, 0, count)
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setGeometry(QRect(520, 195, 160, 26))
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
QObject.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
mainlayout.addWidget(self.buttonBox, min(10, len(self.checkboxes)), 0)
QObject.connect(self.buttonBox,SIGNAL("accepted()"),self.ExitRoutine)
#QObject.connect(self.buttonBox,SIGNAL("rejected()"),self.ExitRoutineCancel)
self.setLayout(mainlayout)
#self.resize(300, 250)
self.selectkeys=[]
def ExitRoutine(self):
for cb, k in zip(self.checkboxes, self.fomkeys):
if cb.isChecked():
self.selectkeys+=[k]
``` |
{
"source": "johnmgregoire/PythonCompositionPlots",
"score": 2
} |
#### File: johnmgregoire/PythonCompositionPlots/quaternary_FOM_stackedtern10.py
```python
import matplotlib.cm as cm
import numpy
import pylab
import operator, copy, os
#pylab.rc('font',**{'family':'serif''serif':['Times New Roman']})
#pylab.rcParams['font.family']='serif'
#pylab.rcParams['font.serif']='Times New Roman'
pylab.rc('font', family='serif', serif='Times New Roman')
#os.chdir('C:/Users/Gregoire/Documents/PythonCode/ternaryplot')
from myternaryutility import TernaryPlot
from myquaternaryutility import QuaternaryPlot
def make10ternaxes(ellabels=['A', 'B', 'C', 'D'], fig=None, fontsize=17):
if fig is None:
fig=pylab.figure(figsize=(12, 8))
ax_xc=[]
ax_yc=[]
xcdel=[.18, .19, .065, .1, .04, .05, .055, .03, .02, .02]
ax_yc=[.49, .68, .30, .74, .48, .24, .78, .58, .39, .21]
for i in range(10):
if i==0:
ax_xc+=[xcdel[i]]
else:
ax_xc+=[ax_xc[-1]+xcdel[i]]
#ax_yc+=[.5+((i%2)*2.-1.)*((i>0)*.1+.072*i/10)]
shape1=numpy.array([.35, 1.])
scales=[.82, 0.51, 0.39, 0.3, 0.22, 0.2, 0.17, 0.14, 0.11, 0.09]
axl=[]
for sc, xc, yc in zip(scales, ax_xc, ax_yc):
w, l=shape1*sc
axl+=[fig.add_axes([xc-w/2, yc-l/2, w, l])]
stpl=[]
xpos=[.27]*10
xpos[0:3]=[.38, .36, .33]
xpos[-1]=.18
for count, (ax, xp) in enumerate(zip(axl, xpos)):
stp=TernaryPlot(ax, ellabels=ellabels[:3], offset=.03)
if not fontsize is None:
stp.label(fontsize=fontsize)#,fontdict={'fontname':'Times New Roman'})
stpl+=[stp]
if not fontsize is None:
if count<9:
stp.ax.text(xp, .8, '%s$_{%.2f-%.2f}$' %(ellabels[3], (count*.1), ((count+1)*.1)-.01), ha='right', va='center', fontsize=fontsize)
else:
stp.ax.text(xp, .8, '%s$_{%.2f-%d}$' %(ellabels[3], (count*.1), 1), ha='right', va='center', fontsize=fontsize)
return axl, stpl
def scatter_10axes(comps, fom, stpl, s=18, cb=False, cbrect=(.85, .3, .04, .4), cblabel='', **kwargs):# for colorbar must pass kwargs norm and cmap and optionally cblabel
abc=comps[:, :3]
abc[abc.sum(axis=1)==0.]=numpy.array([1., 1., 1.])/3.
abc=numpy.array([c/c.sum() for c in abc])
d=comps[:, 3]
d30=numpy.round(d*30.)
dlims=numpy.array([0., 1., 2., 3.])
marks=[('o', 1., 1.), ('D', .9, .7),('s', .8, .5)]
sl=s*numpy.array([6.9, 3., 2.1, 1.5, 1.2, 1.35, 1.5, 1.8, 2.4, 3., 4.5])
scplots=[]
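    # each of the 10 ternary axes covers a 0.1-wide slice of the D composition;
    # within a slice the three 1/30-wide sub-bins are drawn with different
    # markers (circle, diamond, square), sizes and alphas so they stay distinguishable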
for i, (stp, sv) in enumerate(zip(stpl, sl)):
dl=dlims+(i*3.)
if i==9:
dl[-1]+=.01
for a, b, (m, sf, al) in zip(dl, dl[1:], marks):
inds=numpy.where((d30>=a) & (d30<b))[0]
#print a, b, len(inds)
if len(inds)>0:
scplots+=[stp.scatter(abc[inds], c=fom[inds], marker=m, s=sv*sf, alpha=al, **kwargs)]
if cb:
cbax=stp.ax.figure.add_axes(cbrect)
if 'extend' in kwargs.keys():
sm=cm.ScalarMappable(norm=kwargs['norm'], cmap=kwargs['cmap'], extend=kwargs['extend'])
else:
sm=cm.ScalarMappable(norm=kwargs['norm'], cmap=kwargs['cmap'])
sm.set_array(fom)
cb=stp.ax.figure.colorbar(sm, cax=cbax)
cb.set_label(cblabel, fontsize=18)
``` |
{
"source": "johnmick/botw-map.tk",
"score": 3
} |
#### File: botw-map.tk/user_authority/user_authority_server.py
```python
import time
import os
import json
import jwt
import tornado.ioloop
import tornado.web
from passlib.hash import argon2
jwt_secret = os.urandom(512)
cookie_secret = os.urandom(512)
user_hashes = {}
class CreateSlateUserHandler(tornado.web.RequestHandler):
def post(self):
try:
username = self.get_argument("username")
password = self.get_argument("password")
except:
self.write("Invalid Request")
return
if username not in user_hashes:
user_hashes[username] = argon2.using(rounds=513).hash(password)
self.write("Username registered try logging in")
else:
self.write("Username already exists")
class LoginHandler(tornado.web.RequestHandler):
def post(self):
if self.get_secure_cookie("token"):
token = jwt.decode(self.get_secure_cookie("token"), jwt_secret, algorithms=["HS512"])
self.write("Welcome back %s" % token["username"])
return
try:
username = self.get_argument("username")
password = self.get_argument("password")
except:
self.write("Invalid Request")
return
if username not in user_hashes:
self.write("Username does not exist")
return
if argon2.verify(password, user_hashes[username]):
self.write("Logged in by password okie")
self.set_secure_cookie(
"token", jwt.encode({"username": username}, jwt_secret, algorithm="HS512"),
httponly = True,
secure = True,
domain = ".botw-map.tk",
expires = time.time() + 86400
)
else:
self.write("Invalid password")
class LogoutHandler(tornado.web.RequestHandler):
def post(self):
print("Logout called")
if self.get_secure_cookie("token"):
self.clear_cookie("token")
self.write("Cookie Cleared")
return
if __name__ == "__main__":
port_number = 8889
address = '127.0.0.1'
app = tornado.web.Application(
handlers = [
(r"/auth/create-slate-user", CreateSlateUserHandler),
(r"/auth/login", LoginHandler),
(r"/auth/logout", LogoutHandler)
],
compress_response = True,
cookie_secret = cookie_secret,
xsrf_cookies = True
)
app.listen(port_number, address=address)
print("Listening for user auth requests on %s:%d" % (address,port_number))
tornado.ioloop.IOLoop.current().start()
``` |
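A sketch of exercising the three endpoints above with the `requests` library. The host, port, and credentials are assumptions taken from the `__main__` block; note that the login handler sets the token cookie with `secure=True` and a fixed `.botw-map.tk` domain, so the cookie will not actually round-trip over plain HTTP to 127.0.0.1; this only illustrates the request shapes.
```python
# Hypothetical client for the handlers above; assumes the server is running locally on port 8889.
import requests

BASE = "http://127.0.0.1:8889"
session = requests.Session()

# Register, then log in with the same form-encoded credentials (matching get_argument()).
print(session.post(BASE + "/auth/create-slate-user", data={"username": "link", "password": "hyrule"}).text)
print(session.post(BASE + "/auth/login", data={"username": "link", "password": "hyrule"}).text)

# Logging out clears the secure "token" cookie if one was set.
print(session.post(BASE + "/auth/logout").text)
```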
{
"source": "johnmikep/sal",
"score": 2
} |
#### File: plugins/memory/memory.py
```python
import sal.plugin
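# Sal stores machine memory in kB (see memory_kb below), so one "GB" here is 1024 ** 2 kB.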
GB = 1024 ** 2
MEM_4_GB = 4 * GB
MEM_775_GB = 7.75 * GB
MEM_8_GB = 8 * GB
TITLES = {
'ok': 'Machines with more than 8GB memory',
'warning': 'Machines with between 4GB and 8GB memory',
'alert': 'Machines with less than 4GB memory'}
class Memory(sal.plugin.Widget):
description = 'Installed RAM'
template = 'plugins/traffic_lights.html'
def get_context(self, machines, **kwargs):
context = self.super_get_context(machines, **kwargs)
context['ok_count'] = self._filter(machines, 'ok').count()
context['ok_label'] = '8GB +'
context['warning_count'] = self._filter(machines, 'warning').count()
context['warning_label'] = '4GB +'
context['alert_count'] = self._filter(machines, 'alert').count()
context['alert_label'] = '< 4GB'
return context
def filter(self, machines, data):
if data not in TITLES:
return None, None
return self._filter(machines, data), TITLES[data]
def _filter(self, machines, data):
if data == 'ok':
machines = machines.filter(memory_kb__gte=MEM_8_GB)
elif data == 'warning':
machines = machines.filter(memory_kb__range=[MEM_4_GB, MEM_775_GB])
elif data == 'alert':
machines = machines.filter(memory_kb__lt=MEM_4_GB)
return machines
``` |
{
"source": "johnmikep/zentral",
"score": 2
} |
#### File: server/accounts/auth_backends.py
```python
import logging
from django.contrib.auth.backends import ModelBackend
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from .models import User
logger = logging.getLogger("server.accounts.auth_backends")
class Saml2Backend(ModelBackend):
def authenticate(self, request, session_info):
username = None
ava = session_info.get('ava')
if ava:
uid = ava.get('uid')
if uid:
username = uid[0]
if not username and 'name_id' in session_info:
username = session_info['name_id'].text
if not username:
logger.error("NO USERNAME FOUND")
return None
try:
validate_email(username)
except ValidationError:
email = <EMAIL>".<EMAIL>(username)
else:
email = username
user, created = User.objects.update_or_create(username=username,
defaults={"email": email,
"is_remote": True})
if not created:
if user.has_usable_password():
logger.error("User %s with password exists", username)
return user
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
```
#### File: tests/osquery/test_api_views.py
```python
import json
from django.urls import reverse
from django.test import TestCase, override_settings
from django.utils.crypto import get_random_string
from zentral.contrib.inventory.models import EnrollmentSecret, MachineSnapshot, MetaBusinessUnit
from zentral.contrib.osquery.conf import (INVENTORY_QUERY_NAME,
INVENTORY_DISTRIBUTED_QUERY_PREFIX)
from zentral.contrib.osquery.models import Configuration, Enrollment
from zentral.core.probes.conf import all_probes
from zentral.core.probes.models import ProbeSource
from zentral.utils.api_views import make_secret
INVENTORY_QUERY_SNAPSHOT = [
{'build': '15D21',
'major': '10',
'minor': '11',
'name': 'Mac OS X',
'patch': '3',
'table_name': 'os_version'},
{'computer_name': 'godzilla',
'cpu_brand': 'Intel(R) Core(TM)2 Duo CPU T9600 @2.80GHz',
'cpu_logical_cores': '2',
'cpu_physical_cores': '2',
'cpu_subtype': 'Intel 80486',
'cpu_type': 'i486',
'hardware_model': 'MacBookPro5,1 ', # extra space must be removed by osquery module
'hardware_serial': '0123456789',
'hostname': 'godzilla.box',
'physical_memory': '8589934592',
'table_name': 'system_info'},
{'address': '192.168.1.123',
'broadcast': '192.168.1.255',
'interface': 'en1',
'mac': '00:23:ac:a8:49:a9',
'mask': '255.255.255.0',
'table_name': 'network_interface'}
]
OSX_APP_INSTANCE = {
"bundle_id": "com.agilebits.onepassword4-updater",
"bundle_name": "1Password Updater",
"bundle_path": "/Applications/1Password 6.app/Contents/Helpers/1Password Updater.app",
"bundle_version": "652003",
"bundle_version_str": "6.5.2",
"table_name": "apps"
}
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class OsqueryAPIViewsTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.configuration = Configuration.objects.create(name=get_random_string(256))
cls.meta_business_unit = MetaBusinessUnit.objects.create(name=get_random_string(64))
cls.enrollment_secret = EnrollmentSecret.objects.create(meta_business_unit=cls.meta_business_unit)
cls.enrollment = Enrollment.objects.create(configuration=cls.configuration,
secret=cls.enrollment_secret)
def post_as_json(self, url_name, data):
return self.client.post(reverse("osquery:{}".format(url_name)),
json.dumps(data),
content_type="application/json")
def test_enroll_405(self):
response = self.client.get(reverse("osquery:enroll"))
self.assertEqual(response.status_code, 405)
self.assertCountEqual(["POST", "OPTIONS"], (m.strip() for m in response["Allow"].split(",")))
def test_enroll_bad_json(self):
response = self.client.post(reverse("osquery:enroll"))
self.assertEqual(response.status_code, 400)
response = self.client.post(reverse("osquery:enroll"),
data="lkjadslkjdsalkdjas",
content_type="application/json")
self.assertEqual(response.status_code, 400)
def test_enroll_missing_json_keys(self):
response = self.post_as_json("enroll", {"no_enroll_secret_key": True})
self.assertEqual(response.status_code, 400)
def test_enroll_bad_secret(self):
response = self.post_as_json(
"enroll",
{"enroll_secret": "INVALID ENROLL SECRET",
"host_details": {"system_info": {"hardware_serial": get_random_string(32)}}}
)
self.assertContains(response, "Unknown enrolled machine", status_code=403)
def test_enroll_enroll_secret_bad_module_old_way(self):
# TODO: deprecate and remove
secret = "{}$SERIAL${}".format(make_secret("zentral.inexisting.module"), get_random_string(32))
response = self.post_as_json("enroll", {"enroll_secret": secret})
self.assertContains(response, "Invalid module", status_code=403)
def test_enroll_not_machine_serial_number(self):
response = self.post_as_json("enroll", {"enroll_secret": self.enrollment.secret.secret})
self.assertContains(response, "No serial number", status_code=403)
def test_enroll_ok_old_way(self):
# TODO: deprecate and remove
machine_serial_number = get_random_string(32)
machine_test_qs = MachineSnapshot.objects.filter(source__module="zentral.contrib.osquery",
serial_number=machine_serial_number)
# no machine
self.assertEqual(machine_test_qs.count(), 0)
# enroll machine
secret = "{}$SERIAL${}".format(make_secret("zentral.contrib.osquery"),
machine_serial_number)
response = self.post_as_json("enroll", {"enroll_secret": secret})
json_response = response.json()
self.assertCountEqual(["node_key"], json_response.keys())
self.assertEqual(machine_test_qs.count(), 1)
machine = machine_test_qs.all()[0]
self.assertEqual(machine.reference, json_response["node_key"])
def test_enroll_with_host_identifier_ok(self):
machine_serial_number = get_random_string(32)
machine_test_qs = MachineSnapshot.objects.filter(source__module="zentral.contrib.osquery",
serial_number=machine_serial_number)
# no machine
self.assertEqual(machine_test_qs.count(), 0)
# enroll machine
secret = "{}$SERIAL${}".format(make_secret("zentral.contrib.osquery"),
machine_serial_number)
response = self.post_as_json("enroll", {"enroll_secret": secret,
"host_identifier": "godzilla"})
json_response = response.json()
self.assertCountEqual(["node_key"], json_response.keys())
self.assertEqual(machine_test_qs.count(), 1)
machine = machine_test_qs.all()[0]
self.assertEqual(machine.reference, json_response["node_key"])
self.assertEqual(machine.system_info.computer_name, "godzilla")
def test_re_enroll(self):
machine_serial_number = get_random_string(32)
# enroll machine
secret = "{}$SERIAL${}".format(make_secret("zentral.contrib.osquery"),
machine_serial_number)
response = self.post_as_json("enroll", {"enroll_secret": secret,
"host_identifier": "godzilla"})
json_response = response.json()
node_key = json_response["node_key"]
# re-enroll machine
response = self.post_as_json("enroll", {"enroll_secret": secret,
"host_identifier": "godzilla"})
json_response = response.json()
self.assertEqual(json_response["node_key"], node_key)
def test_config_405(self):
response = self.client.get(reverse("osquery:enroll"))
self.assertEqual(response.status_code, 405)
self.assertCountEqual(["POST", "OPTIONS"], (m.strip() for m in response["Allow"].split(",")))
def test_config_missing_node_key(self):
response = self.post_as_json("config", {"godzilla": "ffm"})
self.assertContains(response, "Missing node_key", status_code=403)
def test_config_wrong_node_key(self):
response = self.post_as_json("config", {"node_key": "godzilla"})
self.assertContains(response, "Wrong node_key", status_code=403)
def enroll_machine(self):
machine_serial_number = get_random_string(64)
response = self.post_as_json(
"enroll",
{"enroll_secret": self.enrollment.secret.secret,
"host_details": {"system_info": {"hardware_serial": machine_serial_number}}}
)
self.assertEqual(response.status_code, 200)
return machine_serial_number, response.json()["node_key"]
def test_config_ok(self):
_, node_key = self.enroll_machine()
response = self.post_as_json("config", {"node_key": node_key})
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "application/json")
json_response = response.json()
self.assertIn("schedule", json_response)
schedule = json_response["schedule"]
self.assertIn(INVENTORY_QUERY_NAME, schedule)
def test_osx_app_instance_schedule(self):
_, node_key = self.enroll_machine()
self.post_default_inventory_query_snapshot(node_key)
# machine platform = MACOS now
response = self.post_as_json("config", {"node_key": node_key})
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "application/json")
json_response = response.json()
self.assertIn("schedule", json_response)
schedule = json_response["schedule"]
self.assertIn(INVENTORY_QUERY_NAME, schedule)
self.assertIn(" 'apps' ", schedule[INVENTORY_QUERY_NAME]["query"])
def test_distributed_read_405(self):
response = self.client.get(reverse("osquery:distributed_read"))
self.assertEqual(response.status_code, 405)
self.assertCountEqual(["POST", "OPTIONS"], (m.strip() for m in response["Allow"].split(",")))
def test_distributed_read_default_inventory_query(self):
_, node_key = self.enroll_machine()
response = self.post_as_json("distributed_read", {"node_key": node_key})
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "application/json")
json_response = response.json()
query_names = ["{}{}".format(INVENTORY_DISTRIBUTED_QUERY_PREFIX, t)
for t in ("os_version", "system_info", "uptime", "network_interface")]
self.assertCountEqual(json_response["queries"], query_names)
def post_default_inventory_query_snapshot(self, node_key, with_app=False):
snapshot = list(INVENTORY_QUERY_SNAPSHOT)
if with_app:
snapshot.append(OSX_APP_INSTANCE)
self.post_as_json("distributed_write",
{"node_key": node_key,
"queries": {"{}{}".format(INVENTORY_DISTRIBUTED_QUERY_PREFIX, i["table_name"]): [i]
for i in snapshot}
})
def test_default_inventory_query_snapshot(self):
machine_serial_number, node_key = self.enroll_machine()
self.post_default_inventory_query_snapshot(node_key)
ms = MachineSnapshot.objects.current().get(serial_number=machine_serial_number)
self.assertEqual(ms.os_version.build, INVENTORY_QUERY_SNAPSHOT[0]["build"])
self.assertEqual(ms.system_info.hardware_model, INVENTORY_QUERY_SNAPSHOT[1]["hardware_model"].strip())
def test_distributed_read_one_query_plus_default_inventory_query(self):
_, node_key = self.enroll_machine()
# one distributed query probe
dq = "select * from users;"
probe_source = ProbeSource.objects.create(
name="Shellac",
status=ProbeSource.ACTIVE,
model="OsqueryDistributedQueryProbe",
body={"distributed_query": dq}
)
dq_name = "dq_{}".format(probe_source.pk)
# simulate an all_probes sync
all_probes.clear()
# distributed read
response = self.post_as_json("distributed_read", {"node_key": node_key})
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "application/json")
json_response = response.json()
query_names = ["{}{}".format(INVENTORY_DISTRIBUTED_QUERY_PREFIX, t)
for t in ("os_version", "system_info", "uptime", "network_interface")]
query_names.append(dq_name)
self.assertCountEqual(json_response["queries"], query_names)
self.assertEqual(json_response["queries"][dq_name], dq)
# post default inventory snapshot.
self.post_default_inventory_query_snapshot(node_key)
# 2nd distributed read still has the inventory query
# but with the apps now that we know what kind of machine it is
response = self.post_as_json("distributed_read", {"node_key": node_key})
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "application/json")
json_response = response.json()
query_names = ["{}{}".format(INVENTORY_DISTRIBUTED_QUERY_PREFIX, t)
for t in ("os_version", "system_info", "uptime", "network_interface", "apps")]
self.assertCountEqual(json_response["queries"], query_names)
# post default inventory snapshot with one app
self.post_default_inventory_query_snapshot(node_key, with_app=True)
# 3rd distributed read empty (2 snapshots done and no other distributed queries available)
response = self.post_as_json("distributed_read", {"node_key": node_key})
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "application/json")
json_response = response.json()
self.assertEqual(json_response, {"queries": {}})
def test_distributed_write_405(self):
response = self.client.get(reverse("osquery:distributed_write"))
self.assertEqual(response.status_code, 405)
self.assertCountEqual(["POST", "OPTIONS"], (m.strip() for m in response["Allow"].split(",")))
def test_distributed_write(self):
_, node_key = self.enroll_machine()
# query
probe_source = ProbeSource.objects.create(
name="Shellac",
status=ProbeSource.ACTIVE,
model="OsqueryDistributedQueryProbe",
body={"distributed_query": "select username from users;"}
)
response = self.post_as_json("distributed_write",
{"node_key": node_key,
"queries": {"dq_{}".format(probe_source.pk): [{"username": "godzilla"}]}})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {})
def test_log_405(self):
response = self.client.get(reverse("osquery:log"))
self.assertEqual(response.status_code, 405)
self.assertCountEqual(["POST", "OPTIONS"], (m.strip() for m in response["Allow"].split(",")))
def test_log_default_inventory_query(self):
machine_serial_number, node_key = self.enroll_machine()
snapshot = [
{
"build": "15G1108",
"major": "10",
"minor": "11",
"name": "Mac OS X",
"patch": "6",
"table_name": "os_version"
},
{
"computer_name": "godzilla",
"cpu_brand": "Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz",
"cpu_logical_cores": "4",
"cpu_physical_cores": "2",
"cpu_subtype": "Intel x86-64h Haswell",
"cpu_type": "x86_64h",
"hardware_model": "MacBookPro11,1",
"hardware_serial": machine_serial_number,
"hostname": "godzilla",
"physical_memory": "17179869184",
"table_name": "system_info"
},
{
"address": "192.168.1.17",
"broadcast": "192.168.1.255",
"interface": "en3",
"mac": "38:c9:87:21:b1:32",
"mask": "255.255.255.0",
"table_name": "network_interface"
},
OSX_APP_INSTANCE,
]
post_data = {
"node_key": node_key,
"log_type": "result",
"data": [
{"name": INVENTORY_QUERY_NAME,
"unixTime": '1480605737',
"snapshot": snapshot}
]
}
# no machine named godzilla
self.assertEqual(MachineSnapshot.objects.filter(reference=node_key,
system_info__computer_name="godzilla").count(), 0)
# post new snapshot
response = self.post_as_json("log", post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "application/json")
json_response = response.json()
self.assertEqual(json_response, {})
# new machine snapshot, one of them is godzilla
self.assertEqual(MachineSnapshot.objects.filter(reference=node_key).count(), 2)
self.assertEqual(MachineSnapshot.objects.filter(reference=node_key,
system_info__computer_name="godzilla").count(), 1)
self.assertEqual(MachineSnapshot.objects.filter(
reference=node_key,
osx_app_instances__app__bundle_name="1Password Updater").count(), 1)
def test_log_status(self):
_, node_key = self.enroll_machine()
post_data = {
"node_key": node_key,
"log_type": "status",
"data": [
{'filename': 'scheduler.cpp',
'line': '63',
'message': 'Executing scheduled query: macos-attacks-query-pack_604dc4d3: '
"select * from startup_items where path like '%iWorkServices%';",
'severity': '0',
'version': '2.1.2'}
]
}
response = self.post_as_json("log", post_data)
self.assertEqual(response["Content-Type"], "application/json")
json_response = response.json()
self.assertEqual(json_response, {})
def test_log_event_format_result(self):
_, node_key = self.enroll_machine()
post_data = {
"node_key": node_key,
"log_type": "result",
"data": [
{'name': 'godzilla_kommt-343hdwkl',
'action': 'added',
'hostIdentifier': 'godzilla.local',
'columns': {'name': 'Dropbox', 'pid': '1234', 'port': '17500'},
'unixTime': '1480605737'}
]
}
response = self.post_as_json("log", post_data)
self.assertEqual(response["Content-Type"], "application/json")
json_response = response.json()
self.assertEqual(json_response, {})
def test_log_snapshot_format_result(self):
_, node_key = self.enroll_machine()
post_data = {
"node_key": node_key,
"log_type": "result",
"data": [
{'name': 'godzilla_kommt-343hdwkl',
'action': 'snapshot',
'hostIdentifier': 'godzilla.local',
"snapshot": [
{
"parent": "0",
"path": "/sbin/launchd",
"pid": "1"
},
{
"parent": "1",
"path": "/usr/sbin/syslogd",
"pid": "51"
}
],
'unixTime': '1480605737'}
]
}
response = self.post_as_json("log", post_data)
self.assertEqual(response["Content-Type"], "application/json")
json_response = response.json()
self.assertEqual(json_response, {})
``` |
{
"source": "johnmillner/SigSent",
"score": 3
} |
#### File: sigsent/src/enableFG.py
```python
import rospy
import pigpio
def enableFG():
#on current PiHat for SigSent, enable pin is on GPIO 4
gpio_pin = 4
#enable node
rospy.init_node('enableFG')
#init pigpio
p = pigpio.pi()
#set GPIO to HIGH, enabling i2c bus
p.write(gpio_pin, 1)
#happy little message to let us know whats going on
rospy.loginfo("Fuel Gauge enabled on pin {}".format( gpio_pin ))
if __name__ == '__main__':
try:
enableFG()
except rospy.ROSInterruptException:
rospy.logerr("Could not enable Fuel Gauge pin on GPIO pin 4")  # gpio_pin is local to enableFG(), so it is not available here
pass
```
#### File: SigSent/Gait/pi_spi_modules.py
```python
class Message:
"""
Functions called by the user return a list of bytes containing the message header and the needed content messages.
The SPI library from pigpio takes a list of bytes and sends them, so this makes life easy!
"""
def __init__(self):
# Header message bits define what message is coming next so that the MCU
# knows how to parse it
self.mode_change = 0b00110000
self.walking_move = 0b00001100
self.esc = 0b00000011
# Directions define how the walking robot should move through gait and define
# what direction the ESCs need to send the motors in
self.fwd = 0b00000000
self.left = 0b00001111
self.right = 0b11110000
self.back = 0b11111111
# Mode type comes after mode_change header and tells the hexapod to switch to
# one of the following mobility mechanism
self.driving_mode = 0b00001111
self.walking_mode = 0b11110000
# Creates a header message with a given type provided by the user
def _create_header(self, mode_change=False, walking_move=False, esc=False):
header = 0b00000000
if mode_change:
header |= self.mode_change
elif walking_move:
header |= self.walking_move
elif esc:
header |= self.esc
return header
def create_walking_message(self, fwd=False, left=False, right=False):
messages = []
messages.append(self._create_header(walking_move=True))
messages.append(self.create_direction_message(fwd=fwd, left=left, right=right))
return messages
def create_mode_change_message(self, driving=False, walking=False):
messages = []
messages.append(self._create_header(mode_change=True))
message = 0b00000000
if driving:
message |= self.driving_mode
elif walking:
message |= self.walking_mode
messages.append(message)
return messages
def create_direction_message(self, fwd=False, left=False, right=False, back=False):
message = 0b00000000
if fwd:
message |= self.fwd
elif left:
message |= self.left
elif right:
message |= self.right
elif back:
message |= self.back
return message
def create_esc_message(self, fwd=False, left=False, right=False, back=False, speed=0):
messages = []
messages.append(self._create_header(esc=True))
# Speed can only be stored in one byte. I check the bit_length rather than the int
# value specifically because Python has no distinction between signed/unsigned ints
if speed.bit_length() > 8:
return None
messages.append(self.create_direction_message(fwd=fwd, left=left, right=right, back=back))
messages.append(speed)
return messages
``` |
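A sketch of how the byte lists produced by `Message` might be pushed out with pigpio's SPI calls; the SPI channel, baud rate, and module import path are assumptions, while `spi_open`, `spi_write`, and `spi_close` are standard pigpio API.
```python
# Hypothetical sender for the Message class above; channel 0 and the baud rate are assumptions.
import pigpio
from pi_spi_modules import Message  # file name as in this repo's Gait directory

pi = pigpio.pi()
handle = pi.spi_open(0, 115200, 0)          # (SPI channel, baud, flags)

msg = Message()
pi.spi_write(handle, msg.create_mode_change_message(walking=True))
pi.spi_write(handle, msg.create_walking_message(fwd=True))

esc = msg.create_esc_message(fwd=True, speed=128)
if esc is not None:                         # None means the speed did not fit in one byte
    pi.spi_write(handle, esc)

pi.spi_close(handle)
pi.stop()
```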
{
"source": "JohnminerIv/b-g-club-tutorials",
"score": 4
} |
#### File: b-g-club-tutorials/day_3/brainstorm_challenge.py
```python
class HuskyBucksSystem:
"""
This is an example command-line version of a Husky Bucks system.
"""
def __init__(self) -> None:
self.accounts = {}
def add_student(self, student_id, name) -> None:
self.accounts[student_id] = Student(name)
def is_id_valid(self, id):
return self.accounts.get(id) != None
def check_for_funds(self, student_id, amount):
return self.get_balance(student_id) >= amount
def get_balance(self, student_id):
return self.accounts[student_id].current_balance()
def debit_student(self, student_id, amount):
self.accounts[student_id].debit(amount)
def credit_student(self, student_id, amount):
self.accounts[student_id].credit(amount)
def start(self):
print('Hello thanks for banking with husky')
while True:
user_input = None
while user_input not in ('a', 'check', 'c', 'd', 'e'):
if user_input != None:
print("That wasn't an option")
print('What would you like to do?')
print('Enter (a) to add a student')
print('Enter (check) to check a students funds')
print('Enter (c) to credit a student')
print('Enter (d) to debit a student')
print('Enter (e) to exit the system')
user_input = input('Please enter an option: ')
if user_input == 'a':
name = input('Please enter a name: ')
id = self.get_id()
self.add_student(id, name)
print(f'{name} was added to the Husky bucks system')
elif user_input == 'check':
id = self.get_id()
if self.is_id_valid(id):
balance = self.get_balance(id)
print(f'The balance of account: {id} is {balance}')
else:
print('thats not a valid id.')
elif user_input == 'c':
id = self.get_id()
if self.is_id_valid(id) == True:
amount = self.get_amount()
old_balance = self.get_balance(id)
self.credit_student(id, amount)
new_balance = self.get_balance(id)
print(f'The balance of account: {id} was {old_balance} and is now {new_balance}')
else:
print('thats not a valid id.')
elif user_input == 'd':
id = self.get_id()
if self.is_id_valid(id) == True:
amount = self.get_amount()
old_balance = self.get_balance(id)
self.debit_student(id, amount)
new_balance = self.get_balance(id)
print(f'The balance of account: {id} was {old_balance} and is now {new_balance}')
else:
print('thats not a valid id.')
elif user_input == 'e':
break
def get_amount(self):
amount = None
user_input = None
while type(amount) is not int:
if user_input is not None:
print("That wasn't a number")
user_input = input('Please enter an amount: ')
try:
amount = int(user_input)
except:
pass
return amount
def get_id(self):
id = None
user_input = None
while type(id) is not int:
if user_input is not None:
print("That wasn't an id")
user_input = input('Please enter an id: ')
try:
potential_id = int(user_input)
id = potential_id
except:
pass
return id
class Student:
def __init__(self, name):
self.name = name
self.account_balance = 0
def credit(self, amount):
self.account_balance += amount
def debit(self, amount):
self.account_balance -= amount
def current_balance(self):
return self.account_balance
if __name__ == '__main__':
hbs = HuskyBucksSystem()
hbs.start()
```
#### File: b-g-club-tutorials/day_3/classes.py
```python
class Person:
def __init__(self, name: str, age: int) -> None:
"""
Initializes the person class requires a name and an age.
"""
self.name = name
self.age = age
self.friends = []
def introduce(self) -> str:
"""
Introduces the person.
"""
return f'Hello my name is {self.name} and I am {self.age} years old.'
def get_older(self, years: int) -> None:
"""
Ages the person by an amount of years
"""
self.age += years
def add_friend(self, person) -> None:
"""
Adds another person to this person's friends list
"""
self.friends.append(person)
return self
def list_friends(self) -> str:
"""
Returns a string containing the names of this person's friends
"""
friends = ''
for friend in self.friends:
friends += friend.name + ' '
return self.name + "'s friends list is " + friends
def __str__(self) -> str:
return f'Person {self.name}, age {self.age}'
"""
By using classes you can inherit from another class and receive its methods, or
override them with something else.
"""
class Student (Person):
def __init__(self, name, age, grade):
"""
Initializes a student.
"""
super().__init__(name, age)
self.grade = grade
def introduce(self) -> str:
"""
Introduces a student.
"""
return f"I'm a student my name is {self.name}. I'm in grade \
{self.grade}, and I'm {self.age} years old"
def __str__(self) -> str:
return f'Student {self.name}, grade {self.grade}'
"""
Object-oriented programming is useful, and many (if not all) jobs for coders will
require you to be familiar with how it works, but it's also important to note that
you can do much the same thing without it. For most of your personal projects
you can decide to use object-oriented or functional paradigms, or a mix of both,
whatever you want.
"""
def create_person(name, age) -> dict:
"""
Creates a dictionary representation of a person
"""
return {'name': name, 'age': age, 'friends': []}
def introduce(person) -> str:
"""
Introduces a dictionary representation of a person
"""
return f'Hello my name is {person["name"]} and I am {person["age"]} years old.'
def get_older(person, years) -> None:
"""
Increments the age of a person
"""
person['age'] += years
def string_rep(person) -> str:
"""
Represents the dictionary representation of a person as a string
"""
return f'Person {person["name"]}, age {person["age"]}'
def add_friend(person, person2) -> None:
"""
Adds a person to this functional person's friends list
"""
person['friends'].append(person2)
return person
def list_friends(person) -> str:
"""
Returns a string containing the names of this functional person's friends
"""
friends = ''
for friend in person['friends']:
friends += friend['name'] + ' '
return person['name'] + "'s friends list is " + friends
def create_student(name, age, grade) -> dict:
"""
Creates a dictionary representation of a student.
"""
student = create_person(name, age)
student['grade'] = grade
return student
def introduce_student(student) -> str:
"""
Introduces a functional student.
"""
return f"I'm a student my name is {student['name']}. I'm in grade \
{student['grade']}, and I'm {student['age']} years old"
if __name__ == '__main__':
print('Doing some things in an object oriented way')
person1 = Person('John', 20)
print(person1.introduce())
person1.get_older(6)
print(person1.introduce())
student1 = Student('Baki', 18, 12)
print(student1.introduce())
student1.get_older(3) # Can still get older even though get_older isn't explicitly defined on Student, because Student subclasses Person
print(student1.introduce())
student1.add_friend(person1)
print(student1.list_friends())
print('')
print('*' * 80)
print('')
print('Doing the same thing functionally.')
person2 = create_person('John', 20)
print(introduce(person2))
get_older(person2, 6)
print(introduce(person2))
student2 = create_student('Baki', 18, 12)
print(introduce_student(student2))
get_older(student2,3)
print(introduce_student(student2))
add_friend(student2, person2)
print(list_friends(student2))
``` |
{
"source": "JohnminerIv/modular_dockerized_flask",
"score": 2
} |
#### File: blueprints/auth/routes.py
```python
from flask import (Blueprint, render_template)
bp = Blueprint('auth', __name__, url_prefix='/auth', template_folder='templates')
@bp.route('/login')
def login():
"""Page for logging in"""
return render_template('auth/login.html')
@bp.route('/register')
def register():
"""Page for registering"""
return render_template('auth/register.html')
``` |
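A minimal app-factory sketch showing how this blueprint might be registered; the package path and the existence of the referenced templates are assumptions.
```python
# Hypothetical application factory; "blueprints.auth" mirrors the file layout shown above.
from flask import Flask
from blueprints.auth.routes import bp as auth_bp

def create_app():
    app = Flask(__name__)
    app.register_blueprint(auth_bp)   # exposes /auth/login and /auth/register
    return app

if __name__ == "__main__":
    create_app().run(debug=True)
```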
{
"source": "johnmiroki/python2sky",
"score": 2
} |
#### File: python2sky/context/common.py
```python
URL = "url"
STATUS_CODE = "status_code"
DB_TYPE = "db.type"
DB_INSTANCE = "db.instance"
DB_STATEMENT = "db.statement"
DB_BIND_VARIABLES = "db.bind_vars"
MQ_QUEUE = "mq.queue"
MQ_BROKER = "mq.broker"
MQ_TOPIC = "mq.topic"
METHOD = "http.method"
UNKNOWN = 0
DATABASE = 1
RPC_FRAMEWORK = 2
HTTP = 3
MQ = 4
CACHE = 5
class Component:
def __init__(self, id, name):
self.id = id
self.name = name
FLASK = Component(6001, "Flask")
REQUESTS = Component(6002, "requests")
def set_component(span, component):
span.component_id = component.name
span.component_id = component.id
def set_layer_db(span):
span.layer = DATABASE
def set_layer_rpc_framework(span):
span.layer = RPC_FRAMEWORK
def set_layer_http(span):
span.layer = HTTP
def set_layer_mq(span):
span.layer = MQ
def set_layer_cache(span):
span.layer = CACHE
def set_tag_url(span, v):
span.tag(URL, v)
def set_tag_method(span, v):
span.tag(METHOD, v)
def set_tag_status_code(span, v):
span.tag(STATUS_CODE, v)
def set_tag_db_type(span, v):
span.tag(DB_TYPE, v)
def set_tag_db_instance(span, v):
span.tag(DB_INSTANCE, v)
def set_tag_db_statement(span, v):
span.tag(DB_STATEMENT, v)
def set_tag_db_bind_variables(span, v):
span.tag(DB_BIND_VARIABLES, v)
def set_tag_mq_queue(span, v):
span.tag(MQ_QUEUE, v)
def set_tag_mq_broker(span, v):
span.tag(MQ_BROKER, v)
def set_tag_mq_topic(span, v):
span.tag(MQ_TOPIC, v)
```
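A sketch of how these helpers might be combined with the `ContextManager` used elsewhere in this package to decorate an HTTP entry span; the endpoint name, URL, and status code are illustrative, and the span object is assumed to expose the `tag`, `layer`, and `component_id` attributes this module writes to.
```python
# Hypothetical instrumentation snippet; values are made up, the helper calls mirror this module.
from python2sky.context.context_manager import ContextManager
from python2sky.context.common import (FLASK, set_component, set_layer_http,
                                       set_tag_method, set_tag_url, set_tag_status_code)

span = ContextManager.create_entry_span("/users", None)
set_component(span, FLASK)
set_layer_http(span)
set_tag_method(span, "GET")
set_tag_url(span, "http://localhost:5000/users")
set_tag_status_code(span, "200")
ContextManager.stop_span(span)
```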
#### File: python2sky/context/context_carrier.py
```python
from python2sky.proto.common.trace_common_pb2 import CrossProcess
from python2sky.proto.language_agent_v2.trace_pb2 import SegmentReference
from python2sky.util.common import null_value, build_unique_id
from python2sky.util.string_util import is_empty
from python2sky.util.uuid_util import global_id_to_string, string_to_global_id
from python2sky.util.base64_util import encode, decode
def encode_compressed_field(id, text):
if id and id != 0:
return encode(str(id))
return encode("#" + text)
def decode_field(text):
text = decode(text)
if text and text.startswith("#"):
return text[1:], 0
return None, int(text)
class ContextCarrier:
def __init__(self):
self.trace_segment_id = None
self.span_id = -1
self.parent_service_instance_id = 0
self.entry_service_instance_id = 0
self.peer = None
self.peer_id = None
self.entry_endpoint_name = None
self.parent_endpoint_name = None
self.trace_id = None
self.sample = None
self.parent_endpoint_id = 0
self.network_address_id = None
self.entry_endpoint_id = None
self.type = None
def deserialize(self, text):
parts = text.split("-", 9)
self.sample = parts[0]
self.trace_id = string_to_global_id(decode(parts[1]))
self.trace_segment_id = string_to_global_id(decode(parts[2]))
self.span_id = int(parts[3])
self.parent_service_instance_id = int(parts[4])
self.entry_service_instance_id = int(parts[5])
self.peer, self.peer_id = decode_field(parts[6])
self.entry_endpoint_name, self.entry_endpoint_id = decode_field(parts[7])
self.parent_endpoint_name, self.parent_endpoint_id = decode_field(parts[8])
def serialize(self):
if self.trace_id is None:
return None
return "-".join(["1",
encode(global_id_to_string(self.trace_id)),
encode(global_id_to_string(self.trace_segment_id)),
str(self.span_id),
str(self.parent_service_instance_id),
str(self.entry_service_instance_id),
encode_compressed_field(self.network_address_id, self.peer),
encode_compressed_field(self.entry_endpoint_id, self.entry_endpoint_name),
encode_compressed_field(self.parent_endpoint_id, self.parent_endpoint_name)
])
def transform(self):
segment_reference = SegmentReference()
if self.type == CrossProcess:
segment_reference.refType = self.type
if null_value(self.peer_id):
segment_reference.networkAddress = self.peer
else:
segment_reference.networkAddressId = self.peer_id
else:
segment_reference.refType = self.type
segment_reference.parentServiceInstanceId = self.parent_service_instance_id
segment_reference.entryServiceInstanceId = self.entry_service_instance_id
segment_reference.parentTraceSegmentId.CopyFrom(build_unique_id(self.trace_segment_id))
segment_reference.parentSpanId = self.span_id
if null_value(self.entry_endpoint_id):
if not is_empty(self.entry_endpoint_name):
segment_reference.entryEndpoint = self.entry_endpoint_name
else:
segment_reference.entryEndpointId = self.entry_endpoint_id
if null_value(self.parent_endpoint_id):
if not is_empty(self.parent_endpoint_name):
segment_reference.parentEndpoint = self.parent_endpoint_name
else:
segment_reference.parentEndpointId = self.parent_endpoint_id
return segment_reference
```
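A round-trip sketch for the carrier above: populate the fields that `serialize()` reads, produce the dash-separated sw6-style header, and parse it back with `deserialize()`. The ids, names, and peer address are made up, and the trace/segment ids follow the list-of-parts form used in this project's tests.
```python
# Hypothetical round trip; all ids, names and the peer address are invented for illustration.
from python2sky.context.context_carrier import ContextCarrier

carrier = ContextCarrier()
carrier.trace_id = ["3", "4", "5"]
carrier.trace_segment_id = ["3", "4", "6"]
carrier.span_id = 0
carrier.parent_service_instance_id = 1
carrier.entry_service_instance_id = 1
carrier.peer = "192.168.0.1:8080"
carrier.entry_endpoint_name = "/entry"
carrier.parent_endpoint_name = "/parent"

header = carrier.serialize()        # "1-<base64 trace id>-<base64 segment id>-0-1-1-..."

received = ContextCarrier()
received.deserialize(header)
assert received.span_id == 0 and received.peer == "192.168.0.1:8080"
```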
#### File: python2sky/context/trace_segment.py
```python
from python2sky import config
from python2sky.proto.common.trace_common_pb2 import UpstreamSegment
from python2sky.proto.language_agent_v2.trace_pb2 import SegmentObject
from python2sky.util.common import build_unique_id
from python2sky.util.date_util import current_milli_time
from python2sky.util.uuid_util import global_id_generator
class TraceSegment:
def __init__(self):
self.application_instance_id = config.SERVICE_INSTANCE_ID
self.service_id = config.SERVICE_ID
self.trace_segment_id = global_id_generator(self.application_instance_id)
self.refs = []
self.spans = []
self.create_time = current_milli_time()
self.id = global_id_generator(self.application_instance_id)
self.trace_ids = [self.id]
self.is_size_limited = False
def ref(self, context_carrier):
if context_carrier not in self.refs:
self.refs.append(context_carrier)
def related_global_traces(self, trace_id):
if len(self.trace_ids) > 0 and self.id == self.trace_ids[0]:
self.trace_ids.remove(self.id)
if trace_id not in self.trace_ids:
self.trace_ids.append(trace_id)
def get_related_global_traces(self):
return self.trace_ids
def archive(self, span):
span.end()
self.spans.append(span)
def transform(self):
upstream_segment = UpstreamSegment()
for trace_id in self.trace_ids:
upstream_segment.globalTraceIds.append(build_unique_id(trace_id))
segment_obj = SegmentObject()
segment_obj.traceSegmentId.CopyFrom(build_unique_id(self.trace_segment_id))
for span in self.spans:
segment_obj.spans.append(span.transform())
segment_obj.serviceId = self.service_id
segment_obj.serviceInstanceId = self.application_instance_id
segment_obj.isSizeLimited = self.is_size_limited
upstream_segment.segment = segment_obj.SerializeToString()
return upstream_segment
```
#### File: proto/register/Register_pb2_grpc.py
```python
import grpc
from python2sky.proto.common import common_pb2 as common_dot_common__pb2
from python2sky.proto.register import Register_pb2 as register_dot_Register__pb2
class RegisterStub(object):
"""register service for ApplicationCode, this service is called when service starts.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.doServiceRegister = channel.unary_unary(
'/Register/doServiceRegister',
request_serializer=register_dot_Register__pb2.Services.SerializeToString,
response_deserializer=register_dot_Register__pb2.ServiceRegisterMapping.FromString,
)
self.doServiceInstanceRegister = channel.unary_unary(
'/Register/doServiceInstanceRegister',
request_serializer=register_dot_Register__pb2.ServiceInstances.SerializeToString,
response_deserializer=register_dot_Register__pb2.ServiceInstanceRegisterMapping.FromString,
)
self.doEndpointRegister = channel.unary_unary(
'/Register/doEndpointRegister',
request_serializer=register_dot_Register__pb2.Endpoints.SerializeToString,
response_deserializer=register_dot_Register__pb2.EndpointMapping.FromString,
)
self.doNetworkAddressRegister = channel.unary_unary(
'/Register/doNetworkAddressRegister',
request_serializer=register_dot_Register__pb2.NetAddresses.SerializeToString,
response_deserializer=register_dot_Register__pb2.NetAddressMapping.FromString,
)
self.doServiceAndNetworkAddressMappingRegister = channel.unary_unary(
'/Register/doServiceAndNetworkAddressMappingRegister',
request_serializer=register_dot_Register__pb2.ServiceAndNetworkAddressMappings.SerializeToString,
response_deserializer=common_dot_common__pb2.Commands.FromString,
)
class RegisterServicer(object):
"""register service for ApplicationCode, this service is called when service starts.
"""
def doServiceRegister(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def doServiceInstanceRegister(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def doEndpointRegister(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def doNetworkAddressRegister(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def doServiceAndNetworkAddressMappingRegister(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RegisterServicer_to_server(servicer, server):
rpc_method_handlers = {
'doServiceRegister': grpc.unary_unary_rpc_method_handler(
servicer.doServiceRegister,
request_deserializer=register_dot_Register__pb2.Services.FromString,
response_serializer=register_dot_Register__pb2.ServiceRegisterMapping.SerializeToString,
),
'doServiceInstanceRegister': grpc.unary_unary_rpc_method_handler(
servicer.doServiceInstanceRegister,
request_deserializer=register_dot_Register__pb2.ServiceInstances.FromString,
response_serializer=register_dot_Register__pb2.ServiceInstanceRegisterMapping.SerializeToString,
),
'doEndpointRegister': grpc.unary_unary_rpc_method_handler(
servicer.doEndpointRegister,
request_deserializer=register_dot_Register__pb2.Endpoints.FromString,
response_serializer=register_dot_Register__pb2.EndpointMapping.SerializeToString,
),
'doNetworkAddressRegister': grpc.unary_unary_rpc_method_handler(
servicer.doNetworkAddressRegister,
request_deserializer=register_dot_Register__pb2.NetAddresses.FromString,
response_serializer=register_dot_Register__pb2.NetAddressMapping.SerializeToString,
),
'doServiceAndNetworkAddressMappingRegister': grpc.unary_unary_rpc_method_handler(
servicer.doServiceAndNetworkAddressMappingRegister,
request_deserializer=register_dot_Register__pb2.ServiceAndNetworkAddressMappings.FromString,
response_serializer=common_dot_common__pb2.Commands.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Register', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Register(object):
"""register service for ApplicationCode, this service is called when service starts.
"""
@staticmethod
def doServiceRegister(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Register/doServiceRegister',
register_dot_Register__pb2.Services.SerializeToString,
register_dot_Register__pb2.ServiceRegisterMapping.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def doServiceInstanceRegister(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Register/doServiceInstanceRegister',
register_dot_Register__pb2.ServiceInstances.SerializeToString,
register_dot_Register__pb2.ServiceInstanceRegisterMapping.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def doEndpointRegister(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Register/doEndpointRegister',
register_dot_Register__pb2.Endpoints.SerializeToString,
register_dot_Register__pb2.EndpointMapping.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def doNetworkAddressRegister(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Register/doNetworkAddressRegister',
register_dot_Register__pb2.NetAddresses.SerializeToString,
register_dot_Register__pb2.NetAddressMapping.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def doServiceAndNetworkAddressMappingRegister(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Register/doServiceAndNetworkAddressMappingRegister',
register_dot_Register__pb2.ServiceAndNetworkAddressMappings.SerializeToString,
common_dot_common__pb2.Commands.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
```
#### File: python2sky/tests/test_config.py
```python
import unittest
from python2sky import config
class TestConfig(unittest.TestCase):
def testConfig(self):
self.assertEqual(config.SERVICE_ID, 0)
config.SERVICE_ID = 1
self.assertEqual(config.SERVICE_ID, 1)
config.SERVICE_ID = 2
self.assertEqual(config.SERVICE_ID, 2)
config.SERVICE_INSTANCE_ID = 1
self.assertEqual(config.SERVICE_INSTANCE_ID, 1)
```
#### File: python2sky/tests/test_trace_segment_client.py
```python
import time
from python2sky import config
from python2sky.context.context_carrier import ContextCarrier
from python2sky.context.context_manager import ContextManager
from python2sky.remote.service_register_client import get_service_register
from python2sky.remote.trace_segment_client import get_trace_segment_client
from python2sky.util.uuid_util import global_id_to_string
from tests.base_test_case import BaseTestCase
class Trace_segment_client(BaseTestCase):
def test_send(self):
# get_service_register()
# time.sleep(10)
get_trace_segment_client()
# time.sleep(0)
# carrier = ContextCarrier()
# carrier.deserialize(self.SW6)
config.SERVICE_ID = 3
config.SERVICE_INSTANCE_ID = 53
entry_span = ContextManager.create_entry_span("/operation", None)
# local_span = ContextManager.create_local_span("/local")
# carrier2 = ContextCarrier()
# exit_span = ContextManager.create_inject_exit_span("/exit", "192.168.3.11:8080", carrier2)
# sw6 = carrier.serialize()
# self.assertEqual(sw6, carrier.serialize())
# self.assertEqual(ContextManager.get_global_trace_id(), global_id_to_string(["3", "4", "5"]))
# ContextManager.stop_span(exit_span)
# ContextManager.stop_span(local_span)
ContextManager.stop_span(entry_span)
```
#### File: python2sky/tests/test_tracing_context.py
```python
import time
from threading import Thread
from python2sky import config
from python2sky.context.context_carrier import ContextCarrier
from python2sky.context.context_manager import ContextManager
from python2sky.util.count_down_latch import CountDownLatch
from python2sky.util.uuid_util import global_id_to_string
from tests.base_test_case import BaseTestCase
class TestTracingContext(BaseTestCase):
def setUp(self):
super().setUp()
def test_ignored_segment(self):
entry_span = ContextManager.create_entry_span("/operation", None)
local_span = ContextManager.create_local_span("/local")
exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
ContextManager.stop_span(exit_span)
ContextManager.stop_span(local_span)
ContextManager.stop_span(entry_span)
def test_tracing_context(self):
config.SERVICE_ID = 1
config.SERVICE_INSTANCE_ID = 1
entry_span = ContextManager.create_entry_span("/operation", None)
local_span = ContextManager.create_local_span("/local")
exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
ContextManager.stop_span(exit_span)
ContextManager.stop_span(local_span)
ContextManager.stop_span(entry_span)
def test_tracing_context_extract(self):
carrier = ContextCarrier()
carrier.deserialize(self.SW6)
config.SERVICE_ID = 1
config.SERVICE_INSTANCE_ID = 1
entry_span = ContextManager.create_entry_span("/operation", carrier)
local_span = ContextManager.create_local_span("/local")
exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
tracing_context = ContextManager.get_tracing_context()
self.assertEqual(tracing_context.segment.refs[0], carrier)
ContextManager.stop_span(exit_span)
ContextManager.stop_span(local_span)
ContextManager.stop_span(entry_span)
def test_tracing_context_inject(self):
carrier = ContextCarrier()
config.SERVICE_ID = 1
config.SERVICE_INSTANCE_ID = 1
entry_span = ContextManager.create_entry_span("/operation", None)
local_span = ContextManager.create_local_span("/local")
exit_span = ContextManager.create_inject_exit_span("/exit", "172.16.17.32", carrier)
sw6 = carrier.serialize()
self.assertIsNotNone(sw6)
ContextManager.stop_span(exit_span)
ContextManager.stop_span(local_span)
ContextManager.stop_span(entry_span)
def test_tracing_context_inject_and_extract(self):
carrier = ContextCarrier()
carrier.deserialize(self.SW6)
config.SERVICE_ID = 1
config.SERVICE_INSTANCE_ID = 1
entry_span = ContextManager.create_entry_span("/operation", carrier)
local_span = ContextManager.create_local_span("/local")
carrier2 = ContextCarrier()
exit_span = ContextManager.create_inject_exit_span("/exit", "172.16.17.32", carrier2)
sw6 = carrier.serialize()
self.assertEqual(sw6, carrier.serialize())
self.assertEqual(ContextManager.get_global_trace_id(), global_id_to_string(["3", "4", "5"]))
ContextManager.stop_span(exit_span)
ContextManager.stop_span(local_span)
ContextManager.stop_span(entry_span)
self.assertEqual(carrier.trace_id, carrier2.trace_id)
def local_thread(self, tracing_context, count_down_latch):
ContextManager.CONTEXT.trace_context = tracing_context
local_span = ContextManager.create_local_span("/local")
ContextManager.stop_span(local_span)
count_down_latch.count_down()
def exit_thread(self, tracing_context, count_down_latch):
ContextManager.CONTEXT.trace_context = tracing_context
exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
ContextManager.stop_span(exit_span)
count_down_latch.count_down()
def test_async(self):
config.SERVICE_ID = 1
config.SERVICE_INSTANCE_ID = 1
entry_span = ContextManager.create_entry_span("/operation", None)
count_down_latch = CountDownLatch(2)
t1 = Thread(target=self.local_thread, args=(ContextManager.get_tracing_context(), count_down_latch,))
t2 = Thread(target=self.exit_thread, args=(ContextManager.get_tracing_context(), count_down_latch,))
t1.start()
t2.start()
count_down_latch.wait()
ContextManager.stop_span(entry_span)
def test_async2(self):
config.SERVICE_ID = 1
config.SERVICE_INSTANCE_ID = 1
entry_span = ContextManager.create_entry_span("/operation", None)
context_carrier = ContextManager.capture()
count_down_latch = CountDownLatch(2)
trace_id = ContextManager.get_global_trace_id()
def local_thread():
local_span = ContextManager.create_local_span("/local")
ContextManager.continued(context_carrier)
trace_id1 = ContextManager.get_global_trace_id()
self.assertEqual(trace_id1, trace_id)
ContextManager.stop_span(local_span)
count_down_latch.count_down()
def exit_thread():
exit_span = ContextManager.create_exit_span("/exit", "172.16.17.32")
ContextManager.continued(context_carrier)
trace_id2 = ContextManager.get_global_trace_id()
self.assertEqual(trace_id2, trace_id)
time.sleep(3)
ContextManager.stop_span(exit_span)
count_down_latch.count_down()
t1 = Thread(target=local_thread, args=())
t2 = Thread(target=exit_thread, args=())
t1.start()
t2.start()
ContextManager.stop_span(entry_span)
count_down_latch.wait()
``` |
{
"source": "johnml1135/machine.py",
"score": 3
} |
#### File: machine/annotations/range.py
```python
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Generic, Iterable, Iterator, Optional, Sized, TypeVar, cast
from ..utils.comparable import Comparable
Offset = TypeVar("Offset")
@dataclass(frozen=True)
class Range(Generic[Offset], Sized, Iterable[Offset], Comparable):
_factory: "_RangeFactory[Offset]"
start: Offset
end: Offset
@classmethod
def create(cls, start: Offset, end: Optional[Offset] = None) -> "Range[Offset]":
if isinstance(start, int):
factory = cast(_RangeFactory[Offset], _INT_RANGE_FACTORY)
else:
raise RuntimeError("Range type not supported.")
return factory.create(start, end)
@property
def length(self) -> int:
return self._factory.get_length(self.start, self.end)
def overlaps(self, other: "Range[Offset]") -> bool:
if self._factory.include_endpoint:
return (
self._factory.offset_compare(self.start, other.end) <= 0
and self._factory.offset_compare(self.end, other.start) >= 0
)
return (
self._factory.offset_compare(self.start, other.end) < 0
and self._factory.offset_compare(self.end, other.start) > 0
)
def contains(self, other: "Range[Offset]") -> bool:
return (
self._factory.offset_compare(self.start, other.start) <= 0
and self._factory.offset_compare(self.end, other.end) >= 0
)
def compare_to(self, other: object) -> int:
if not isinstance(other, Range):
raise TypeError("other is not the same type of Range.")
other = cast(Range[Offset], other)
if self._factory != other._factory:
raise TypeError("other is not the same type of Range.")
res = self._factory.offset_compare(self.start, other.start)
if res == 0:
res = -self._factory.offset_compare(self.end, other.end)
return res
def __len__(self) -> int:
return self.length
def __iter__(self) -> Iterator[Offset]:
return iter(self._factory.iterate(self.start, self.end))
def __repr__(self) -> str:
return f"[{self.start}, {self.end}]"
class _RangeFactory(ABC, Generic[Offset]):
@property
@abstractmethod
def include_endpoint(self) -> bool:
...
def create(self, start: Offset, end: Optional[Offset]) -> Range[Offset]:
if end is None:
end = start
return Range(self, start, end)
@abstractmethod
def get_length(self, start: Offset, end: Offset) -> int:
...
@abstractmethod
def iterate(self, start: Offset, end: Offset) -> Iterable[Offset]:
...
@abstractmethod
def offset_compare(self, x: Offset, y: Offset) -> int:
...
class _IntRangeFactory(_RangeFactory[int]):
@property
def include_endpoint(self) -> bool:
return False
def create(self, start: int, end: Optional[int]) -> "Range[int]":
if end is None:
end = start + 1
return Range(self, start, end)
def get_length(self, start: int, end: int) -> int:
return end - start
def iterate(self, start: int, end: int) -> Iterable[int]:
return range(start, end)
def offset_compare(self, x: int, y: int) -> int:
if x < y:
return -1
if x > y:
return 1
return 0
_INT_RANGE_FACTORY = _IntRangeFactory()
```
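A short usage sketch for the integer case, grounded in the class above; the import path simply follows the file layout shown in the header.
```python
# Integer ranges are end-exclusive (see _IntRangeFactory.create), so Range.create(0, 5) covers 0..4.
from machine.annotations.range import Range

r1 = Range.create(0, 5)
r2 = Range.create(3)             # single offset -> [3, 4)

assert len(r1) == 5
assert list(r2) == [3]
assert r1.overlaps(r2)
assert r1.contains(r2)
assert r2.compare_to(r1) > 0     # later start sorts after earlier start
```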
#### File: machine/corpora/corpora_helpers.py
```python
import os
import platform
from glob import glob
from pathlib import Path
from typing import Generator, Iterable, Optional, Tuple, TypeVar
import regex as re
from ..scripture.canon import book_id_to_number
from ..scripture.verse_ref import VERSE_RANGE_SEPARATOR, VERSE_SEQUENCE_INDICATOR, Versification, VersificationType
def get_files(file_patterns: Iterable[str]) -> Iterable[Tuple[str, str]]:
file_patterns = list(file_patterns)
if len(file_patterns) == 1 and os.path.isfile(file_patterns[0]):
yield ("*all*", file_patterns[0])
else:
for file_pattern in file_patterns:
path = file_pattern
search_pattern = "*"
if not file_pattern.endswith(os.sep) and not os.path.isdir(file_pattern):
path = os.path.dirname(file_pattern)
search_pattern = os.path.basename(file_pattern)
if path == "":
path = "."
base, _ = os.path.splitext(search_pattern)
converted_mask = re.escape(base).replace("\\*", "(.*)").replace("\\?", "(.)")
mask_regex = re.compile(converted_mask, re.IGNORECASE if platform.system() == "Windows" else 0)
for filename in glob(os.path.join(path, search_pattern)):
id = os.path.basename(filename)
id, _ = os.path.splitext(id)
match = mask_regex.fullmatch(id)
if match is not None:
updated_id = ""
for group in match.groups():
if group is None:
continue
if len(updated_id) > 0:
updated_id += "-"
updated_id += group
if len(updated_id) > 0:
id = updated_id
yield (id, filename)
T = TypeVar("T")
def gen(iterable: Iterable[T] = []) -> Generator[T, None, None]:
return (i for i in iterable)
def get_scripture_text_sort_key(id: str) -> str:
return str(book_id_to_number(id)).zfill(3)
def get_usx_id(filename: Path) -> str:
name = filename.name
if len(name) == 3:
return name
return name[3:6]
def get_usx_versification(project_dir: Path, versification: Optional[Versification]) -> Versification:
versification_filename = project_dir / "versification.vrs"
if versification is None and versification_filename.is_file():
versification_name = project_dir.name
versification = Versification.load(versification_filename, fallback_name=versification_name)
return Versification.get_builtin(VersificationType.ENGLISH) if versification is None else versification
def merge_verse_ranges(verse1: str, verse2: str) -> str:
text = ""
verse1_nums = set(_get_verse_nums(verse1))
verse2_nums = set(_get_verse_nums(verse2))
start_verse_str = ""
prev_verse_num = -1
prev_verse_str = ""
for verse_num, verse_str in sorted(verse1_nums | verse2_nums, key=lambda x: x[0]):
if prev_verse_num == -1:
start_verse_str = verse_str
elif prev_verse_num != verse_num - 1:
if len(text) > 0:
text += VERSE_SEQUENCE_INDICATOR
text += _get_verse_range(start_verse_str, prev_verse_str)
start_verse_str = verse_str
prev_verse_num = verse_num
prev_verse_str = verse_str
if len(text) > 0:
text += VERSE_SEQUENCE_INDICATOR
text += _get_verse_range(start_verse_str, prev_verse_str)
return text
def _get_verse_range(start_verse_num: str, end_verse_num: str) -> str:
verse_range = start_verse_num
if end_verse_num != start_verse_num:
verse_range += VERSE_RANGE_SEPARATOR
verse_range += end_verse_num
return verse_range
def _get_verse_nums(verse: str) -> Iterable[Tuple[int, str]]:
parts = verse.split(VERSE_SEQUENCE_INDICATOR)
for part in parts:
pieces = part.split(VERSE_RANGE_SEPARATOR)
start_verse_num = _get_verse_num(pieces[0])
yield start_verse_num, pieces[0]
if len(pieces) <= 1:
continue
end_verse_num = _get_verse_num(pieces[1])
for verse_num in range(start_verse_num + 1, end_verse_num):
yield verse_num, str(verse_num)
yield end_verse_num, pieces[1]
def _get_verse_num(verse_str: str) -> int:
v_num = 0
for ch in verse_str:
if not ch.isdigit():
break
v_num = v_num * 10 + int(ch)
return v_num
```
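A quick sketch of `merge_verse_ranges` in action. The expected outputs assume the Paratext-style separators, i.e. `VERSE_RANGE_SEPARATOR == "-"` and `VERSE_SEQUENCE_INDICATOR == ","`; if the constants differ, only the punctuation in the output changes.
```python
# Illustrative only; the expected outputs assume "-" and "," as the range and sequence separators.
from machine.corpora.corpora_helpers import merge_verse_ranges

print(merge_verse_ranges("1-3", "2,5"))  # "1-3,5": overlapping verses are deduplicated
print(merge_verse_ranges("4", "5"))      # "4-5": contiguous verses collapse into a range
```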
#### File: machine/corpora/dbl_bundle_text_corpus.py
```python
import os
import xml.etree.ElementTree as etree
from io import TextIOWrapper
from typing import List
from zipfile import ZipFile
from ..scripture.verse_ref import Versification, VersificationType
from ..tokenization.tokenizer import Tokenizer
from ..utils.typeshed import StrPath
from .dbl_bundle_text import DblBundleText
from .scripture_text_corpus import ScriptureTextCorpus
class DblBundleTextCorpus(ScriptureTextCorpus):
_SUPPORTED_VERSIONS = {"2.0", "2.1"}
def __init__(self, word_tokenizer: Tokenizer[str, int, str], filename: StrPath) -> None:
with ZipFile(filename, "r") as archive:
with archive.open("metadata.xml", "r") as stream:
doc = etree.parse(stream)
if doc.getroot().get("version") not in DblBundleTextCorpus._SUPPORTED_VERSIONS:
raise RuntimeError("Unsupported version of DBL bundle.")
versification_entry = next(
(zi for zi in archive.filelist if os.path.basename(zi.filename) == "versification.vrs"), None
)
if versification_entry is not None:
with archive.open(versification_entry, "r") as stream:
abbr = doc.getroot().findtext("./identification/abbreviation", "")
versification = Versification.parse(
TextIOWrapper(stream, encoding="utf-8-sig"), "versification.vrs", fallback_name=abbr
)
else:
versification = Versification.get_builtin(VersificationType.ENGLISH)
texts: List[DblBundleText] = []
for content_elem in doc.getroot().findall("./publications/publication[@default='true']/structure/content"):
texts.append(
DblBundleText(
word_tokenizer, content_elem.get("role", ""), filename, content_elem.get("src", ""), versification
)
)
super().__init__(word_tokenizer, versification, texts)
```
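A usage sketch for loading a DBL bundle. The bundle path below is hypothetical; the constructor expects a zip archive containing a `metadata.xml` with version 2.0 or 2.1 and, optionally, a `versification.vrs`. The package-level import of `DblBundleTextCorpus` is assumed to mirror the other corpora classes imported in the tests later in this document.
```python
# "my_bundle.zip" is a hypothetical DBL bundle path.
from machine.corpora import DblBundleTextCorpus
from machine.tokenization import NullTokenizer

corpus = DblBundleTextCorpus(NullTokenizer(), "my_bundle.zip")
for text in corpus.texts:
    print(text.id)
```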
#### File: machine/corpora/parallel_text_corpus.py
```python
from typing import Generator, Iterable, Optional, Set
from ..utils.context_managed_generator import ContextManagedGenerator
from .dictionary_text_alignment_corpus import DictionaryTextAlignmentCorpus
from .parallel_text import ParallelText
from .parallel_text_segment import ParallelTextSegment
from .text_alignment_corpus import TextAlignmentCorpus
from .text_corpus import TextCorpus
from .text_segment import TextSegment
class ParallelTextCorpus:
def __init__(
self,
source_corpus: TextCorpus,
target_corpus: TextCorpus,
text_alignment_corpus: Optional[TextAlignmentCorpus] = None,
) -> None:
self._source_corpus = source_corpus
self._target_corpus = target_corpus
self._text_alignment_corpus = (
DictionaryTextAlignmentCorpus() if text_alignment_corpus is None else text_alignment_corpus
)
@property
def source_corpus(self) -> TextCorpus:
return self._source_corpus
@property
def target_corpus(self) -> TextCorpus:
return self._target_corpus
@property
def text_alignment_corpus(self) -> TextAlignmentCorpus:
return self._text_alignment_corpus
@property
def texts(self) -> Iterable[ParallelText]:
return self.get_texts()
@property
def segments(self) -> ContextManagedGenerator[ParallelTextSegment, None, None]:
return self.get_segments()
@property
def source_segments(self) -> ContextManagedGenerator[TextSegment, None, None]:
return ContextManagedGenerator(self._get_source_segments())
@property
def target_segments(self) -> ContextManagedGenerator[TextSegment, None, None]:
return ContextManagedGenerator(self._get_target_segments())
def invert(self) -> "ParallelTextCorpus":
return ParallelTextCorpus(self._target_corpus, self._source_corpus, self._text_alignment_corpus.invert())
def get_texts(self, all_source_segments: bool = False, all_target_segments: bool = False) -> Iterable[ParallelText]:
source_text_ids = {t.id for t in self._source_corpus.texts}
target_text_ids = {t.id for t in self._target_corpus.texts}
text_ids: Set[str]
if all_source_segments and all_target_segments:
text_ids = source_text_ids | target_text_ids
elif not all_source_segments and not all_target_segments:
text_ids = source_text_ids & target_text_ids
elif all_source_segments:
text_ids = source_text_ids
else:
text_ids = target_text_ids
return sorted((self._create_parallel_text(id) for id in text_ids), key=lambda t: t.sort_key)
def get_segments(
self, all_source_segments: bool = False, all_target_segments: bool = False, include_text: bool = True
) -> ContextManagedGenerator[ParallelTextSegment, None, None]:
return ContextManagedGenerator(self._get_segments(all_source_segments, all_target_segments, include_text))
def get_count(
self, all_source_segments: bool = False, all_target_segments: bool = False, nonempty_only: bool = False
) -> int:
return sum(
t.get_count(all_source_segments, all_target_segments, nonempty_only)
for t in self.get_texts(all_source_segments, all_target_segments)
)
def _get_segments(
self, all_source_segments: bool, all_target_segments: bool, include_text: bool
) -> Generator[ParallelTextSegment, None, None]:
for text in self.get_texts(all_source_segments, all_target_segments):
with text.get_segments(all_source_segments, all_target_segments, include_text) as segments:
for segment in segments:
yield segment
def _get_source_segments(self) -> Generator[TextSegment, None, None]:
for text in self.texts:
with text.source_text.get_segments() as segments:
for segment in segments:
yield segment
def _get_target_segments(self) -> Generator[TextSegment, None, None]:
for text in self.texts:
with text.target_text.get_segments() as segments:
for segment in segments:
yield segment
def _create_parallel_text(self, id: str) -> ParallelText:
source_text = self._source_corpus[id]
target_text = self._target_corpus[id]
text_alignment_collection = self._text_alignment_corpus[id]
return ParallelText(source_text, target_text, text_alignment_collection)
```
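A short sketch showing how a `ParallelTextCorpus` combines source and target corpora, using the in-memory corpus types that the tests at the end of this document also rely on. Note how `all_source_segments=True` pulls in texts that exist only on the source side.
```python
from machine.corpora import (
    DictionaryTextAlignmentCorpus,
    DictionaryTextCorpus,
    MemoryText,
    MemoryTextAlignmentCollection,
    ParallelTextCorpus,
)

source = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text2"))
target = DictionaryTextCorpus(MemoryText("text1"))
alignments = DictionaryTextAlignmentCorpus(MemoryTextAlignmentCollection("text1"))

parallel = ParallelTextCorpus(source, target, alignments)
print([t.id for t in parallel.texts])                                 # ["text1"]
print([t.id for t in parallel.get_texts(all_source_segments=True)])  # ["text1", "text2"]
```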
#### File: machine/corpora/text_alignment_corpus.py
```python
from abc import ABC, abstractmethod
from typing import Iterable
from .text_alignment_collection import TextAlignmentCollection
class TextAlignmentCorpus(ABC):
@property
@abstractmethod
def text_alignment_collections(self) -> Iterable[TextAlignmentCollection]:
...
@abstractmethod
def __getitem__(self, id: str) -> TextAlignmentCollection:
...
@abstractmethod
def create_null_text_alignment_collection(self, id: str) -> TextAlignmentCollection:
...
@abstractmethod
def invert(self) -> "TextAlignmentCorpus":
...
def get_text_alignment_collection(self, id: str) -> TextAlignmentCollection:
return self[id]
```
#### File: machine/corpora/text_segment.py
```python
from dataclasses import dataclass
from typing import Any, Sequence
@dataclass(eq=False, frozen=True)
class TextSegment:
text_id: str
segment_ref: Any
segment: Sequence[str]
is_sentence_start: bool
is_in_range: bool
is_range_start: bool
is_empty: bool
def __repr__(self) -> str:
if self.is_empty:
segment = "<range>" if self.is_in_range else "EMPTY"
elif len(self.segment) > 0:
segment = " ".join(self.segment)
else:
segment = "NONEMPTY"
return f"{self.segment_ref} - {segment}"
```
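A tiny example of the `__repr__` behaviour, constructing a segment directly (applications normally obtain segments from a corpus rather than building them by hand):
```python
from machine.corpora.text_segment import TextSegment

seg = TextSegment(
    text_id="MAT",
    segment_ref="MAT 1:1",
    segment=["In", "the", "beginning"],
    is_sentence_start=True,
    is_in_range=False,
    is_range_start=False,
    is_empty=False,
)
print(repr(seg))  # MAT 1:1 - In the beginning
```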
#### File: machine/corpora/usfm_parser.py
```python
from typing import List, Sequence, Tuple
from .usfm_marker import UsfmStyleType, UsfmTextProperties
from .usfm_stylesheet import UsfmStylesheet
from .usfm_token import UsfmToken, UsfmTokenType
class UsfmParser:
def __init__(self, stylesheet: UsfmStylesheet) -> None:
self._stylesheet = stylesheet
def parse(self, usfm: str, preserve_whitespace: bool = False) -> Sequence[UsfmToken]:
tokens: List[UsfmToken] = []
index = 0
while index < len(usfm):
next_marker_index = usfm.find("\\", index + 1) if index < len(usfm) - 1 else -1
if next_marker_index == -1:
next_marker_index = len(usfm)
# If text, create text token until end or next \
ch = usfm[index]
if ch != "\\":
text = usfm[index:next_marker_index]
if not preserve_whitespace:
text = _regularize_spaces(text)
tokens.append(UsfmToken(UsfmTokenType.TEXT, None, text))
index = next_marker_index
continue
# Get marker (and move past whitespace or star ending)
index += 1
marker_start = index
while index < len(usfm):
ch = usfm[index]
# Backslash starts a new marker
if ch == "\\":
break
# End star is part of marker
if ch == "*":
index += 1
break
if _is_nonsemantic_whitespace(ch):
# Preserve whitespace if needed, otherwise skip
if not preserve_whitespace:
index += 1
break
index += 1
marker_str = usfm[marker_start:index].rstrip()
# Multiple whitespace after non-end marker is ok
if not marker_str.endswith("*") and not preserve_whitespace:
while index < len(usfm) and _is_nonsemantic_whitespace(usfm[index]):
index += 1
# Lookup marker
marker = self._stylesheet.get_marker(marker_str.lstrip("+"))
            # A plus prefix is only valid on nested character styles; if the base marker is not a character
            # style, look up the full marker (including the plus) so that it is treated as unknown
if marker_str.startswith("+") and marker.style_type != UsfmStyleType.CHARACTER:
marker = self._stylesheet.get_marker(marker_str)
if marker.style_type == UsfmStyleType.CHARACTER:
if (marker.text_properties & UsfmTextProperties.VERSE) == UsfmTextProperties.VERSE:
index, text = _get_next_word(usfm, index, preserve_whitespace)
tokens.append(UsfmToken(UsfmTokenType.VERSE, marker, text))
else:
tokens.append(UsfmToken(UsfmTokenType.CHARACTER, marker, None))
elif marker.style_type == UsfmStyleType.PARAGRAPH:
# Handle chapter special case
if (marker.text_properties & UsfmTextProperties.CHAPTER) == UsfmTextProperties.CHAPTER:
index, text = _get_next_word(usfm, index, preserve_whitespace)
tokens.append(UsfmToken(UsfmTokenType.CHAPTER, marker, text))
elif (marker.text_properties & UsfmTextProperties.BOOK) == UsfmTextProperties.BOOK:
index, text = _get_next_word(usfm, index, preserve_whitespace)
tokens.append(UsfmToken(UsfmTokenType.BOOK, marker, text))
else:
tokens.append(UsfmToken(UsfmTokenType.PARAGRAPH, marker, None))
elif marker.style_type == UsfmStyleType.NOTE:
index, text = _get_next_word(usfm, index, preserve_whitespace)
tokens.append(UsfmToken(UsfmTokenType.NOTE, marker, text))
elif marker.style_type == UsfmStyleType.END:
tokens.append(UsfmToken(UsfmTokenType.END, marker, None))
elif marker.style_type == UsfmStyleType.UNKNOWN:
# End tokens are always end tokens, even if unknown
if marker_str.endswith("*"):
tokens.append(UsfmToken(UsfmTokenType.END, marker, None))
                # Handle the special case of esb and esbe, which might not be in the basic stylesheet but are
                # always sidebars and so should be tokenized as paragraphs
elif marker_str == "esb" or marker_str == "esbe":
tokens.append(UsfmToken(UsfmTokenType.PARAGRAPH, marker, None))
else:
# Create unknown token with a corresponding end note
tokens.append(UsfmToken(UsfmTokenType.UNKNOWN, marker, None))
        # Force a space to be present in the tokenization if a token is immediately followed by a token that
        # requires a preceding CR/LF. This ensures that the tokenization will match when written to disk and
        # re-read. For example, "\p test\p here" requires a space after "test". Likewise, "\p \em test\em*\p here"
        # requires a space token inserted after \em*.
        if not preserve_whitespace:
            # Use a while loop so that the list can grow as space tokens are inserted; reassigning the loop
            # variable of a `for ... in range(...)` loop would have no effect in Python.
            i = 1
            while i < len(tokens):
                cur_token = tokens[i]
                prev_token = tokens[i - 1]
                # These tokens require a leading newline (verses do too, except immediately after '(' or '[')
                if (
                    cur_token.type == UsfmTokenType.BOOK
                    or cur_token.type == UsfmTokenType.CHAPTER
                    or cur_token.type == UsfmTokenType.PARAGRAPH
                    or (
                        cur_token.type == UsfmTokenType.VERSE
                        and not (
                            prev_token.type == UsfmTokenType.TEXT
                            and prev_token.text is not None
                            and (prev_token.text.endswith("(") or prev_token.text.endswith("["))
                        )
                    )
                ):
                    # Add space to text token
                    if prev_token.type == UsfmTokenType.TEXT:
                        assert prev_token.text is not None
                        if not prev_token.text.endswith(" "):
                            tokens[i - 1] = UsfmToken(UsfmTokenType.TEXT, None, prev_token.text + " ")
                    elif prev_token.type == UsfmTokenType.END:
                        # Insert a space token after the '*' of the end marker and skip past it
                        tokens.insert(i, UsfmToken(UsfmTokenType.TEXT, None, " "))
                        i += 1
                i += 1
return tokens
_ZERO_WIDTH_SPACE = "\u200B"
def _get_next_word(usfm: str, index: int, preserve_whitespace: bool) -> Tuple[int, str]:
# Skip over leading spaces
while index < len(usfm) and _is_nonsemantic_whitespace(usfm[index]):
index += 1
data_start = index
while index < len(usfm) and not _is_nonsemantic_whitespace(usfm[index]) and usfm[index] != "\\":
index += 1
data = usfm[data_start:index]
# Skip over trailing spaces
if not preserve_whitespace:
while index < len(usfm) and _is_nonsemantic_whitespace(usfm[index]):
index += 1
return index, data
def _is_nonsemantic_whitespace(c: str) -> bool:
    # True for whitespace that carries no semantic meaning: any whitespace character except U+3000
    # (IDEOGRAPHIC SPACE), plus the zero-width space.
return (c != "\u3000" and c.isspace()) or c == _ZERO_WIDTH_SPACE
def _regularize_spaces(text: str) -> str:
was_space = False
result = ""
for i in range(len(text)):
ch = text[i]
        # Control characters (including CR/LF and TAB) become spaces
if ord(ch) < 32:
if not was_space:
result += " "
was_space = True
elif (
not was_space and ch == _ZERO_WIDTH_SPACE and i + 1 < len(text) and _is_nonsemantic_whitespace(text[i + 1])
):
# ZWSP is redundant if followed by a space
pass
elif _is_nonsemantic_whitespace(ch):
# Keep other kinds of spaces
if not was_space:
result += ch
was_space = True
else:
result += ch
was_space = False
return result
```
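A parsing sketch. It assumes that `UsfmStylesheet` can be constructed from the path of a `.sty` stylesheet file (`usfm.sty` below is a hypothetical path); the parser itself only needs the stylesheet instance.
```python
# Sketch only: "usfm.sty" is a hypothetical stylesheet path and the UsfmStylesheet
# constructor signature is assumed.
from machine.corpora.usfm_parser import UsfmParser
from machine.corpora.usfm_stylesheet import UsfmStylesheet

parser = UsfmParser(UsfmStylesheet("usfm.sty"))
usfm = "\\id MAT\n\\c 1\n\\p\n\\v 1 Chapter one, verse one."
for token in parser.parse(usfm):
    print(token.type, repr(token))
```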
#### File: machine/corpora/usfm_token.py
```python
from dataclasses import dataclass
from enum import Enum, auto
from typing import Optional
from .usfm_marker import UsfmMarker
class UsfmTokenType(Enum):
BOOK = auto()
CHAPTER = auto()
VERSE = auto()
TEXT = auto()
PARAGRAPH = auto()
CHARACTER = auto()
NOTE = auto()
END = auto()
UNKNOWN = auto()
@dataclass(frozen=True)
class UsfmToken:
type: UsfmTokenType
marker: Optional[UsfmMarker]
text: Optional[str]
def __repr__(self) -> str:
string = ""
if self.marker is not None:
string += str(self.marker)
if self.text is not None and self.text != "":
if len(string) > 0:
string += " "
string += self.text
return string
```
#### File: machine/corpora/usx_text_base.py
```python
from abc import abstractmethod
from typing import Generator, Optional
from ..scripture.verse_ref import VerseRef, Versification
from ..tokenization.tokenizer import Tokenizer
from .scripture_text import ScriptureText
from .stream_container import StreamContainer
from .text_segment import TextSegment
from .usx_verse_parser import UsxVerseParser
class UsxTextBase(ScriptureText):
def __init__(
self, word_tokenizer: Tokenizer[str, int, str], id: str, versification: Optional[Versification]
) -> None:
super().__init__(word_tokenizer, id, versification)
self._parser = UsxVerseParser()
@abstractmethod
def _create_stream_container(self) -> StreamContainer:
...
def _get_segments(self, include_text: bool) -> Generator[TextSegment, None, None]:
with self._create_stream_container() as stream_container, stream_container.open_stream() as stream:
prev_verse_ref = VerseRef()
for verse in self._parser.parse(stream):
for seg in self._create_text_segments(
include_text, prev_verse_ref, verse.chapter, verse.verse, verse.text, verse.is_sentence_start
):
yield seg
```
#### File: machine/tokenization/null_tokenizer.py
```python
from typing import Iterable, Optional
from ..annotations.range import Range
from .string_tokenizer import StringTokenizer
class NullTokenizer(StringTokenizer):
def tokenize_as_ranges(self, data: str, data_range: Optional[Range[int]] = None) -> Iterable[Range[int]]:
if data_range is None:
data_range = Range.create(0, len(data))
if len(data_range) > 0:
yield data_range
```
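The null tokenizer simply returns the whole (non-empty) input as a single token, which is why the scripture corpus tests below see entire verses as `segment[0]`:
```python
from machine.tokenization import NullTokenizer

tokenizer = NullTokenizer()
print(list(tokenizer.tokenize("Chapter one, verse one.")))  # ['Chapter one, verse one.']
print(list(tokenizer.tokenize("")))                         # [] (empty input yields no range)
```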
#### File: machine/tokenization/string_tokenizer.py
```python
from typing import Iterable, Optional
from ..annotations.range import Range
from .range_tokenizer import RangeTokenizer
class StringTokenizer(RangeTokenizer[str, int, str]):
def tokenize(self, data: str, data_range: Optional[Range[int]] = None) -> Iterable[str]:
return (data[r.start : r.end] for r in self.tokenize_as_ranges(data, data_range))
```
#### File: machine/tokenization/tokenizer.py
```python
from abc import ABC, abstractmethod
from typing import Generic, Iterable, Optional, TypeVar
from ..annotations.range import Range
Data = TypeVar("Data")
Offset = TypeVar("Offset")
Token = TypeVar("Token")
class Tokenizer(ABC, Generic[Data, Offset, Token]):
@abstractmethod
def tokenize(self, data: Data, data_range: Optional[Range[Offset]] = None) -> Iterable[Token]:
...
```
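To make the generic parameters concrete (`Data=str`, `Offset=int`, `Token=str`), here is a minimal illustrative subclass; it is not part of the library.
```python
from typing import Iterable, Optional

from machine.annotations.range import Range
from machine.tokenization.tokenizer import Tokenizer


class SimpleWhitespaceTokenizer(Tokenizer[str, int, str]):
    """Illustration only: splits on whitespace, optionally restricted to a range."""

    def tokenize(self, data: str, data_range: Optional[Range[int]] = None) -> Iterable[str]:
        if data_range is not None:
            data = data[data_range.start : data_range.end]
        return data.split()


print(list(SimpleWhitespaceTokenizer().tokenize("a b  c")))  # ['a', 'b', 'c']
```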
#### File: machine/translation/symmetrized_word_alignment_model_trainer.py
```python
from typing import Callable, Optional
from ..utils.phased_progress_reporter import Phase, PhasedProgressReporter
from ..utils.progress_status import ProgressStatus
from .trainer import Trainer, TrainStats
class SymmetrizedWordAlignmentModelTrainer(Trainer):
def __init__(self, direct_trainer: Trainer, inverse_trainer: Trainer) -> None:
self._direct_trainer = direct_trainer
self._inverse_trainer = inverse_trainer
@property
def stats(self) -> TrainStats:
return self._direct_trainer.stats
def train(
self,
progress: Optional[Callable[[ProgressStatus], None]] = None,
check_canceled: Optional[Callable[[], None]] = None,
) -> None:
reporter = PhasedProgressReporter(
progress, [Phase("Training direct alignment model"), Phase("Training inverse alignment model")]
)
with reporter.start_next_phase() as phase_progress:
self._direct_trainer.train(phase_progress, check_canceled)
if check_canceled is not None:
check_canceled()
with reporter.start_next_phase() as phase_progress:
self._inverse_trainer.train(phase_progress, check_canceled)
def save(self) -> None:
self._direct_trainer.save()
self._inverse_trainer.save()
```
#### File: machine/translation/thot/thot_word_alignment_model.py
```python
import sys
from abc import abstractmethod
from pathlib import Path
from typing import Collection, Iterable, Iterator, Optional, Sequence, Tuple, Union
import thot.alignment as ta
from ...corpora.parallel_text_corpus import ParallelTextCorpus
from ...corpora.token_processors import NO_OP, TokenProcessor
from ...utils.typeshed import StrPath
from ..ibm1_word_alignment_model import Ibm1WordAlignmentModel
from ..trainer import Trainer
from ..word_alignment_matrix import WordAlignmentMatrix
from ..word_vocabulary import WordVocabulary
from .thot_word_alignment_model_trainer import ThotWordAlignmentModelTrainer
from .thot_word_alignment_model_type import ThotWordAlignmentModelType
from .thot_word_alignment_parameters import ThotWordAlignmentParameters
_SPECIAL_SYMBOL_INDICES = {0, 1, 2}
class ThotWordAlignmentModel(Ibm1WordAlignmentModel):
def __init__(self, prefix_filename: Optional[StrPath] = None, create_new: bool = False) -> None:
self._set_model(self._create_model())
if prefix_filename is not None:
prefix_filename = Path(prefix_filename)
if create_new or not (prefix_filename.parent / (prefix_filename.name + ".src")).is_file():
self.create_new(prefix_filename)
else:
self.load(prefix_filename)
else:
self._prefix_filename = None
self.parameters = ThotWordAlignmentParameters()
@property
def source_words(self) -> WordVocabulary:
return self._source_words
@property
def target_words(self) -> WordVocabulary:
return self._target_words
@property
def special_symbol_indices(self) -> Collection[int]:
return _SPECIAL_SYMBOL_INDICES
@property
def thot_model(self) -> ta.AlignmentModel:
return self._model
@property
@abstractmethod
def type(self) -> ThotWordAlignmentModelType:
...
def load(self, prefix_filename: StrPath) -> None:
prefix_filename = Path(prefix_filename)
if not (prefix_filename.parent / (prefix_filename.name + ".src")).is_file():
raise FileNotFoundError("The word alignment model configuration could not be found.")
self._prefix_filename = prefix_filename
self._model.clear()
self._model.load(str(prefix_filename))
def create_new(self, prefix_filename: StrPath) -> None:
self._prefix_filename = Path(prefix_filename)
self._model.clear()
def save(self) -> None:
if self._prefix_filename is not None:
self._model.print(str(self._prefix_filename))
def create_trainer(
self,
corpus: ParallelTextCorpus,
source_preprocessor: TokenProcessor = NO_OP,
target_preprocessor: TokenProcessor = NO_OP,
max_corpus_count: int = sys.maxsize,
) -> Trainer:
return _Trainer(self, corpus, self._prefix_filename, source_preprocessor, target_preprocessor, max_corpus_count)
def get_best_alignment(self, source_segment: Sequence[str], target_segment: Sequence[str]) -> WordAlignmentMatrix:
_, matrix = self._model.get_best_alignment(source_segment, target_segment)
return WordAlignmentMatrix(matrix.to_numpy())
def get_best_alignments(
self, source_segments: Sequence[Sequence[str]], target_segments: Sequence[Sequence[str]]
) -> Sequence[WordAlignmentMatrix]:
if len(source_segments) != len(target_segments):
raise ValueError("The number of source and target segments must be equal.")
return [
WordAlignmentMatrix(matrix.to_numpy())
for _, matrix in self._model.get_best_alignments(source_segments, target_segments)
]
def get_translation_score(
self, source_word: Optional[Union[str, int]], target_word: Optional[Union[str, int]]
) -> float:
return self.get_translation_probability(source_word, target_word)
def get_translation_probability(
self, source_word: Optional[Union[str, int]], target_word: Optional[Union[str, int]]
) -> float:
if source_word is None:
source_word = 0
elif isinstance(source_word, str):
source_word = self._model.get_src_word_index(source_word)
if target_word is None:
target_word = 0
elif isinstance(target_word, str):
target_word = self._model.get_trg_word_index(target_word)
return self._model.translation_prob(source_word, target_word)
def get_translations(
self, source_word: Optional[Union[str, int]], threshold: float = 0
) -> Iterable[Tuple[int, float]]:
if source_word is None:
source_word = 0
elif isinstance(source_word, str):
source_word = self._model.get_src_word_index(source_word)
return self._model.get_translations(source_word, threshold)
def _create_model(self) -> ta.AlignmentModel:
if self.type is ThotWordAlignmentModelType.IBM1:
return ta.Ibm1AlignmentModel()
elif self.type is ThotWordAlignmentModelType.IBM2:
return ta.Ibm2AlignmentModel()
elif self.type is ThotWordAlignmentModelType.IBM3:
return ta.Ibm3AlignmentModel()
elif self.type is ThotWordAlignmentModelType.IBM4:
return ta.Ibm4AlignmentModel()
elif self.type is ThotWordAlignmentModelType.HMM:
return ta.HmmAlignmentModel()
elif self.type is ThotWordAlignmentModelType.FAST_ALIGN:
return ta.FastAlignModel()
else:
raise ValueError("The model type is invalid.")
def _set_model(self, model: ta.AlignmentModel) -> None:
self._model = model
self._source_words = _ThotWordVocabulary(self._model, is_src=True)
self._target_words = _ThotWordVocabulary(self._model, is_src=False)
class _ThotWordVocabulary(WordVocabulary):
def __init__(self, model: ta.AlignmentModel, is_src: bool) -> None:
self._model = model
self._is_src = is_src
def index(self, word: Optional[str]) -> int:
if word is None:
return 0
return self._model.get_src_word_index(word) if self._is_src else self._model.get_trg_word_index(word)
def __getitem__(self, word_index: int) -> str:
if word_index >= len(self):
raise IndexError
return self._model.get_src_word(word_index) if self._is_src else self._model.get_trg_word(word_index)
def __len__(self) -> int:
return self._model.src_vocab_size if self._is_src else self._model.trg_vocab_size
def __contains__(self, x: object) -> bool:
return any(self[i] == x for i in range(len(self)))
def __iter__(self) -> Iterator[str]:
return (self[i] for i in range(len(self)))
def __reversed__(self) -> Iterator[str]:
return (self[i] for i in reversed(range(len(self))))
class _Trainer(ThotWordAlignmentModelTrainer):
def __init__(
self,
model: ThotWordAlignmentModel,
corpus: ParallelTextCorpus,
prefix_filename: Optional[StrPath],
source_preprocessor: TokenProcessor,
target_preprocessor: TokenProcessor,
max_corpus_count: int,
) -> None:
super().__init__(
model.type,
corpus,
prefix_filename,
model.parameters,
source_preprocessor,
target_preprocessor,
max_corpus_count,
)
self._machine_model = model
def save(self) -> None:
super().save()
self._machine_model._set_model(self._model)
```
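`ThotWordAlignmentModel` is abstract (the `type` property selects which thot model is instantiated), so a concrete subclass is needed. The sketch below is illustrative only and requires the `thot` package; in practice the library's own concrete model classes would be used, a trainer obtained via `create_trainer` would be trained on a real, non-empty parallel corpus, and alignments queried with `get_best_alignment`.
```python
# Illustrative subclass; requires the `thot` package to be installed.
from machine.translation.thot.thot_word_alignment_model import ThotWordAlignmentModel
from machine.translation.thot.thot_word_alignment_model_type import ThotWordAlignmentModelType


class IllustrativeIbm1Model(ThotWordAlignmentModel):
    @property
    def type(self) -> ThotWordAlignmentModelType:
        return ThotWordAlignmentModelType.IBM1


model = IllustrativeIbm1Model()          # constructs an in-memory ta.Ibm1AlignmentModel
print(type(model.thot_model).__name__)   # Ibm1AlignmentModel
```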
#### File: machine/translation/thot/thot_word_alignment_model_trainer.py
```python
import sys
from pathlib import Path
from typing import Callable, List, Optional, Tuple, Union, overload
import thot.alignment as ta
from ...corpora.parallel_text_corpus import ParallelTextCorpus
from ...corpora.parallel_text_segment import ParallelTextSegment
from ...corpora.token_processors import NO_OP, TokenProcessor
from ...utils.progress_status import ProgressStatus
from ...utils.typeshed import StrPath
from ..trainer import Trainer, TrainStats
from .thot_word_alignment_model_type import ThotWordAlignmentModelType
from .thot_word_alignment_parameters import ThotWordAlignmentParameters
class ThotWordAlignmentModelTrainer(Trainer):
@overload
def __init__(
self,
model_type: ThotWordAlignmentModelType,
corpus: ParallelTextCorpus,
prefix_filename: Optional[StrPath],
parameters: ThotWordAlignmentParameters = ThotWordAlignmentParameters(),
source_preprocessor: TokenProcessor = NO_OP,
target_preprocessor: TokenProcessor = NO_OP,
max_corpus_count: int = sys.maxsize,
) -> None:
...
@overload
def __init__(
self,
model_type: ThotWordAlignmentModelType,
corpus: Tuple[StrPath, StrPath],
prefix_filename: Optional[StrPath],
parameters: ThotWordAlignmentParameters = ThotWordAlignmentParameters(),
source_preprocessor: TokenProcessor = NO_OP,
target_preprocessor: TokenProcessor = NO_OP,
) -> None:
...
def __init__(
self,
model_type: ThotWordAlignmentModelType,
corpus: Union[ParallelTextCorpus, Tuple[StrPath, StrPath]],
prefix_filename: Optional[StrPath],
parameters: ThotWordAlignmentParameters = ThotWordAlignmentParameters(),
source_preprocessor: TokenProcessor = NO_OP,
target_preprocessor: TokenProcessor = NO_OP,
max_corpus_count: int = sys.maxsize,
) -> None:
if isinstance(corpus, tuple) and max_corpus_count != sys.maxsize:
raise ValueError("max_corpus_count cannot be set when corpus filenames are provided.")
self._prefix_filename = None if prefix_filename is None else Path(prefix_filename)
self._source_preprocessor = source_preprocessor
self._target_preprocessor = target_preprocessor
self._parallel_corpus = corpus
self._max_corpus_count = max_corpus_count
self._stats = TrainStats()
def null_segment_filter(s: ParallelTextSegment, i: int) -> bool:
return True
self._segment_filter = null_segment_filter
self._models: List[Tuple[ta.AlignmentModel, int]] = []
if model_type is ThotWordAlignmentModelType.FAST_ALIGN:
fast_align = ta.FastAlignModel()
fast_align.variational_bayes = parameters.get_variational_bayes(model_type)
if parameters.fast_align_p0 is not None:
fast_align.fast_align_p0 = parameters.fast_align_p0
self._models.append((fast_align, parameters.get_fast_align_iteration_count(model_type)))
else:
ibm1 = ta.Ibm1AlignmentModel()
ibm1.variational_bayes = parameters.get_variational_bayes(model_type)
self._models.append((ibm1, parameters.get_ibm1_iteration_count(model_type)))
ibm2_or_hmm: Optional[ta.AlignmentModel] = None
if model_type >= ThotWordAlignmentModelType.IBM2:
if parameters.get_hmm_iteration_count(model_type) > 0:
ibm2_or_hmm = ta.HmmAlignmentModel(ibm1)
if parameters.hmm_p0 is not None:
ibm2_or_hmm.hmm_p0 = parameters.hmm_p0
if parameters.hmm_lexical_smoothing_factor is not None:
ibm2_or_hmm.lexical_smoothing_factor = parameters.hmm_lexical_smoothing_factor
if parameters.hmm_alignment_smoothing_factor is not None:
ibm2_or_hmm.hmm_alignment_smoothing_factor = parameters.hmm_alignment_smoothing_factor
self._models.append((ibm2_or_hmm, parameters.get_hmm_iteration_count(model_type)))
else:
ibm2_or_hmm = ta.Ibm2AlignmentModel(ibm1)
self._models.append((ibm2_or_hmm, parameters.get_ibm2_iteration_count(model_type)))
ibm3: Optional[ta.Ibm3AlignmentModel] = None
if (
model_type >= ThotWordAlignmentModelType.IBM3
and ibm2_or_hmm is not None
and parameters.get_ibm3_iteration_count(model_type) > 0
):
ibm3 = ta.Ibm3AlignmentModel(ibm2_or_hmm)
if parameters.ibm3_fertility_smoothing_factor is not None:
ibm3.fertility_smoothing_factor = parameters.ibm3_fertility_smoothing_factor
if parameters.ibm3_count_threshold is not None:
ibm3.count_threshold = parameters.ibm3_count_threshold
self._models.append((ibm3, parameters.get_ibm3_iteration_count(model_type)))
if model_type >= ThotWordAlignmentModelType.IBM4:
ibm4: Optional[ta.Ibm4AlignmentModel] = None
if ibm3 is not None:
ibm4 = ta.Ibm4AlignmentModel(ibm3)
elif isinstance(ibm2_or_hmm, ta.HmmAlignmentModel):
ibm4 = ta.Ibm4AlignmentModel(ibm2_or_hmm)
if ibm4 is not None:
if parameters.ibm4_distortion_smoothing_factor is not None:
ibm4.distortion_smoothing_factor = parameters.ibm4_distortion_smoothing_factor
for word, word_class in parameters.source_word_classes.items():
ibm4.map_src_word_to_word_class(word, word_class)
for word, word_class in parameters.target_word_classes.items():
ibm4.map_trg_word_to_word_class(word, word_class)
self._models.append((ibm4, parameters.get_ibm4_iteration_count(model_type)))
self._max_segment_length = self._model.max_sentence_length
@property
def stats(self) -> TrainStats:
return self._stats
@property
def segment_filter(self) -> Callable[[ParallelTextSegment, int], bool]:
return self._segment_filter
@segment_filter.setter
def segment_filter(self, value: Callable[[ParallelTextSegment, int], bool]) -> None:
if isinstance(self._parallel_corpus, tuple):
raise RuntimeError("A segment filter cannot be set when corpus filenames are provided.")
self._segment_filter = value
@property
def _model(self) -> ta.AlignmentModel:
return self._models[-1][0]
def train(
self,
progress: Optional[Callable[[ProgressStatus], None]] = None,
check_canceled: Optional[Callable[[], None]] = None,
) -> None:
num_steps = sum(iterations + 1 for _, iterations in self._models if iterations > 0) + 1
cur_step = 0
if progress is not None:
progress(ProgressStatus.from_step(cur_step, num_steps))
if isinstance(self._parallel_corpus, ParallelTextCorpus):
corpus_count = 0
index = 0
for segment in self._parallel_corpus.segments:
if self._segment_filter(segment, index):
source_segment = self._source_preprocessor.process(segment.source_segment)
target_segment = self._target_preprocessor.process(segment.target_segment)
self._model.add_sentence_pair(source_segment, target_segment, 1)
if self._is_segment_valid(segment):
corpus_count += 1
index += 1
if corpus_count == self._max_corpus_count:
break
else:
self._model.read_sentence_pairs(str(self._parallel_corpus[0]), str(self._parallel_corpus[1]))
cur_step += 1
if progress is not None:
progress(ProgressStatus.from_step(cur_step, num_steps))
if check_canceled is not None:
check_canceled()
trained_segment_count = 0
for model, iteration_count in self._models:
if iteration_count == 0:
continue
trained_segment_count = model.start_training()
cur_step += 1
if progress is not None:
progress(ProgressStatus.from_step(cur_step, num_steps))
if check_canceled is not None:
check_canceled()
for _ in range(iteration_count):
model.train()
cur_step += 1
if progress is not None:
progress(ProgressStatus.from_step(cur_step, num_steps))
if check_canceled is not None:
check_canceled()
model.end_training()
self._stats.trained_segment_count = trained_segment_count
def save(self) -> None:
if self._prefix_filename is not None:
self._model.print(str(self._prefix_filename))
def _is_segment_valid(self, segment: ParallelTextSegment) -> bool:
return (
not segment.is_empty
and len(segment.source_segment) <= self._max_segment_length
and len(segment.target_segment) <= self._max_segment_length
)
```
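The trainer can also be driven directly from pre-tokenized corpus files via the tuple overload of the constructor. The file and prefix paths below are hypothetical, and the `thot` package must be installed.
```python
# Sketch only: "corpus.src", "corpus.trg", and "out/hmm" are hypothetical paths.
from machine.translation.thot.thot_word_alignment_model_trainer import ThotWordAlignmentModelTrainer
from machine.translation.thot.thot_word_alignment_model_type import ThotWordAlignmentModelType

trainer = ThotWordAlignmentModelTrainer(
    ThotWordAlignmentModelType.HMM,
    ("corpus.src", "corpus.trg"),
    prefix_filename="out/hmm",
)
trainer.train(progress=lambda status: None)
trainer.save()
```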
#### File: machine/translation/word_alignment_model.py
```python
import sys
from abc import abstractmethod
from typing import Collection, Dict, Iterable, Optional, Sequence, Tuple, Union
from ..corpora.aligned_word_pair import AlignedWordPair
from ..corpora.parallel_text_corpus import ParallelTextCorpus, ParallelTextSegment
from ..corpora.token_processors import NO_OP, TokenProcessor
from .trainer import Trainer
from .word_aligner import WordAligner
from .word_alignment_matrix import WordAlignmentMatrix
from .word_vocabulary import WordVocabulary
class WordAlignmentModel(WordAligner):
@property
@abstractmethod
def source_words(self) -> WordVocabulary:
...
@property
@abstractmethod
def target_words(self) -> WordVocabulary:
...
@property
@abstractmethod
def special_symbol_indices(self) -> Collection[int]:
...
@abstractmethod
def create_trainer(
self,
corpus: ParallelTextCorpus,
source_preprocessor: TokenProcessor = NO_OP,
target_preprocessor: TokenProcessor = NO_OP,
max_corpus_count: int = sys.maxsize,
) -> Trainer:
...
@abstractmethod
def get_translations(
self, source_word: Optional[Union[str, int]], threshold: float = 0
) -> Iterable[Tuple[int, float]]:
...
@abstractmethod
def get_translation_score(
self, source_word: Optional[Union[str, int]], target_word: Optional[Union[str, int]]
) -> float:
...
@abstractmethod
def get_alignment_score(
self,
source_length: int,
prev_source_index: int,
source_index: int,
target_length: int,
prev_target_index: int,
target_index: int,
) -> float:
...
def get_translation_table(self, threshold: float = 0) -> Dict[str, Dict[str, float]]:
results: Dict[str, Dict[str, float]] = {}
source_words = list(self.source_words)
target_words = list(self.target_words)
for i in range(len(source_words)):
row: Dict[str, float] = {}
for j, score in self.get_translations(i, threshold):
row[target_words[j]] = score
results[source_words[i]] = row
return results
def get_aligned_word_pairs(
self, source_segment: Sequence[str], target_segment: Sequence[str], wa_matrix: WordAlignmentMatrix
) -> Collection[AlignedWordPair]:
word_pairs, source_indices, target_indices = wa_matrix.get_asymmetric_alignments()
for word_pair in word_pairs:
source_word = source_segment[word_pair.source_index]
target_word = target_segment[word_pair.target_index]
word_pair.translation_score = self.get_translation_score(source_word, target_word)
prev_source_index = -1 if word_pair.target_index == 0 else source_indices[word_pair.target_index - 1]
prev_target_index = -1 if word_pair.source_index == 0 else target_indices[word_pair.source_index - 1]
word_pair.alignment_score = self.get_alignment_score(
len(source_segment),
prev_source_index,
word_pair.source_index,
len(target_segment),
prev_target_index,
word_pair.target_index,
)
return word_pairs
def get_alignment_string(
self,
segment: ParallelTextSegment,
include_scores: bool = True,
source_preprocessor: TokenProcessor = NO_OP,
target_preprocessor: TokenProcessor = NO_OP,
) -> str:
source_segment = source_preprocessor.process(segment.source_segment)
target_segment = target_preprocessor.process(segment.target_segment)
alignment = self.get_best_alignment_from_known(
source_segment, target_segment, WordAlignmentMatrix.from_parallel_text_segment(segment)
)
if not include_scores:
return str(alignment)
return " ".join(str(wp) for wp in self.get_aligned_word_pairs(source_segment, target_segment, alignment))
def get_giza_format_string(
self,
segment: ParallelTextSegment,
source_preprocessor: TokenProcessor = NO_OP,
target_preprocessor: TokenProcessor = NO_OP,
) -> str:
source_segment = source_preprocessor.process(segment.source_segment)
target_segment = target_preprocessor.process(segment.target_segment)
alignment = self.get_best_alignment_from_known(
source_segment, target_segment, WordAlignmentMatrix.from_parallel_text_segment(segment)
)
return alignment.to_giza_format(source_segment, target_segment)
```
#### File: tests/corpora/test_dbl_bundle_text.py
```python
from machine.scripture import VerseRef
from tests.corpora.dbl_bundle_test_environment import DblBundleTestEnvironment
def test_get_segments_nonempty_text() -> None:
with DblBundleTestEnvironment() as env:
text = env.corpus.get_text("MAT")
segments = list(text.get_segments())
assert len(segments) == 14
assert segments[0].segment_ref == VerseRef.from_string("MAT 1:1", env.corpus.versification)
assert segments[0].segment[0] == "Chapter one, verse one."
assert segments[1].segment_ref == VerseRef.from_string("MAT 1:2", env.corpus.versification)
assert segments[1].segment[0] == "Chapter one, verse two."
assert segments[4].segment_ref == VerseRef.from_string("MAT 1:5", env.corpus.versification)
assert segments[4].segment[0] == "Chapter one, verse five."
assert segments[5].segment_ref == VerseRef.from_string("MAT 2:1", env.corpus.versification)
assert segments[5].segment[0] == "Chapter two, verse one."
assert segments[6].segment_ref == VerseRef.from_string("MAT 2:2", env.corpus.versification)
assert segments[6].segment[0] == "Chapter two, verse two. Chapter two, verse three."
assert segments[6].is_in_range
assert segments[7].segment_ref == VerseRef.from_string("MAT 2:3", env.corpus.versification)
assert len(segments[7].segment) == 0
assert segments[7].is_in_range
assert segments[8].segment_ref == VerseRef.from_string("MAT 2:4a", env.corpus.versification)
assert len(segments[8].segment) == 0
assert segments[8].is_in_range
assert segments[9].segment_ref == VerseRef.from_string("MAT 2:4b", env.corpus.versification)
assert segments[9].segment[0] == "Chapter two, verse four."
assert segments[10].segment_ref == VerseRef.from_string("MAT 2:5", env.corpus.versification)
assert segments[10].segment[0] == "Chapter two, verse five."
assert segments[11].segment_ref == VerseRef.from_string("MAT 2:6", env.corpus.versification)
assert segments[11].segment[0] == "Chapter two, verse six."
def test_get_segments_sentence_start() -> None:
with DblBundleTestEnvironment() as env:
text = env.corpus.get_text("MAT")
segments = list(text.get_segments())
assert len(segments) == 14
assert segments[3].segment_ref == VerseRef.from_string("MAT 1:4", env.corpus.versification)
assert segments[3].segment[0] == "Chapter one, verse four,"
assert segments[3].is_sentence_start
assert segments[4].segment_ref == VerseRef.from_string("MAT 1:5", env.corpus.versification)
assert segments[4].segment[0] == "Chapter one, verse five."
assert not segments[4].is_sentence_start
def test_get_segments_empty_text() -> None:
with DblBundleTestEnvironment() as env:
text = env.corpus.get_text("MRK")
segments = list(text.get_segments())
assert len(segments) == 0
```
#### File: tests/corpora/test_parallel_text_corpus.py
```python
from machine.corpora import (
DictionaryTextAlignmentCorpus,
DictionaryTextCorpus,
MemoryText,
MemoryTextAlignmentCollection,
ParallelTextCorpus,
)
def test_texts_no_texts() -> None:
source_corpus = DictionaryTextCorpus()
target_corpus = DictionaryTextCorpus()
parallel_corpus = ParallelTextCorpus(source_corpus, target_corpus)
assert not any(parallel_corpus.texts)
def test_texts_no_missing_texts() -> None:
source_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text2"), MemoryText("text3"))
target_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text2"), MemoryText("text3"))
alignment_corpus = DictionaryTextAlignmentCorpus(
MemoryTextAlignmentCollection("text1"),
MemoryTextAlignmentCollection("text2"),
MemoryTextAlignmentCollection("text3"),
)
parallel_corpus = ParallelTextCorpus(source_corpus, target_corpus, alignment_corpus)
texts = parallel_corpus.texts
assert [t.id for t in texts] == ["text1", "text2", "text3"]
def test_texts_missing_text() -> None:
source_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text2"), MemoryText("text3"))
target_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text3"))
alignment_corpus = DictionaryTextAlignmentCorpus(
MemoryTextAlignmentCollection("text1"), MemoryTextAlignmentCollection("text3")
)
parallel_corpus = ParallelTextCorpus(source_corpus, target_corpus, alignment_corpus)
texts = parallel_corpus.texts
assert [t.id for t in texts] == ["text1", "text3"]
def test_get_texts_missing_target_text_all_source_segments() -> None:
source_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text2"), MemoryText("text3"))
target_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text3"))
alignment_corpus = DictionaryTextAlignmentCorpus(
MemoryTextAlignmentCollection("text1"), MemoryTextAlignmentCollection("text3")
)
parallel_corpus = ParallelTextCorpus(source_corpus, target_corpus, alignment_corpus)
texts = parallel_corpus.get_texts(all_source_segments=True)
assert [t.id for t in texts] == ["text1", "text2", "text3"]
def test_get_texts_missing_source_text_all_target_segments() -> None:
source_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text3"))
target_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text2"), MemoryText("text3"))
alignment_corpus = DictionaryTextAlignmentCorpus(
MemoryTextAlignmentCollection("text1"), MemoryTextAlignmentCollection("text3")
)
parallel_corpus = ParallelTextCorpus(source_corpus, target_corpus, alignment_corpus)
texts = parallel_corpus.get_texts(all_target_segments=True)
assert [t.id for t in texts] == ["text1", "text2", "text3"]
def test_get_texts_missing_source_and_target_text_all_source_and_target_segments() -> None:
source_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text3"))
target_corpus = DictionaryTextCorpus(MemoryText("text1"), MemoryText("text2"))
alignment_corpus = DictionaryTextAlignmentCorpus(MemoryTextAlignmentCollection("text1"))
parallel_corpus = ParallelTextCorpus(source_corpus, target_corpus, alignment_corpus)
texts = parallel_corpus.get_texts(all_source_segments=True, all_target_segments=True)
assert [t.id for t in texts] == ["text1", "text2", "text3"]
```
#### File: tests/corpora/test_usfm_file_text.py
```python
from io import StringIO
from machine.corpora import NullScriptureText, UsfmFileTextCorpus
from machine.scripture import ENGLISH_VERSIFICATION, ORIGINAL_VERSIFICATION, VerseRef, Versification
from machine.tokenization import NullTokenizer
from tests.corpora.corpora_test_helpers import USFM_STYLESHEET_PATH, USFM_TEST_PROJECT_PATH
def test_get_segments_nonempty_text() -> None:
tokenizer = NullTokenizer()
corpus = UsfmFileTextCorpus(tokenizer, USFM_STYLESHEET_PATH, "utf-8-sig", USFM_TEST_PROJECT_PATH)
text = corpus.get_text("MAT")
segments = list(text.get_segments())
assert len(segments) == 14
assert segments[0].segment_ref == VerseRef.from_string("MAT 1:1", corpus.versification)
assert segments[0].segment[0] == "Chapter one, verse one."
assert segments[1].segment_ref == VerseRef.from_string("MAT 1:2", corpus.versification)
assert segments[1].segment[0] == "Chapter one, verse two."
assert segments[4].segment_ref == VerseRef.from_string("MAT 1:5", corpus.versification)
assert segments[4].segment[0] == "Chapter one, verse five."
assert segments[5].segment_ref == VerseRef.from_string("MAT 2:1", corpus.versification)
assert segments[5].segment[0] == "Chapter two, verse one."
assert segments[6].segment_ref == VerseRef.from_string("MAT 2:2", corpus.versification)
assert segments[6].segment[0] == "Chapter two, verse two. Chapter two, verse three."
assert segments[6].is_in_range
assert segments[7].segment_ref == VerseRef.from_string("MAT 2:3", corpus.versification)
assert len(segments[7].segment) == 0
assert segments[7].is_in_range
assert segments[8].segment_ref == VerseRef.from_string("MAT 2:4a", corpus.versification)
assert len(segments[8].segment) == 0
assert segments[8].is_in_range
assert segments[9].segment_ref == VerseRef.from_string("MAT 2:4b", corpus.versification)
assert segments[9].segment[0] == "Chapter two, verse four."
assert segments[10].segment_ref == VerseRef.from_string("MAT 2:5", corpus.versification)
assert segments[10].segment[0] == "Chapter two, verse five."
assert segments[11].segment_ref == VerseRef.from_string("MAT 2:6", corpus.versification)
assert segments[11].segment[0] == "Chapter two, verse six."
def test_get_segments_sentence_start() -> None:
tokenizer = NullTokenizer()
corpus = UsfmFileTextCorpus(tokenizer, USFM_STYLESHEET_PATH, "utf-8-sig", USFM_TEST_PROJECT_PATH)
text = corpus.get_text("MAT")
segments = list(text.get_segments())
assert len(segments) == 14
assert segments[3].segment_ref == VerseRef.from_string("MAT 1:4", corpus.versification)
assert segments[3].segment[0] == "Chapter one, verse four,"
assert segments[3].is_sentence_start
assert segments[4].segment_ref == VerseRef.from_string("MAT 1:5", corpus.versification)
assert segments[4].segment[0] == "Chapter one, verse five."
assert not segments[4].is_sentence_start
def test_get_segments_empty_text() -> None:
tokenizer = NullTokenizer()
corpus = UsfmFileTextCorpus(tokenizer, USFM_STYLESHEET_PATH, "utf-8-sig", USFM_TEST_PROJECT_PATH)
text = corpus.get_text("MRK")
segments = list(text.get_segments())
assert len(segments) == 0
def test_get_segments_include_markers() -> None:
tokenizer = NullTokenizer()
corpus = UsfmFileTextCorpus(
tokenizer, USFM_STYLESHEET_PATH, "utf-8-sig", USFM_TEST_PROJECT_PATH, include_markers=True
)
text = corpus.get_text("MAT")
segments = list(text.get_segments())
assert len(segments) == 14
assert segments[0].segment_ref == VerseRef.from_string("MAT 1:1", corpus.versification)
assert segments[0].segment[0] == "Chapter one, verse one.\\f + \\fr 1:1: \\ft This is a footnote.\\f*"
assert segments[1].segment_ref == VerseRef.from_string("MAT 1:2", corpus.versification)
assert segments[1].segment[0] == "Chapter one, \\li2 verse two."
assert segments[4].segment_ref == VerseRef.from_string("MAT 1:5", corpus.versification)
assert (
segments[4].segment[0]
== 'Chapter one, \\li2 verse \\fig Figure 1|src="image1.png" size="col" ref="1:5"\\fig* five.'
)
assert segments[5].segment_ref == VerseRef.from_string("MAT 2:1", corpus.versification)
assert segments[5].segment[0] == "Chapter \\add two\\add*, verse\\f + \\fr 2:1: \\ft This is a footnote.\\f* one."
assert segments[6].segment_ref == VerseRef.from_string("MAT 2:2", corpus.versification)
assert segments[6].segment[0] == "Chapter two, verse two. Chapter two, verse three."
assert segments[6].is_in_range
assert segments[7].segment_ref == VerseRef.from_string("MAT 2:3", corpus.versification)
assert len(segments[7].segment) == 0
assert segments[7].is_in_range
assert segments[8].segment_ref == VerseRef.from_string("MAT 2:4a", corpus.versification)
assert len(segments[8].segment) == 0
assert segments[8].is_in_range
assert segments[9].segment_ref == VerseRef.from_string("MAT 2:4b", corpus.versification)
assert segments[9].segment[0] == "Chapter two, verse four."
assert segments[10].segment_ref == VerseRef.from_string("MAT 2:5", corpus.versification)
assert segments[10].segment[0] == "Chapter two, verse five."
assert segments[11].segment_ref == VerseRef.from_string("MAT 2:6", corpus.versification)
assert segments[11].segment[0] == 'Chapter two, verse \\w six|strong="12345" \\w*.'
def test_get_segments_based_on() -> None:
tokenizer = NullTokenizer()
src = "MAT 1:2 = MAT 1:3\nMAT 1:3 = MAT 1:2\n"
stream = StringIO(src)
versification = Versification("custom", "vers.txt", ENGLISH_VERSIFICATION)
versification = Versification.parse(stream, "vers.txt", versification, "custom")
corpus = UsfmFileTextCorpus(tokenizer, USFM_STYLESHEET_PATH, "utf-8-sig", USFM_TEST_PROJECT_PATH, versification)
based_on_text = NullScriptureText(tokenizer, "MAT", ORIGINAL_VERSIFICATION)
text = corpus.get_text("MAT")
segments = list(text.get_segments_based_on(based_on_text))
assert len(segments) == 14
assert segments[0].segment_ref == VerseRef.from_string("MAT 1:1", ORIGINAL_VERSIFICATION)
assert segments[0].segment[0] == "Chapter one, verse one."
assert segments[1].segment_ref == VerseRef.from_string("MAT 1:2", ORIGINAL_VERSIFICATION)
assert segments[1].segment[0] == "Chapter one, verse three."
assert segments[2].segment_ref == VerseRef.from_string("MAT 1:3", ORIGINAL_VERSIFICATION)
assert segments[2].segment[0] == "Chapter one, verse two."
assert segments[4].segment_ref == VerseRef.from_string("MAT 1:5", ORIGINAL_VERSIFICATION)
assert segments[4].segment[0] == "Chapter one, verse five."
assert segments[5].segment_ref == VerseRef.from_string("MAT 2:1", ORIGINAL_VERSIFICATION)
assert segments[5].segment[0] == "Chapter two, verse one."
assert segments[6].segment_ref == VerseRef.from_string("MAT 2:2", ORIGINAL_VERSIFICATION)
assert segments[6].segment[0] == "Chapter two, verse two. Chapter two, verse three."
assert segments[6].is_in_range
assert segments[7].segment_ref == VerseRef.from_string("MAT 2:3", ORIGINAL_VERSIFICATION)
assert len(segments[7].segment) == 0
assert segments[7].is_in_range
assert segments[8].segment_ref == VerseRef.from_string("MAT 2:4a", ORIGINAL_VERSIFICATION)
assert len(segments[8].segment) == 0
assert segments[8].is_in_range
assert segments[9].segment_ref == VerseRef.from_string("MAT 2:4b", ORIGINAL_VERSIFICATION)
assert segments[9].segment[0] == "Chapter two, verse four."
assert segments[10].segment_ref == VerseRef.from_string("MAT 2:5", ORIGINAL_VERSIFICATION)
assert segments[10].segment[0] == "Chapter two, verse five."
assert segments[11].segment_ref == VerseRef.from_string("MAT 2:6", ORIGINAL_VERSIFICATION)
assert segments[11].segment[0] == "Chapter two, verse six."
```
#### File: tests/scripture/test_verse_ref.py
```python
from pytest import raises
from machine.scripture import (
ENGLISH_VERSIFICATION,
LAST_BOOK,
ORIGINAL_VERSIFICATION,
RUSSIAN_ORTHODOX_VERSIFICATION,
SEPTUAGINT_VERSIFICATION,
VULGATE_VERSIFICATION,
ValidStatus,
VerseRef,
Versification,
get_bbbcccvvv,
)
def test_constructor() -> None:
vref = VerseRef(1, 2, 3, SEPTUAGINT_VERSIFICATION)
assert vref.is_valid
assert vref.bbbcccvvv == 1002003
assert vref.bbbcccvvvs == "001002003"
assert vref.book_num == 1
assert vref.book == "GEN"
assert vref.chapter_num == 2
assert vref.chapter == "2"
assert vref.verse_num == 3
assert vref.versification == SEPTUAGINT_VERSIFICATION
vref = VerseRef(4, 5, 6)
assert vref.bbbcccvvv == 4005006
assert vref.bbbcccvvvs == "004005006"
assert vref.book_num == 4
assert vref.book == "NUM"
assert vref.chapter_num == 5
assert vref.verse_num == 6
assert vref.versification == ENGLISH_VERSIFICATION
vref = VerseRef("LUK", "3", "4b-5a", VULGATE_VERSIFICATION)
assert vref.is_valid
assert vref.bbbcccvvv == 42003004
assert vref.bbbcccvvvs == "042003004b"
assert vref.book_num == 42
assert vref.chapter_num == 3
assert vref.verse_num == 4
assert vref.verse == "4b-5a"
assert vref.validated_segment() == "b"
assert sum(1 for _ in vref.all_verses()) == 2
assert vref.versification == VULGATE_VERSIFICATION
# Confirm RTL marker is removed
vref = VerseRef("LUK", "3", "4b\u200f-5a", VULGATE_VERSIFICATION)
assert vref.is_valid
assert vref.bbbcccvvv == 42003004
assert vref.bbbcccvvvs == "042003004b"
assert vref.book_num == 42
assert vref.chapter_num == 3
assert vref.verse_num == 4
assert vref.verse == "4b-5a"
assert vref.validated_segment() == "b"
assert sum(1 for _ in vref.all_verses()) == 2
assert vref.versification == VULGATE_VERSIFICATION
def test_from_string() -> None:
vref = VerseRef.from_string("LUK 3:4b-5a", VULGATE_VERSIFICATION)
assert vref.is_valid
assert vref.bbbcccvvv == 42003004
assert vref.bbbcccvvvs == "042003004b"
assert vref.book_num == 42
assert vref.chapter_num == 3
assert vref.verse_num == 4
assert vref.verse == "4b-5a"
assert vref.validated_segment() == "b"
assert sum(1 for _ in vref.all_verses()) == 2
assert vref.versification == VULGATE_VERSIFICATION
# Confirm RTL marker is removed
vref = VerseRef.from_string("LUK 3\u200f:4\u200f-5", VULGATE_VERSIFICATION)
assert vref.is_valid
assert vref.bbbcccvvv == 42003004
assert vref.bbbcccvvvs == "042003004"
assert vref.book_num == 42
assert vref.chapter_num == 3
assert vref.verse_num == 4
assert vref.verse == "4-5"
assert vref.validated_segment() == ""
assert sum(1 for _ in vref.all_verses()) == 2
assert vref.versification == VULGATE_VERSIFICATION
def test_from_bbbcccvvv() -> None:
vref = VerseRef.from_bbbcccvvv(12015013)
assert vref.bbbcccvvv == 12015013
assert vref.bbbcccvvvs == "012015013"
assert vref.book == "2KI"
assert vref.book_num == 12
assert vref.chapter_num == 15
assert vref.verse_num == 13
assert vref.verse == "13"
assert vref.versification == ENGLISH_VERSIFICATION
def test_chapter_and_verse_as_empty_strings() -> None:
vref = VerseRef("LUK", "", "", SEPTUAGINT_VERSIFICATION)
assert vref.valid_status == ValidStatus.OUT_OF_RANGE
assert vref.book == "LUK"
assert vref.chapter == ""
assert vref.verse == ""
assert vref.book_num == 42
assert vref.chapter_num == -1
assert vref.verse_num == -1
vref = VerseRef("LUK", "5", "3", SEPTUAGINT_VERSIFICATION)
vref.verse = ""
vref.chapter = ""
assert vref.valid_status == ValidStatus.OUT_OF_RANGE
assert vref.chapter == ""
assert vref.verse == ""
assert vref.chapter_num == -1
assert vref.verse_num == -1
def test_verse_with_rtl_markers() -> None:
vref = VerseRef("LUK", "5", "1\u200f-2", SEPTUAGINT_VERSIFICATION)
assert vref.valid_status == ValidStatus.VALID
assert vref.book == "LUK"
assert vref.chapter == "5"
assert vref.verse == "1-2"
assert vref.book_num == 42
assert vref.chapter_num == 5
assert vref.verse_num == 1
def test_build_verse_ref_by_props() -> None:
vref = VerseRef()
vref.versification = ENGLISH_VERSIFICATION
assert vref.valid_status == ValidStatus.OUT_OF_RANGE
assert vref.bbbcccvvv == 0
vref.book_num = 13
assert vref.valid_status == ValidStatus.OUT_OF_RANGE
assert vref.bbbcccvvv == 13000000
assert vref.book_num == 13
assert vref.chapter_num == 0
assert vref.verse_num == 0
vref.chapter_num = 1
vref.verse_num = 0
    # A verse number of zero is considered valid (used for introductions, etc.), but only in chapter 1
assert vref.is_valid
assert vref.bbbcccvvv == 13001000
assert vref.book_num == 13
assert vref.chapter_num == 1
assert vref.verse_num == 0
vref.chapter_num = 14
vref.verse_num = 15
assert vref.is_valid
assert vref.bbbcccvvv == 13014015
assert vref.book_num == 13
assert vref.chapter_num == 14
assert vref.verse_num == 15
vref = VerseRef()
vref.versification = ENGLISH_VERSIFICATION
vref.chapter_num = 16
# Invalid because 0 is not valid for the book number
assert vref.valid_status == ValidStatus.OUT_OF_RANGE
assert vref.bbbcccvvv == 16000
assert vref.book_num == 0
assert vref.chapter_num == 16
assert vref.verse_num == 0
vref = VerseRef()
vref.versification = ENGLISH_VERSIFICATION
vref.verse_num = 17
# Invalid because 0 is not valid for the book and chapter numbers
assert vref.valid_status == ValidStatus.OUT_OF_RANGE
assert vref.bbbcccvvv == 17
assert vref.book_num == 0
assert vref.chapter_num == 0
assert vref.verse_num == 17
def test_invalid() -> None:
with raises(ValueError):
VerseRef(-1, 1, 1)
with raises(ValueError):
VerseRef(0, 1, 1)
with raises(ValueError):
VerseRef(LAST_BOOK + 1, 1, 1)
with raises(ValueError):
VerseRef(2, -42, 1)
with raises(ValueError):
VerseRef(2, 1, -4)
with raises(ValueError):
VerseRef.from_string("MAT 1:")
with raises(ValueError):
VerseRef.from_string("MAT 1:2-")
with raises(ValueError):
VerseRef.from_string("MAT 1:2,")
vref = VerseRef(1, 1023, 5051, ENGLISH_VERSIFICATION)
assert vref.valid_status == ValidStatus.OUT_OF_RANGE
assert vref.book_num == 1
assert vref.chapter_num == 1023
assert vref.verse_num == 5051
vref = VerseRef("GEN", "F", "@", ENGLISH_VERSIFICATION)
assert vref.valid_status == ValidStatus.OUT_OF_RANGE
assert vref.book_num == 1
assert vref.chapter_num == -1
assert vref.verse_num == -1
def test_segments() -> None:
assert VerseRef.from_string("MAT 3:13").bbbcccvvvs == "040003013"
assert VerseRef.from_string("MAT 3:12a").bbbcccvvvs == "040003012a"
assert VerseRef.from_string("1KI 2:35a-35h").bbbcccvvvs == "011002035a"
assert VerseRef.from_string("ESG 8:8a").bbbcccvvvs == "069008008a"
assert VerseRef.from_string("MAT 12:1-3,5a,6c-9").bbbcccvvvs == "040012001"
assert VerseRef.from_string("MAT 3:13b-12a").bbbcccvvvs == "040003013b"
def test_is_valid() -> None:
assert VerseRef.from_string("GEN 1:1").is_valid
assert VerseRef.from_string("GEN 1:1-2").is_valid
assert VerseRef.from_string("GEN 1:1,3").is_valid
assert VerseRef.from_string("GEN 1:1,3,7").is_valid
assert VerseRef.from_string("PSA 119:1,3-6").is_valid
def test_is_valid_segments() -> None:
assert VerseRef.from_string("GEN 1:1b").is_valid
assert VerseRef.from_string("GEN 1:1c-2a").is_valid
assert VerseRef.from_string("GEN 1:1a,3b").is_valid
assert VerseRef.from_string("GEN 1:1a,3c,7b").is_valid
assert VerseRef.from_string("GEN 1:1a,3c-6a").is_valid
def test_valid_status_invalid_order() -> None:
assert VerseRef.from_string("GEN 1:2-1").valid_status == ValidStatus.VERSE_OUT_OF_ORDER
assert VerseRef.from_string("GEN 1:2,1").valid_status == ValidStatus.VERSE_OUT_OF_ORDER
assert VerseRef.from_string("GEN 1:2-3,1").valid_status == ValidStatus.VERSE_OUT_OF_ORDER
assert VerseRef.from_string("GEN 1:5,2-3").valid_status == ValidStatus.VERSE_OUT_OF_ORDER
def test_valid_status_invalid_in_versification() -> None:
# Invalid chapters
assert VerseRef.from_string("GEN 100:1").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("PHM 2:1").valid_status == ValidStatus.OUT_OF_RANGE
# Invalid verses
assert VerseRef.from_string("GEN 1:100").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:100-2").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:1-200").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:100,3").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:1,300").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:100,3,7").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:1,300,7").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:1,3,700").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:100,3-6").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:1,300-6").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:1,3-600").valid_status == ValidStatus.OUT_OF_RANGE
def test_valid_status_invalid_excluded_in_versification() -> None:
versification = Versification.create("Dummy")
versification.excluded_verses.add(VerseRef.from_string("GEN 1:30").bbbcccvvv)
# Valid verses (surrounding excluded verse)
assert VerseRef.from_string("GEN 1:29", versification).is_valid
assert VerseRef.from_string("GEN 1:31", versification).is_valid
# Invalid (excluded) verse
assert VerseRef.from_string("GEN 1:30", versification).valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:30,31", versification).valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:29,30", versification).valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:29-30", versification).valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:30-31", versification).valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:30b", versification).valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:30b-31a", versification).valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:29b-30a", versification).valid_status == ValidStatus.OUT_OF_RANGE
def test_valid_status_excluded_verse() -> None:
versification = Versification.create("Dummy")
versification.excluded_verses.add(get_bbbcccvvv(1, 2, 2))
# If an excluded verse is within a verse range, it is valid.
assert VerseRef.from_string("GEN 2:1-3", versification).is_valid
# If an excluded verse is explicitly included in the reference, it is invalid.
assert VerseRef.from_string("GEN 2:2", versification).valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 2:2-3", versification).valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 2:1-2", versification).valid_status == ValidStatus.OUT_OF_RANGE
def test_valid_status_invalid_versification_on_segments() -> None:
assert VerseRef.from_string("GEN 1:100b").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:1c-200a").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:1a,300b").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:1a,3c,700b").valid_status == ValidStatus.OUT_OF_RANGE
assert VerseRef.from_string("GEN 1:1a,3c-600a").valid_status == ValidStatus.OUT_OF_RANGE
def test_from_string_valid() -> None:
vref = VerseRef.from_string("Gen 1:1", ENGLISH_VERSIFICATION)
assert vref.is_valid
assert vref.bbbcccvvv == 1001001
def test_from_string_bridge() -> None:
vref = VerseRef.from_string("NUM 5:1-5", ENGLISH_VERSIFICATION)
assert vref.is_valid
assert vref.bbbcccvvv == 4005001
assert vref.bbbcccvvvs == "004005001"
assert str(vref) == "NUM 5:1-5"
assert vref.str_with_versification() == "NUM 5:1-5/4"
assert vref.book_num == 4
assert vref.chapter_num == 5
assert vref.verse_num == 1
assert vref.verse == "1-5"
assert vref.versification == ENGLISH_VERSIFICATION
def test_from_string_bridge_with_versification() -> None:
vref = VerseRef.from_string("NUM 5:1-5/2")
assert vref.is_valid
assert vref.bbbcccvvv == 4005001
assert vref.bbbcccvvvs == "004005001"
assert str(vref) == "NUM 5:1-5"
assert vref.str_with_versification() == "NUM 5:1-5/2"
assert vref.book_num == 4
assert vref.chapter_num == 5
assert vref.verse_num == 1
assert vref.verse == "1-5"
assert vref.versification == SEPTUAGINT_VERSIFICATION
def test_from_string_book_intro() -> None:
vref = VerseRef.from_string("JOS 1:0")
assert vref.is_valid
assert vref.bbbcccvvv == 6001000
def test_from_string_chapter_intro() -> None:
vref = VerseRef.from_string("JOS 2:0")
assert vref.is_valid
assert vref.bbbcccvvv == 6002000
def test_from_string_weird() -> None:
vref = VerseRef.from_string("EXO 0:18")
assert not vref.is_valid
assert vref.bbbcccvvv == 2000018
assert vref.book_num == 2
assert vref.chapter_num == 0
assert vref.verse_num == 18
def test_parse_ref_invalid_book() -> None:
with raises(ValueError):
VerseRef.from_string("BLA 1:1")
with raises(ValueError):
VerseRef("BLA", "1", "1")
def test_from_string_invalid_numbers() -> None:
with raises(ValueError):
VerseRef.from_string("EXO 6:-18")
with raises(ValueError):
VerseRef.from_string("EXO -1:18")
def test_from_string_letters() -> None:
with raises(ValueError):
VerseRef.from_string("EXO F:18")
with raises(ValueError):
VerseRef.from_string("EXO 1:F")
def test_copy_from() -> None:
source = VerseRef("LUK", "3", "4b-6a", VULGATE_VERSIFICATION)
dest = VerseRef()
dest.copy_from(source)
# Now change the source to ensure that we didn't just make it referentially equal.
source.book_num = 2
source.chapter_num = 6
source.verse_num = 9
source.versification = ENGLISH_VERSIFICATION
assert dest.book == "LUK"
assert dest.chapter_num == 3
assert dest.verse == "4b-6a"
assert dest.verse_num == 4
assert dest.versification == VULGATE_VERSIFICATION
def test_copy_verse_from() -> None:
source = VerseRef("LUK", "3", "4b-6a", VULGATE_VERSIFICATION)
dest = VerseRef(1, 3, 5, RUSSIAN_ORTHODOX_VERSIFICATION)
dest.copy_verse_from(source)
# Now change the source to ensure that we didn't just make it referentially equal.
source.book_num = 2
source.chapter_num = 6
source.verse_num = 9
source.versification = ENGLISH_VERSIFICATION
assert dest.book == "GEN"
assert dest.chapter_num == 3
assert dest.verse == "4b-6a"
assert dest.verse_num == 4
assert dest.versification == RUSSIAN_ORTHODOX_VERSIFICATION
# Now test when the source just has a plain verse number (no bridges or segments)
dest.copy_verse_from(source)
assert dest.book == "GEN"
assert dest.chapter_num == 3
assert dest.verse == "9"
assert dest.verse_num == 9
assert dest.versification == RUSSIAN_ORTHODOX_VERSIFICATION
def test_all_verses_bridge() -> None:
vref = VerseRef("LUK", "3", "4b-6a", VULGATE_VERSIFICATION)
assert list(vref.all_verses()) == [
VerseRef("LUK", "3", "4b", VULGATE_VERSIFICATION),
VerseRef("LUK", "3", "5", VULGATE_VERSIFICATION),
VerseRef("LUK", "3", "6a", VULGATE_VERSIFICATION),
]
def test_all_verses_simple_verse() -> None:
vref = VerseRef("LUK", "3", "12", VULGATE_VERSIFICATION)
assert list(vref.all_verses()) == [vref]
def test_all_verses_verse_with_segment() -> None:
vref = VerseRef("LUK", "3", "12v", VULGATE_VERSIFICATION)
assert list(vref.all_verses()) == [vref]
def test_get_ranges_single_verse() -> None:
vref = VerseRef.from_string("LUK 3:12", ORIGINAL_VERSIFICATION)
assert list(vref.get_ranges()) == [VerseRef.from_string("LUK 3:12", ORIGINAL_VERSIFICATION)]
def test_get_ranges_single_range() -> None:
vref = VerseRef.from_string("LUK 3:12-14", ORIGINAL_VERSIFICATION)
assert list(vref.get_ranges()) == [VerseRef.from_string("LUK 3:12-14", ORIGINAL_VERSIFICATION)]
def test_get_ranges_multiple_ranges() -> None:
vref = VerseRef.from_string("LUK 3:12-14,16-17", ORIGINAL_VERSIFICATION)
assert list(vref.get_ranges()) == [
VerseRef.from_string("LUK 3:12-14", ORIGINAL_VERSIFICATION),
VerseRef.from_string("LUK 3:16-17", ORIGINAL_VERSIFICATION),
]
def test_get_ranges_complicated_ranges() -> None:
vref = VerseRef.from_string("LUK 3:12-14,16b-17a,18a,19,20", ORIGINAL_VERSIFICATION)
assert list(vref.get_ranges()) == [
VerseRef.from_string("LUK 3:12-14", ORIGINAL_VERSIFICATION),
VerseRef.from_string("LUK 3:16b-17a", ORIGINAL_VERSIFICATION),
VerseRef.from_string("LUK 3:18a", ORIGINAL_VERSIFICATION),
VerseRef.from_string("LUK 3:19", ORIGINAL_VERSIFICATION),
VerseRef.from_string("LUK 3:20", ORIGINAL_VERSIFICATION),
]
def test_lt() -> None:
assert VerseRef(1, 1, 1) < VerseRef(2, 1, 1)
assert not (VerseRef(10, 1, 1) < VerseRef(1, 1, 1))
assert VerseRef("GEN", "1", "1a") < VerseRef("GEN", "1", "1b")
assert VerseRef(1, 1, 1) < VerseRef("GEN", "1", "1a")
assert not (VerseRef("GEN", "1", "1a") < VerseRef(1, 1, 1))
def test_le() -> None:
assert VerseRef(1, 1, 1) <= VerseRef(2, 1, 1)
assert not (VerseRef(10, 1, 1) <= VerseRef(1, 1, 1))
assert VerseRef(1, 1, 1) <= VerseRef(1, 1, 1)
assert VerseRef("GEN", "1", "1a") <= VerseRef("GEN", "1", "1b")
assert VerseRef("GEN", "1", "1a") <= VerseRef("GEN", "1", "1a")
assert VerseRef(1, 1, 1) <= VerseRef("GEN", "1", "1a")
assert not (VerseRef("GEN", "1", "1a") <= VerseRef(1, 1, 1))
def test_gt() -> None:
assert VerseRef(2, 1, 1) > VerseRef(1, 1, 1)
assert not (VerseRef(1, 1, 1) > VerseRef(10, 1, 1))
assert VerseRef("GEN", "1", "1b") > VerseRef("GEN", "1", "1a")
assert VerseRef("GEN", "1", "1a") > VerseRef(1, 1, 1)
assert not (VerseRef(1, 1, 1) > VerseRef("GEN", "1", "1a"))
def test_ge() -> None:
assert VerseRef(2, 1, 1) >= VerseRef(1, 1, 1)
assert not (VerseRef(1, 1, 1) >= VerseRef(10, 1, 1))
assert VerseRef(1, 1, 1) >= VerseRef(1, 1, 1)
assert VerseRef("GEN", "1", "1b") >= VerseRef("GEN", "1", "1a")
assert VerseRef("GEN", "1", "1a") >= VerseRef("GEN", "1", "1a")
assert VerseRef("GEN", "1", "1a") >= VerseRef(1, 1, 1)
assert not (VerseRef(1, 1, 1) >= VerseRef("GEN", "1", "1a"))
def test_eq() -> None:
assert VerseRef(1, 1, 1) == VerseRef(1, 1, 1)
assert VerseRef("GEN", "1", "1a") == VerseRef("GEN", "1", "1a")
assert VerseRef("GEN", "1", "1a") != VerseRef("GEN", "1", "1b")
assert VerseRef("GEN", "1", "1a") != VerseRef(1, 1, 1)
assert VerseRef("GEN", "1", "1a") != 1001001
def test_change_versification() -> None:
vref = VerseRef.from_string("EXO 6:0", ENGLISH_VERSIFICATION)
vref.change_versification(ORIGINAL_VERSIFICATION)
assert vref == VerseRef.from_string("EXO 6:0", ORIGINAL_VERSIFICATION)
vref = VerseRef.from_string("GEN 31:55", ENGLISH_VERSIFICATION)
vref.change_versification(ORIGINAL_VERSIFICATION)
assert vref == VerseRef.from_string("GEN 32:1", ORIGINAL_VERSIFICATION)
vref = VerseRef.from_string("ESG 1:2", ENGLISH_VERSIFICATION)
vref.change_versification(SEPTUAGINT_VERSIFICATION)
assert vref == VerseRef.from_string("ESG 1:1b", SEPTUAGINT_VERSIFICATION)
vref = VerseRef.from_string("ESG 1:1b", SEPTUAGINT_VERSIFICATION)
vref.change_versification(ENGLISH_VERSIFICATION)
assert vref == VerseRef.from_string("ESG 1:2", ENGLISH_VERSIFICATION)
vref = VerseRef.from_string("ESG 1:3", RUSSIAN_ORTHODOX_VERSIFICATION)
vref.change_versification(SEPTUAGINT_VERSIFICATION)
assert vref == VerseRef.from_string("ESG 1:1c", SEPTUAGINT_VERSIFICATION)
vref = VerseRef.from_string("ESG 1:1c", SEPTUAGINT_VERSIFICATION)
vref.change_versification(RUSSIAN_ORTHODOX_VERSIFICATION)
assert vref == VerseRef.from_string("ESG 1:3", RUSSIAN_ORTHODOX_VERSIFICATION)
def test_change_versification_with_ranges() -> None:
vref = VerseRef.from_string("EXO 6:0", ENGLISH_VERSIFICATION)
assert vref.change_versification(ORIGINAL_VERSIFICATION)
assert vref == VerseRef.from_string("EXO 6:0", ORIGINAL_VERSIFICATION)
vref = VerseRef.from_string("GEN 31:55", ENGLISH_VERSIFICATION)
assert vref.change_versification(ORIGINAL_VERSIFICATION)
assert vref == VerseRef.from_string("GEN 32:1", ORIGINAL_VERSIFICATION)
vref = VerseRef.from_string("GEN 32:3-4", ENGLISH_VERSIFICATION)
assert vref.change_versification(ORIGINAL_VERSIFICATION)
assert vref == VerseRef.from_string("GEN 32:4-5", ORIGINAL_VERSIFICATION)
# This is the case where this can't really work properly
vref = VerseRef.from_string("GEN 31:54-55", ENGLISH_VERSIFICATION)
assert not vref.change_versification(ORIGINAL_VERSIFICATION)
assert vref == VerseRef.from_string("GEN 31:54-1", ORIGINAL_VERSIFICATION)
def test_compare_to_with_without_verse_bridges() -> None:
vref_without_bridge = VerseRef(1, 1, 2)
vref_with_bridge = VerseRef.from_string("GEN 1:2-3")
assert vref_with_bridge.compare_to(vref_without_bridge) > 0
assert vref_without_bridge.compare_to(vref_with_bridge) < 0
def test_compare_to_same_verse_bridge() -> None:
vref1 = VerseRef.from_string("GEN 1:1-2")
vref2 = VerseRef.from_string("GEN 1:1-2")
assert vref2.compare_to(vref1) == 0
def test_compare_to_overlapping_verse_bridges() -> None:
vref1 = VerseRef.from_string("GEN 1:1-2")
vref2 = VerseRef.from_string("GEN 1:2-3")
assert vref2.compare_to(vref1) > 0
assert vref1.compare_to(vref2) < 0
def test_compare_to_verse_lists() -> None:
vref1 = VerseRef.from_string("GEN 1:2,3,21")
vref2 = VerseRef.from_string("GEN 1:2,21")
assert vref2.compare_to(vref1) > 0
assert vref1.compare_to(vref2) < 0
vref1 = VerseRef.from_string("GEN 1:2,3,21")
vref2 = VerseRef.from_string("GEN 1:2,3")
assert vref2.compare_to(vref1) < 0
assert vref1.compare_to(vref2) > 0
def test_compare_to_verse_bridge_includes_another() -> None:
vref1 = VerseRef.from_string("GEN 1:1-2")
vref2 = VerseRef.from_string("GEN 1:1-5")
assert vref2.compare_to(vref1) > 0
assert vref1.compare_to(vref2) < 0
def test_compare_to_versification_makes_different_verse_same() -> None:
vref1 = VerseRef.from_string("EXO 8:1", ENGLISH_VERSIFICATION)
# Set up another VerseRef that has a different verse that is defined to be same as EXO 8:1 in the Septuagint
# (The Septuagint is the same as original versification for these verses).
vref2 = VerseRef.from_string("EXO 7:26", SEPTUAGINT_VERSIFICATION)
assert vref2.compare_to(vref1) == 0
assert vref1.compare_to(vref2) == 0
def test_compare_to_versification_makes_different_verse_range_same() -> None:
vref1 = VerseRef.from_string("EXO 8:2-3", ENGLISH_VERSIFICATION)
# Set up another VerseRef that has a different verse range that is defined to be same as EXO 8:2-3 in original
# versification.
vref2 = VerseRef.from_string("EXO 7:27-28", ORIGINAL_VERSIFICATION)
assert vref2.compare_to(vref1) == 0
assert vref1.compare_to(vref2) == 0
def test_compare_to_versification_makes_same_verse_different() -> None:
vref1 = VerseRef.from_string("EXO 8:1", ENGLISH_VERSIFICATION)
# Set up another VerseRef that has a different verse that is different from original.
vref2 = VerseRef.from_string("EXO 8:1", ORIGINAL_VERSIFICATION)
    # Changing the English ref to the original versification (EXO 8:1 => EXO 7:26) means the difference (1) is found
    # in the chapter number, which is evaluated first.
assert vref2.compare_to(vref1) > 0
    # Changing the original-versification ref to English (EXO 8:1 => EXO 8:5) means the difference (-4) is found in
    # the verse number, since the book and chapter numbers are the same.
assert vref1.compare_to(vref2) < 0
def test_compare_to_versification_makes_same_verse_range_different() -> None:
vref1 = VerseRef.from_string("EXO 8:2-3", ENGLISH_VERSIFICATION)
# Set up another VerseRef that has a different verse that is different from original.
vref2 = VerseRef.from_string("EXO 8:2-3", SEPTUAGINT_VERSIFICATION)
    # Changing the English ref to the Septuagint versification (EXO 8:2-3 => EXO 7:27-28) means the difference (1) is
    # found in the chapter number, which is evaluated first.
assert vref2.compare_to(vref1) > 0
# Changing Septuagint ref to English versification (EXO 8:2-3 => EXO 8:6-7) so difference (-4) is found in verse
# number since book and chapter numbers are the same.
assert vref1.compare_to(vref2) < 0
def test_compare_to_segments() -> None:
assert VerseRef.from_string("GEN 1:1a").compare_to(VerseRef.from_string("GEN 1:1")) > 0
assert VerseRef.from_string("GEN 1:1").compare_to(VerseRef.from_string("GEN 1:1a")) < 0
assert VerseRef.from_string("GEN 1:1a").compare_to(VerseRef.from_string("GEN 1:1b")) < 0
assert VerseRef.from_string("GEN 1:1b").compare_to(VerseRef.from_string("GEN 1:1a")) > 0
assert VerseRef.from_string("GEN 1:1a").compare_to(VerseRef.from_string("GEN 1:1a")) == 0
assert VerseRef.from_string("GEN 1:1b").compare_to(VerseRef.from_string("GEN 1:1b")) == 0
def test_validated_segment() -> None:
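    # validated_segment() returns the segment attached to the first verse in the reference; a reference whose first
    # verse carries no segment yields an empty string, even if later verses in the bridge or list have segments.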
assert VerseRef.from_string("GEN 1:1").validated_segment() == ""
assert VerseRef.from_string("GEN 1:1a").validated_segment() == "a"
assert VerseRef.from_string("GEN 1:1@").validated_segment() == "@"
assert VerseRef.from_string("GEN 1:1a-5c").validated_segment() == "a"
assert VerseRef.from_string("GEN 1:1-5c").validated_segment() == ""
assert VerseRef.from_string("GEN 1:1b-3c").validated_segment() == "b"
assert VerseRef.from_string("GEN 1:1a,3,5").validated_segment() == "a"
assert VerseRef.from_string("GEN 1:1,3b,5").validated_segment() == ""
assert VerseRef.from_string("GEN 1:1abc").validated_segment() == "abc"
assert VerseRef.from_string("GEN 1:1a\u0301").validated_segment() == "a\u0301"
def test_validated_segment_with_versification_info() -> None:
versification = Versification.create("Dummy")
versification.verse_segments[get_bbbcccvvv(1, 1, 1)] = {"", "@", "$", "%", "abc", "a\u0301"}
assert VerseRef.from_string("GEN 1:1", versification).validated_segment() == ""
assert VerseRef.from_string("GEN 1:1a", versification).validated_segment() == ""
assert VerseRef.from_string("GEN 1:1@", versification).validated_segment() == "@"
assert VerseRef.from_string("GEN 1:1!", versification).validated_segment() == ""
assert VerseRef.from_string("GEN 1:1def", versification).validated_segment() == ""
assert VerseRef.from_string("GEN 1:2a", versification).validated_segment() == "a"
assert VerseRef.from_string("GEN 1:2b", versification).validated_segment() == "b"
assert VerseRef.from_string("GEN 1:1abc", versification).validated_segment() == "abc"
assert VerseRef.from_string("GEN 1:1abcdef", versification).validated_segment() == ""
assert VerseRef.from_string("GEN 1:1a\u0301", versification).validated_segment() == "a\u0301"
def test_validated_segment_with_defined_default_segments() -> None:
defined_segments = {"@", "$", "%", "abc", "a\u0301"}
assert VerseRef.from_string("GEN 1:1").validated_segment(defined_segments) == ""
assert VerseRef.from_string("GEN 1:1a").validated_segment(defined_segments) == ""
assert VerseRef.from_string("GEN 1:1@").validated_segment(defined_segments) == "@"
assert VerseRef.from_string("GEN 1:1$").validated_segment(defined_segments) == "$"
assert VerseRef.from_string("GEN 1:1!").validated_segment(defined_segments) == ""
assert VerseRef.from_string("GEN 1:1abc").validated_segment(defined_segments) == "abc"
assert VerseRef.from_string("GEN 1:1def").validated_segment(defined_segments) == ""
assert VerseRef.from_string("GEN 1:1a\u0301").validated_segment(defined_segments) == "a\u0301"
def test_validated_segment_with_versification_and_defined_default_segments() -> None:
versification = Versification.create("Dummy")
versification.verse_segments[get_bbbcccvvv(1, 1, 1)] = {"^", "&", "*", "a\u0301"}
defined_segments = {"@", "$", "%", "o\u0301"}
assert VerseRef.from_string("GEN 1:1*", versification).validated_segment(defined_segments) == "*"
assert VerseRef.from_string("GEN 1:1a\u0301", versification).validated_segment(defined_segments) == "a\u0301"
assert VerseRef.from_string("GEN 1:2a\u0301", versification).validated_segment(defined_segments) == ""
assert VerseRef.from_string("GEN 1:2*", versification).validated_segment(defined_segments) == ""
assert VerseRef.from_string("GEN 1:1@", versification).validated_segment(defined_segments) == ""
assert VerseRef.from_string("GEN 1:1o\u0301", versification).validated_segment(defined_segments) == ""
assert VerseRef.from_string("GEN 1:2@", versification).validated_segment(defined_segments) == "@"
assert VerseRef.from_string("GEN 1:2o\u0301", versification).validated_segment(defined_segments) == "o\u0301"
def test_str() -> None:
assert str(VerseRef(1, 0, 0)) == "GEN 0:0"
assert str(VerseRef(1, 1, 0)) == "GEN 1:0"
assert str(VerseRef(1, 2, 0)) == "GEN 2:0"
assert str(VerseRef(2, 4, 6)) == "EXO 4:6"
assert str(VerseRef("LEV", "4", "6b-7a")) == "LEV 4:6b-7a"
def test_simplify() -> None:
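    # simplify() strips segment letters, bridges, and verse lists, keeping only the leading verse number.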
vref = VerseRef()
vref.simplify()
assert vref == VerseRef()
vref = VerseRef.from_string("EXO 6:0")
vref.simplify()
assert vref == VerseRef.from_string("EXO 6:0")
vref = VerseRef.from_string("EXO 6:5b-18a,19")
vref.simplify()
assert vref == VerseRef.from_string("EXO 6:5")
vref = VerseRef.from_string("EXO 6:9a,9b")
vref.simplify()
assert vref == VerseRef.from_string("EXO 6:9")
vref = VerseRef.from_string("EXO 6:4-10")
vref.simplify()
assert vref == VerseRef.from_string("EXO 6:4")
vref = VerseRef.from_string("EXO 6:150monkeys")
vref.simplify()
assert vref == VerseRef.from_string("EXO 6:150")
def test_unbridge() -> None:
assert VerseRef().unbridge() == VerseRef()
assert VerseRef.from_string("EXO 6:0").unbridge() == VerseRef.from_string("EXO 6:0")
assert VerseRef.from_string("EXO 6:5b-18a,19").unbridge() == VerseRef.from_string("EXO 6:5b")
assert VerseRef.from_string("EXO 6:9a,9b").unbridge() == VerseRef.from_string("EXO 6:9a")
assert VerseRef.from_string("EXO 6:4-10").unbridge() == VerseRef.from_string("EXO 6:4")
assert VerseRef.from_string("EXO 6:150monkeys").unbridge() == VerseRef.from_string("EXO 6:150monkeys")
``` |
{
"source": "JohnMLarkin/Colab-for-Physics-Lab-Tools",
"score": 3
} |
#### File: Colab-for-Physics-Lab-Tools/whit_phys_util/create_pdf.py
```python
import os
import shutil
from notebook import notebookapp
import nbformat
from requests import get
from .git_access import LocalRepo
from .git_access._run_cmd import run_cmd
def notebook_to_pdf(repo=None,author=None,title=None):
for srv in notebookapp.list_running_servers():
try:
if srv['token'] == '' and not srv['password']:
server = srv['url'] + 'api/sessions'
else:
server = srv['url'] + 'api/sessions?token=' + srv['token']
nb_server_info = get(server).json()[0]
except:
pass
nb_loc = nb_server_info['path']
if 'fileId=' in nb_loc: # Looks like we are in Colab
nb_name = nb_server_info['name']
from google.colab import files
gdrive_home = '/content/drive/MyDrive'
nb_path = gdrive_home + '/Colab Notebooks/'
tmp_path = '/tmp'
# If the drive is not already mounted, attempt to mount it
if not os.path.isdir(gdrive_home):
from google.colab import drive
drive.mount('/content/drive')
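    # 'https%3A%2F%2Fgithub.com%2F' is the URL-encoded form of 'https://github.com/', i.e. the notebook was opened
    # directly from a GitHub repository rather than from Drive.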
if 'fileId=https%3A%2F%2Fgithub.com%2F' in nb_loc: # and file on GitHub
print('Note: Conversion will be performed on the most recent commit of this notebook on GitHub, not the working copy.')
nb_name_us = nb_name.replace("%20","_")
nb_name_us = nb_name_us.replace("-","_")
nb_name_us = nb_name_us.replace(":","_")
nb_name_us = nb_name_us.replace("'","_")
# nb_name = nb_name.replace("%20"," ")
nb_name = nb_name_us
if isinstance(repo,LocalRepo):
repo.pull()
shutil.copy(os.path.join(repo.repo_path, nb_name_us), os.path.join(tmp_path, nb_name))
else:
print('Please pass a GitHub repo object as an argument.')
return
else:
if not os.path.isfile(os.path.join(nb_path, nb_name)):
raise ValueError(f"file '{nb_name}' not found in path '{nb_path}'")
else:
shutil.copy(os.path.join(nb_path, nb_name), os.path.join(tmp_path, nb_name))
# If PDF with the same name exists, remove it
nb_file = os.path.join(tmp_path, nb_name)
pdf_file = os.path.join(tmp_path, nb_name.split(".")[0] + ".pdf")
if os.path.isfile(pdf_file):
os.remove(pdf_file)
# Add author(s) and title to notebook metadata (temp version only)
tmp_nb = nbformat.read(nb_file, nbformat.NO_CONVERT)
if title is not None:
tmp_nb['metadata']['title'] = title
if author is not None:
if isinstance(author,str):
tmp_nb['metadata']['authors'] = [{"name": author}]
elif isinstance(author,list):
author_list = []
for person in author:
author_list.append({"name": person})
tmp_nb['metadata']['authors'] = author_list
nbformat.write(tmp_nb,nb_file)
# Install the packages required for conversion
print("Installing required packages. This often takes 1-2 minutes.")
run_cmd("apt update >> /dev/null && apt install texlive-xetex texlive-fonts-recommended texlive-generic-recommended >> /dev/null")
# Attempt to convert to PDF (via LaTeX)
print(f"Preparing to convert '{nb_name}'")
run_cmd(f"jupyter nbconvert --output-dir='{tmp_path}' '{nb_file}' --to pdf",verbose=True)
# Attempt to download
files.download(pdf_file)
else:
print('Sorry. Only implemented for Colab.')
return
``` |
{
"source": "JohnMLarkin/PS453-Magnetization-1-Template",
"score": 2
} |
#### File: JohnMLarkin/PS453-Magnetization-1-Template/siglent_oscillscope.py
```python
import time
import pandas as pd
import numpy as np
class SiglentOscilloscope:
def __init__(self, resource_manager):
resources = resource_manager.list_resources()
for visa_addr in resources:
if ('USB' in visa_addr) and ('SDS' in visa_addr):
self.instr = resource_manager.open_resource(visa_addr)
addr_expand = visa_addr.split('::')
if self.instr is not None:
id_string = self.instr.query('*IDN?')
id_expand = id_string.split(',')
if 'SIGLENT' in id_expand[0]:
self.model = id_expand[1]
self.sn = id_expand[2]
                print('Connected to', self.model, 'with S/N', self.sn)
self.echo_command(False)
else:
raise Exception("Failed to connect to a Siglent instrument")
def echo_command(self, enable):
if enable:
self.instr.write('COMM_HEADER LONG')
self.quiet = False
else:
self.instr.write('COMM_HEADER OFF')
self.quiet = True
def close(self):
time.sleep(0.01) # delay to give time for previous command to finish
self.instr.close()
def __del__(self):
self.close()
def set_coupling(self, channel: int, coupling: str):
if coupling in ['AC', 'DC']:
self.instr.write('C%d:COUPLING %c1M' % (channel, coupling[0]))
else:
raise Exception("Invalid coupling string. Only 'AC' or 'DC' are permitted.")
def set_offset(self, channel: int, value: float, units: str):
if units in ['V','mV','uV']:
self.instr.write('C%d:OFFSET %f%s' % (channel, value, units))
else:
raise Exception("Invalid units string. Only 'V', 'mV', or 'uV' are permitted.")
def get_offset(self, channel):
if channel in [1,2]:
if (self.quiet):
volt_offset = self.instr.query('C%d:OFFSET?' % channel)
return float(volt_offset)
def set_volt_per_div(self, channel: int, value: float, units: str):
if units in ['V','mV','uV']:
self.instr.write('C%d:VOLT_DIV %f%s' % (channel, value, units))
else:
raise Exception("Invalid units string. Only 'V', 'mV', or 'uV' are permitted.")
def get_volt_per_div(self, channel):
if channel in [1,2]:
if (self.quiet):
volt_per_div = self.instr.query('C%d:VOLT_DIV?' % channel)
return float(volt_per_div)
def set_time_per_div(self, value: float, units: str):
units = units.upper()
unit_opts = ['NS','US','MS','S']
val_opts = [1, 2.5, 5, 10, 25, 50, 100, 250, 500]
if (units in unit_opts) and (value in val_opts):
self.instr.write('TIME_DIV %.1f%s' % (value, units))
else:
if not (units in unit_opts):
raise Exception("Invalid units string. Only 'ns', 'us', 'ms' and 's' are permitted.")
if not (value in val_opts):
raise Exception("Illegal value. Only 1, 2.5, 5, 10, 25, 50, 100, 250, or 500 are permitted.")
def get_time_per_div(self):
if (self.quiet):
time_per_div = self.instr.query('TIME_DIV?')
return float(time_per_div)
def set_trigger_delay(self, value: float, units: str):
units = units.upper()
unit_opts = ['NS','US','MS','S']
if (units in unit_opts):
self.instr.write('TRIG_DELAY %.1f%s' % (value, units))
else:
raise Exception("Invalid units string. Only 'ns', 'us', 'ms' and 's' are permitted.")
def get_trigger_delay(self):
if (self.quiet):
trig_delay_string = self.instr.query('TRIG_DELAY?')
if ('n' in trig_delay_string):
trig_delay = float(trig_delay_string.split('n')[0])*1e-9
elif ('u' in trig_delay_string):
trig_delay = float(trig_delay_string.split('u')[0])*1e-6
elif ('m' in trig_delay_string):
trig_delay = float(trig_delay_string.split('m')[0])*1e-3
else:
trig_delay = float(trig_delay_string.split('s')[0])
return trig_delay
def set_trigger_coupling(self, channel, coupling: str):
ch_opts = [1, 2, 'EX']
coupling_opts = ['AC', 'DC', 'HFREJ','LFREJ']
if (channel in ch_opts) and (coupling in coupling_opts):
if channel in [1,2]:
self.instr.write('C%d:TRIG_COUPLING %s' % (channel, coupling))
else:
self.instr.write('%s:TRIG_COUPLING %s' % (channel, coupling))
else:
if not (channel in ch_opts):
raise Exception("Invalid channel. Only 1, 2 or 'EX' are permitted.")
if not (coupling in coupling_opts):
raise Exception("Invalid coupling option. Only 'AC', 'DC', 'HFREJ', and 'LFREJ' are permitted.")
def set_trigger_level(self, channel, value, units: str):
ch_opts = [1, 2, 'EX']
unit_opts = ['V','mV','uV']
if (channel in ch_opts) and (units in unit_opts):
if channel in [1,2]:
self.instr.write('C%d:TRIG_LEVEL %.2f%s' % (channel, value, units))
else:
self.instr.write('%s:TRIG_LEVEL %.2f%s' % (channel, value, units))
else:
if not (channel in ch_opts):
raise Exception("Invalid channel. Only 1, 2 or 'EX' are permitted.")
if not (units in unit_opts):
raise Exception("Invalid units. Only 'V', 'mV', and 'uV' are permitted.")
def set_trigger_mode(self, mode: str):
mode_opts = ['AUTO','NORM','SINGLE']
if (mode in mode_opts):
self.instr.write('TRIG_MODE %s' % mode)
else:
raise Exception("Invalid mode. Only 'AUTO', 'NORM', and 'SINGLE' are permitted.")
def set_trigger_slope(self, channel, slope: str):
ch_opts = [1, 2, 'EX']
slope_opts = ['NEG','POS','WINDOW']
if (channel in ch_opts) and (slope in slope_opts):
if channel in [1,2]:
self.instr.write('C%d:TRIG_SLOPE %s' % (channel, slope))
else:
self.instr.write('%s:TRIG_SLOPE %s' % (channel, slope))
else:
if not (channel in ch_opts):
raise Exception("Invalid channel. Only 1, 2 or 'EX' are permitted.")
if not (slope in slope_opts):
raise Exception("Invalid slope option. Only 'NEG', 'POS', or 'WINDOW' are permitted.")
def get_sample_rate(self):
if (self.quiet):
rate_string = self.instr.query('SAMPLE_RATE?')
if ('G' in rate_string):
                sample_rate = float(rate_string.split('G')[0])*1e9
elif ('M' in rate_string):
sample_rate = float(rate_string.split('M')[0])*1e6
elif ('K' in rate_string):
sample_rate = float(rate_string.split('K')[0])*1e3
else:
sample_rate = float(rate_string.split('Sa')[0])
return sample_rate
def get_sample_length(self, channel):
if channel in [1,2]:
if (self.quiet):
length_string = self.instr.query('SAMPLE_NUM? C%d' % channel)
return int(length_string)
def get_wave(self, channel):
if not self.quiet:
self.echo_command(False)
chatty = True
else:
chatty = False
self.instr.timeout = 3000 # wait up to 3 s
self.instr.chunk_size = 20*1024*1024
rate = self.get_sample_rate()
length = self.get_sample_length(channel)
self.instr.write('C%d:WF? DAT2' % channel)
recv = self.instr.read_raw()
head = recv[0:3].decode()
num_bytes = int(recv[6:15].decode())
tail = recv[-2:].hex()
wave_bytes = recv[15:-2]
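        # Parsed frame layout: bytes 0-2 spell 'ALL', bytes 6-14 hold the ASCII byte count, the sample bytes start
        # at offset 15, and the final two bytes are a 0x0A 0x0A terminator.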
# Integrity check
if (head == 'ALL') and (num_bytes == len(wave_bytes)) and (tail == '0a0a'):
# only export data visible on oscilloscope screen
volts = np.empty(length, dtype=float)
t = np.empty(length, dtype=float)
start_byte = int(num_bytes/2-length/2)
volt_per_div = self.get_volt_per_div(channel)
volt_offset = self.get_offset(channel)
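            # Each raw byte is a signed 8-bit sample code with 25 counts per vertical division:
            # volts = code / 25 * volt_per_div - volt_offset.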
for i in range(length):
if wave_bytes[i+start_byte] > 127:
volts[i] = (wave_bytes[i + start_byte]-256)/25*volt_per_div-volt_offset
else:
volts[i] = wave_bytes[i + start_byte]/25*volt_per_div-volt_offset
t[i] = (i-length/2)/rate
else:
raise Exception('Invalid wave data received from oscilloscope.')
data = pd.DataFrame({'Time (s)': t, 'Volts (V)': volts})
if chatty:
self.echo_command(True)
return data
def query(self,cmd_string):
response = self.instr.query(cmd_string)
print(response[:-1])
return response[:-1]
def read_raw(self):
response = self.instr.read_raw()
return response
def command(self,cmd_string):
self.instr.write(cmd_string)
``` |
{
"source": "john-m-liu/logkeeper",
"score": 2
} |
#### File: john-m-liu/logkeeper/break_up_large_documents.py
```python
import pymongo
import sys
import datetime
from optparse import OptionParser
# Inserts 3 global logs:
# Log 1: one line with 3MB of "1"
# Log 2: one line with 3MB of "2", one line with 3MB of "3"
# Log 3: one line with 3MB of "4"
# Checks that these become:
# Log 1: one line with 3MB of "1"
# Log 2: one line with 3MB of "2"
# Log 3: one line with 3MB of "3"
# Log 4: one line with 3MB of "4"
# Checks that all other log fields and the build seq are correct
# Repeats for non-global logs
def test():
connection.drop_database("logkeeper_test")
db = connection.logkeeper_test
builds = db.builds
tests = db.tests
logs = db.logs
line_size = 3 * 1024 * 1024 # 3MB
dates = [datetime.datetime(2015, 1, 1, 0, 0, 0, 0),
datetime.datetime(2015, 2, 1, 0, 0, 0, 0),
datetime.datetime(2015, 2, 1, 0, 0, 0, 0),
datetime.datetime(2015, 3, 1, 0, 0, 0, 0)]
builds.insert_one({"name": "global_build", "seq": 3})
global_build = builds.find_one({"name": "global_build"})
logs.insert_one({
"build_id": global_build["_id"],
"seq": 1,
"started": dates[0],
"lines": [[datetime.datetime.utcnow(), "1" * line_size]]})
logs.insert_one({
"build_id": global_build["_id"],
"seq": 2,
"started": dates[1],
"lines": [
[datetime.datetime.utcnow(), "2" * line_size],
[datetime.datetime.utcnow(), "3" * line_size]]})
logs.insert_one({
"build_id": global_build["_id"],
"seq": 3,
"started": dates[3],
"lines": [[datetime.datetime.utcnow(), "4" * line_size]]})
builds.insert_one({"name": "build", "seq": 0})
build = builds.find_one({"name": "build"})
tests.insert_one({"name": "test", "build_id": build["_id"], "seq": 3})
test = tests.find_one({"name": "test"})
logs.insert_one({
"build_id": build["_id"],
"test_id": test["_id"],
"seq": 1,
"started": dates[0],
"lines": [[datetime.datetime.utcnow(), "1" * line_size]]})
logs.insert_one({
"build_id": build["_id"],
"test_id": test["_id"],
"seq": 2,
"started": dates[1],
"lines": [
[datetime.datetime.utcnow(), "2" * line_size],
[datetime.datetime.utcnow(), "3" * line_size]]})
logs.insert_one({
"build_id": build["_id"],
"test_id": test["_id"],
"seq": 3,
"started": dates[3],
"lines": [[datetime.datetime.utcnow(), "4" * line_size]]})
break_up_large_documents(builds, tests, logs)
if builds.find_one({"_id": global_build["_id"]})["seq"] != 4:
print "Global build has wrong seq"
return
seq = 1
for log in logs.find({"build_id": global_build["_id"], "test_id": None}).sort("seq",
pymongo.ASCENDING):
if log["seq"] != seq:
print "Global log ", seq, " has wrong seq: ", log["seq"]
return
if log["started"] != dates[seq - 1]:
print "Global log ", seq, " has wrong started: ", log["started"]
return
if len(log["lines"]) != 1:
print "Global log ", seq, " has wrong number of lines: ", len(log["lines"])
return
if log["lines"][0][1][0] != str(seq):
print "Global log ", seq, " line has wrong first char: ", log["lines"][0][1][0]
return
seq = seq + 1
if builds.find_one({"_id": build["_id"]})["seq"] != 0:
print "Build has wrong seq"
return
if tests.find_one({"_id": test["_id"]})["seq"] != 4:
print "Test has wrong seq"
return
seq = 1
for log in logs.find({"build_id": build["_id"], "test_id": test["_id"]}).sort("seq",
pymongo.ASCENDING):
if log["seq"] != seq:
print "Log ", seq, " has wrong seq: ", log["seq"]
return
if log["started"] != dates[seq - 1]:
print "Log ", seq, " has wrong started: ", log["started"]
return
if len(log["lines"]) != 1:
print "Log ", seq, " has wrong number of lines: ", len(log["lines"])
return
if log["lines"][0][1][0] != str(seq):
print "Log ", seq, " line has wrong first char: ", log["lines"][0][1][0]
return
seq = seq + 1
print "All tests passed"
connection.drop_database("logkeeper_test")
# Breaks up logs over 4MB
def break_up_large_documents(builds, tests, logs):
max_size = 4 * 1024 * 1024 # 4MB
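    # Example: a log with two 3 MB lines and seq 2 becomes two one-line logs with seq 2 and 3; every later log in the
    # same build/test and the parent test (or build) seq counter are shifted up by the number of logs added.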
i = 0
for log in logs.find().sort("_id", pymongo.ASCENDING):
if i % 10000 == 0 and i > 0:
print "Checked ", i, " logs, now checking log with _id:", log["_id"]
i = i + 1
# Test if the log is too large
size = 0
for line in log["lines"]:
size += len(line[1])
if size <= max_size:
continue
print "Breaking up log"
print "\t_id:", log["_id"]
print "\tbuild_id:", log["build_id"]
if "test_id" in log.keys():
print "\ttest_id", log["test_id"]
# Initialize a list of new logs
# New log seq values will begin at log["seq"]
seq = log["seq"]
new_log = {"build_id": log["build_id"],
"seq": seq,
"started": log["started"],
"lines": []}
if "test_id" in log.keys():
new_log["test_id"] = log["test_id"]
new_log_size = 0
new_logs = [new_log]
# Break up lines of log into 4MB chunks
for line in log["lines"]:
# Check if new_log is full, and if so, create a new log
if new_log_size + len(line[1]) > max_size:
seq += 1
new_log = {"build_id": log["build_id"],
"seq": seq,
"started": log["started"],
"lines": []}
if "test_id" in log.keys():
new_log["test_id"] = log["test_id"]
new_log_size = 0
new_logs.append(new_log)
# Add the line to the current new_log and update its size
new_log["lines"].append(line)
new_log_size = new_log_size + len(line[1])
# Number of logs we have added
inc = seq - log["seq"]
# Increment seq values of later logs
logs.update_many({
"build_id": log["build_id"],
"test_id": log.get("test_id"),
"seq": {"$gt": log["seq"]}},
{"$inc": {"seq": inc}})
# Increment seq value of test
if "test_id" in log.keys():
tests.update_one({"_id": log["test_id"]}, {"$inc": {"seq": inc}})
else:
# Increment seq value of build
builds.update_one({"_id": log["build_id"]}, {"$inc": {"seq": inc}})
# Replace log with list of new logs
logs.insert_many(new_logs)
logs.delete_one({"_id": log["_id"]})
parser = OptionParser()
parser.add_option("--test", dest="test", action="store_true", default=False)
parser.add_option("--host", dest="host", default="localhost")
(options, args) = parser.parse_args()
connection = pymongo.MongoClient("mongodb://" + options.host)
if options.test:
test()
else:
db = connection.buildlogs
break_up_large_documents(db.builds, db.tests, db.logs)
``` |
{
"source": "john-m-liu/mongo",
"score": 2
} |
#### File: resmokelib/testing/executor.py
```python
from __future__ import absolute_import
import threading
import time
from . import fixtures
from . import hook_test_archival as archival
from . import hooks as _hooks
from . import job as _job
from . import report as _report
from . import testcases
from .. import config as _config
from .. import errors
from .. import utils
from ..core import network
from ..utils import queue as _queue
class TestSuiteExecutor(object): # pylint: disable=too-many-instance-attributes
"""Execute a test suite.
Responsible for setting up and tearing down the fixtures that the
tests execute against.
"""
_TIMEOUT = 24 * 60 * 60 # =1 day (a long time to have tests run)
def __init__( # pylint: disable=too-many-arguments
self, exec_logger, suite, config=None, fixture=None, hooks=None, archive_instance=None,
archive=None):
"""Initialize the TestSuiteExecutor with the test suite to run."""
self.logger = exec_logger
if _config.SHELL_CONN_STRING is not None:
# Specifying the shellConnString command line option should override the fixture
# specified in the YAML configuration to be the no-op fixture.
self.fixture_config = {"class": fixtures.NOOP_FIXTURE_CLASS}
else:
self.fixture_config = fixture
self.hooks_config = utils.default_if_none(hooks, [])
self.test_config = utils.default_if_none(config, {})
self.archival = None
if archive_instance:
self.archival = archival.HookTestArchival(suite, self.hooks_config, archive_instance,
archive)
self._suite = suite
self.test_queue_logger = self.logger.new_testqueue_logger(suite.test_kind)
# Only start as many jobs as we need. Note this means that the number of jobs we run may
# not actually be _config.JOBS or self._suite.options.num_jobs.
jobs_to_start = self._suite.options.num_jobs
self.num_tests = len(suite.tests) * self._suite.options.num_repeat_tests
if self.num_tests < jobs_to_start:
self.logger.info(
"Reducing the number of jobs from %d to %d since there are only %d test(s) to run.",
self._suite.options.num_jobs, self.num_tests, self.num_tests)
jobs_to_start = self.num_tests
# Must be done after getting buildlogger configuration.
self._jobs = [self._make_job(job_num) for job_num in xrange(jobs_to_start)]
def run(self):
"""Execute the test suite.
Any exceptions that occur during setting up or tearing down a
fixture are propagated.
"""
self.logger.info("Starting execution of %ss...", self._suite.test_kind)
return_code = 0
# The first run of the job will set up the fixture.
setup_flag = threading.Event()
# We reset the internal state of the PortAllocator so that ports used by the fixture during
# a test suite run earlier can be reused during this current test suite.
network.PortAllocator.reset()
teardown_flag = None
try:
num_repeat_suites = self._suite.options.num_repeat_suites
while num_repeat_suites > 0:
test_queue = self._make_test_queue()
partial_reports = [job.report for job in self._jobs]
self._suite.record_test_start(partial_reports)
# Have the Job threads destroy their fixture during the final repetition after they
# finish running their last test. This avoids having a large number of processes
# still running if an Evergreen task were to time out from a hang/deadlock being
# triggered.
teardown_flag = threading.Event() if num_repeat_suites == 1 else None
(report, interrupted) = self._run_tests(test_queue, setup_flag, teardown_flag)
self._suite.record_test_end(report)
if setup_flag and setup_flag.is_set():
self.logger.error("Setup of one of the job fixtures failed")
return_code = 2
return
# Remove the setup flag once the first suite ran.
setup_flag = None
# If the user triggered a KeyboardInterrupt, then we should stop.
if interrupted:
raise errors.UserInterrupt("Received interrupt from user")
if teardown_flag and teardown_flag.is_set():
return_code = 2
sb = [] # String builder.
self._suite.summarize_latest(sb)
self.logger.info("Summary: %s", "\n ".join(sb))
if not report.wasSuccessful():
return_code = 1
if self._suite.options.fail_fast:
break
test_report = report.as_dict()
test_results_num = len(test_report["results"])
# There should be at least as many tests results as expected number of tests.
if test_results_num < self.num_tests:
raise errors.ResmokeError("{} reported tests is less than {} expected tests"
.format(test_results_num, self.num_tests))
# Clear the report so it can be reused for the next execution.
for job in self._jobs:
job.report.reset()
num_repeat_suites -= 1
finally:
if not teardown_flag:
if not self._teardown_fixtures():
return_code = 2
self._suite.return_code = return_code
def _run_tests(self, test_queue, setup_flag, teardown_flag):
"""Start a thread for each Job instance and block until all of the tests are run.
Returns a (combined report, user interrupted) pair, where the
report contains the status and timing information of tests run
by all of the threads.
"""
threads = []
interrupt_flag = threading.Event()
user_interrupted = False
try:
# Run each Job instance in its own thread.
for job in self._jobs:
thr = threading.Thread(target=job, args=(test_queue, interrupt_flag), kwargs=dict(
setup_flag=setup_flag, teardown_flag=teardown_flag))
# Do not wait for tests to finish executing if interrupted by the user.
thr.daemon = True
thr.start()
threads.append(thr)
# SERVER-24729 Need to stagger when jobs start to reduce I/O load if there
# are many of them. Both the 5 and the 10 are arbitrary.
# Currently only enabled on Evergreen.
if _config.STAGGER_JOBS and len(threads) >= 5:
time.sleep(10)
joined = False
while not joined:
# Need to pass a timeout to join() so that KeyboardInterrupt exceptions
# are propagated.
joined = test_queue.join(TestSuiteExecutor._TIMEOUT)
except (KeyboardInterrupt, SystemExit):
interrupt_flag.set()
user_interrupted = True
else:
# Only wait for all the Job instances if not interrupted by the user.
for thr in threads:
thr.join()
reports = [job.report for job in self._jobs]
combined_report = _report.TestReport.combine(*reports)
# We cannot return 'interrupt_flag.is_set()' because the interrupt flag can be set by a Job
# instance if a test fails and it decides to drain the queue. We only want to raise a
# StopExecution exception in TestSuiteExecutor.run() if the user triggered the interrupt.
return (combined_report, user_interrupted)
def _teardown_fixtures(self):
"""Tear down all of the fixtures.
Returns true if all fixtures were torn down successfully, and
false otherwise.
"""
success = True
for job in self._jobs:
if not job.teardown_fixture():
self.logger.warning("Teardown of %s of job %s was not successful", job.fixture,
job.job_num)
success = False
return success
def _make_fixture(self, job_num, job_logger):
"""Create a fixture for a job."""
fixture_config = {}
fixture_class = fixtures.NOOP_FIXTURE_CLASS
if self.fixture_config is not None:
fixture_config = self.fixture_config.copy()
fixture_class = fixture_config.pop("class")
fixture_logger = job_logger.new_fixture_logger(fixture_class)
return fixtures.make_fixture(fixture_class, fixture_logger, job_num, **fixture_config)
def _make_hooks(self, fixture):
"""Create the hooks for the job's fixture."""
hooks = []
for hook_config in self.hooks_config:
hook_config = hook_config.copy()
hook_class = hook_config.pop("class")
hook_logger = self.logger.new_hook_logger(hook_class, fixture.logger)
hook = _hooks.make_hook(hook_class, hook_logger, fixture, **hook_config)
hooks.append(hook)
return hooks
def _make_job(self, job_num):
"""Return a Job instance with its own fixture, hooks, and test report."""
job_logger = self.logger.new_job_logger(self._suite.test_kind, job_num)
fixture = self._make_fixture(job_num, job_logger)
hooks = self._make_hooks(fixture)
report = _report.TestReport(job_logger, self._suite.options)
return _job.Job(job_num, job_logger, fixture, hooks, report, self.archival,
self._suite.options, self.test_queue_logger)
def _make_test_queue(self):
"""Return a queue of TestCase instances.
Use a multi-consumer queue instead of a unittest.TestSuite so
that the test cases can be dispatched to multiple threads.
"""
# Put all the test cases in a queue.
queue = _queue.Queue()
for _ in range(self._suite.options.num_repeat_tests):
for test_name in self._suite.tests:
test_case = testcases.make_test_case(self._suite.test_kind, self.test_queue_logger,
test_name, **self.test_config)
queue.put(test_case)
# Add sentinel value for each job to indicate when there are no more items to process.
for _ in xrange(len(self._jobs)):
queue.put(None)
return queue
``` |
{
"source": "john-mlr/CLD-UnsupervisedLearning",
"score": 3
} |
#### File: CLD-UnsupervisedLearning/models/resnet_cifar.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.normalize import Normalize
from torch.autograd import Variable
from torch.nn import Parameter
class NormedLinear(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinear, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
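        # Uniform init, then rescale each output column to unit L2 norm (renorm columns to 1e-5, multiply by 1e5).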
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, low_dim=128, medium_dim=128, mlp=False, pool_len=4, normlinear=False):
super(ResNet, self).__init__()
self.pool_len = pool_len
self.in_planes = 64
linear_layer = NormedLinear if normlinear else nn.Linear
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.mlp = mlp
if self.mlp:
self.pre_fc = nn.Sequential(
nn.Linear(512*block.expansion, medium_dim),
)
self.linear = linear_layer(medium_dim, low_dim)
self.l2norm = Normalize(2)
else:
self.linear = linear_layer(512*block.expansion, low_dim)
self.l2norm = Normalize(2)
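        # Separate projection head for the group-wise (clustering) branch; its output is returned alongside the
        # instance branch when forward(..., two_branch=True).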
self.groupDis = nn.Sequential(
linear_layer(512*block.expansion, low_dim),
Normalize(2))
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x, two_branch=False):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, self.pool_len)
out_ = out.view(out.size(0), -1)
if self.mlp:
out_ = self.pre_fc(out_)
out = self.linear(out_)
out = self.l2norm(out)
if two_branch:
out_2 = self.groupDis(out_)
return out, out_2
return out
def ResNet18(low_dim=128, medium_dim=128, mlp=False, pool_len=4, normlinear=False):
return ResNet(BasicBlock, [2,2,2,2], low_dim, medium_dim=medium_dim, mlp=mlp, pool_len=pool_len, normlinear=normlinear)
def test():
net = ResNet18()
y = net(Variable(torch.randn(1,3,32,32)))
print(y.size())
```
#### File: john-mlr/CLD-UnsupervisedLearning/train_cifar_npid_cld.py
```python
from __future__ import print_function
import sys
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import lib.custom_transforms as custom_transforms
import os
import argparse
import time
import models
import datasets
import math
import glob
from lib.NCEAverage import NCEAverage
from lib.LinearAverage import LinearAverage
from lib.NCECriterion import NCECriterion
from lib.utils import AverageMeter
from test import NN, kNN
from spectral_clustering import spectral_clustering, pairwise_cosine_similarity, KMeans
from lib.lr_scheduler import get_scheduler
from torch.nn.parallel import DistributedDataParallel
from datasets.dataloader import get_dataloader
import datetime
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--resume', '-r', default='', type=str, help='resume from checkpoint')
parser.add_argument('--test-only', action='store_true', help='test only')
parser.add_argument('--low-dim', default=128, type=int,
metavar='D', help='feature dimension')
parser.add_argument('--nce-k', default=4096, type=int,
metavar='K', help='number of negative samples for NCE')
parser.add_argument('--nce-t', default=0.1, type=float,
metavar='T', help='temperature parameter for softmax')
parser.add_argument('--nce-m', default=0.5, type=float,
metavar='M', help='momentum for non-parametric updates')
parser.add_argument('--save-dir', default='checkpoint/', type=str, help='path to save checkpoint')
parser.add_argument('--dataset', default='cifar10', type=str, help='datasets to train')
parser.add_argument('--save-interval', default=100, type=int,
                    help='interval for saving checkpoint')
parser.add_argument('--epochs', default=200, type=int, help='number of training epochs')
parser.add_argument('--lr', default=0.03, type=float, help='learning rate')
parser.add_argument('--lr-scheduler', type=str, default='cosine',
choices=["step", "cosine"], help="learning rate scheduler")
parser.add_argument('--warmup-epoch', type=int, default=5, help='warmup epoch')
parser.add_argument('--warmup-multiplier', type=int, default=100, help='warmup multiplier')
parser.add_argument('--lr-decay-epochs', type=int, default=[120, 160, 200], nargs='+',
help='for step scheduler. where to decay lr, can be a list')
parser.add_argument('--lr-decay-rate', type=float, default=0.1,
help='for step scheduler. decay rate for learning rate')
parser.add_argument('--weight-decay', type=float, default=5e-4, help='weight decay')
parser.add_argument('--batch-size', default=128, type=int,
help='batch size of each iteration')
parser.add_argument('--recompute-memory', action='store_true', help='whether to recompute the memory bank')
parser.add_argument('--clusters', default=10, type=int,
help='num of clusters for clustering')
parser.add_argument('--k_eigen', default=10, type=int,
help='num of eigenvectors for k-way normalized cuts')
parser.add_argument('--cld_t', default=0.07, type=float,
help='temperature for clustering')
parser.add_argument('--two-imgs', action='store_true', help='Whether to use two randomly augmented views')
parser.add_argument('--three-imgs', action='store_true', help='Whether to use three randomly augmented views')
parser.add_argument('--use-kmeans', action='store_true', help='Whether to use k-means for clustering; normalized cuts are used otherwise')
parser.add_argument('--num_iters', default=20, type=int,
help='num of iters for clustering')
parser.add_argument('--Lambda', default=1.0, type=float, help='Lambda for groupDis branch')
# misc
parser.add_argument("--local_rank", type=int, help='local rank for DistributedDataParallel')
parser.add_argument("--rng-seed", type=int, default=0, help='manual seed')
parser.add_argument("--amp", action="store_true",
help="use 16-bit (mixed) precision through NVIDIA apex AMP")
parser.add_argument("--opt-level", type=str, default="O1",
help="apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--static-loss-scale', type=float, default=1,
help='Static loss scale, positive power of 2 values can improve fp16 convergence.')
parser.add_argument('--dynamic-loss-scale', action='store_true',
help='Use dynamic loss scaling. If supplied, this argument supersedes ' +
'--static-loss-scale.')
parser.add_argument("--num_workers", type=int, default=2, help='number of workers for data loader')
args = parser.parse_args()
args.lr = args.batch_size / 128.0 * args.lr
print('INFO CONFIG IS: ', args)
if args.amp:
try:
# noinspection PyUnresolvedReferences
from apex import amp
except ImportError:
amp = None
def write_log(args, file_name, epoch, key, top1, top5):
acc_file = open(os.path.join(args.save_dir, file_name), 'a')
# Append accuracy to txt file
acc_file.write('Epoch {} {}: top-1 {:.2f} top5 {:.2f}\n'.format(epoch, key, top1*100., top5*100.))
# Close the file
acc_file.close()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
best_acc1 = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
# Data
print('==> Preparing data..')
trainloader, testloader, ndata = get_dataloader(args)
print('==> Building model..')
net = models.__dict__['ResNet18'](low_dim=args.low_dim, pool_len=args.pool_len)
# define lemniscate
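# (non-parametric memory bank with one low-dim feature per training sample; negatives for NCE are drawn from it)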
if args.nce_k > 0:
lemniscate = NCEAverage(args.low_dim, ndata, args.nce_k, args.nce_t, args.nce_m)
else:
lemniscate = LinearAverage(args.low_dim, ndata, args.nce_t, args.nce_m)
net.to(device)
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.weight_decay)
if device == 'cuda':
if args.opt_level != "O0":
if amp is None:
            print(f"apex is not installed but opt-level is set to {args.opt_level}, ignoring.\n"
                  "you should install apex from https://github.com/NVIDIA/apex#quick-start first")
args.opt_level = "O0"
else:
net, optimizer = amp.initialize(net, optimizer, opt_level=args.opt_level)
net = DistributedDataParallel(net, device_ids=[args.local_rank], broadcast_buffers=False)
cudnn.benchmark = True
scheduler = get_scheduler(optimizer, len(trainloader), args)
# Model
if args.test_only or len(args.resume)>0:
# Load checkpoint.
print('==> Resuming from checkpoint..')
checkpoint = torch.load(args.resume)
net.load_state_dict(checkpoint['net'])
lemniscate = checkpoint['lemniscate']
best_acc1 = checkpoint['acc']
start_epoch = checkpoint['epoch']
# define loss function
if hasattr(lemniscate, 'K'):
criterion = NCECriterion(ndata)
else:
criterion = nn.CrossEntropyLoss()
criterion_cld = nn.CrossEntropyLoss()
criterion_cld.to(device)
lemniscate.to(device)
criterion.to(device)
if args.test_only:
acc = kNN(0, net, lemniscate, trainloader, testloader, 200, args.nce_t, 1)
sys.exit(0)
# Training
def train(epoch):
print('\nEpoch: %d' % epoch)
torch.set_num_threads(1)
if args.lr_scheduler == 'cosine':
trainloader.sampler.set_epoch(epoch)
train_loss = AverageMeter()
data_time = AverageMeter()
batch_time = AverageMeter()
train_CLD_loss = AverageMeter()
train_CLD_acc = AverageMeter()
# switch to train mode
net.train()
end = time.time()
for batch_idx, (inputs, targets, indexes) in enumerate(trainloader):
data_time.update(time.time() - end)
targets, indexes = targets.to(device), indexes.to(device)
        # If two_imgs: the first augmented view is used for F1 and the second for F2. F1 comes from
        # branch 1 of the net; F2 comes from branch 1 if only one branch exists, else from branch 2.
if args.two_imgs:
inputs1 = inputs[0].to(device)
inputs2 = inputs[1].to(device)
else:
inputs1 = inputs.to(device)
optimizer.zero_grad()
features_insDis1, features_batchDis1 = net(inputs1, two_branch=True)
outputs1 = lemniscate(features_insDis1, indexes)
# NCE loss
insDis_loss = criterion(outputs1, indexes)
if args.two_imgs:
features_insDis2, features_batchDis2 = net(inputs2, two_branch=True)
outputs2 = lemniscate(features_insDis2, indexes)
# NCE loss
loss_nce_2 = criterion(outputs2, indexes)
insDis_loss = (insDis_loss + loss_nce_2)/2
# K-way normalized cuts or k-Means. Default: k-Means
if args.use_kmeans:
cluster_label1, centroids1 = KMeans(features_batchDis1, K=args.clusters, Niters=args.num_iters)
cluster_label2, centroids2 = KMeans(features_batchDis2, K=args.clusters, Niters=args.num_iters)
else:
cluster_label1, centroids1 = spectral_clustering(features_batchDis1, K=args.k_eigen,
clusters=args.clusters, Niters=args.num_iters)
cluster_label2, centroids2 = spectral_clustering(features_batchDis2, K=args.k_eigen,
clusters=args.clusters, Niters=args.num_iters)
# instance-group discriminative learning
affnity1 = torch.mm(features_batchDis1, centroids2.t())
CLD_loss = criterion_cld(affnity1.div_(args.cld_t), cluster_label2)
affnity2 = torch.mm(features_batchDis2, centroids1.t())
CLD_loss = (CLD_loss + criterion_cld(affnity2.div_(args.cld_t), cluster_label1))/2
# get cluster label prediction accuracy
_, cluster_pred = torch.topk(affnity1, 1)
cluster_pred = cluster_pred.t()
correct = cluster_pred.eq(cluster_label2.view(1, -1).expand_as(cluster_pred))
correct_all = correct[0].view(-1).float().sum(0).mul_(100.0/inputs1.size(0))
train_CLD_acc.update(correct_all.item(), inputs1.size(0))
# total loss
loss = insDis_loss + args.Lambda*CLD_loss
if torch.isnan(loss):
print('INFO loss is nan! Backward skipped')
continue
# loss.backward()
if args.opt_level != "O0":
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
scheduler.step()
train_loss.update(loss.item(), inputs1.size(0))
train_CLD_loss.update(CLD_loss.item(), inputs1.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print info
lr = optimizer.param_groups[0]['lr']
if batch_idx % 10 == 0:
print('Epoch: [{}][{}/{}]'
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Data: {data_time.val:.3f} ({data_time.avg:.3f}) '
'lr: {:.6f} '
'Loss: {train_loss.val:.4f} ({train_loss.avg:.4f}) '
'CLD loss: {train_cld_loss.val:.4f} ({train_cld_loss.avg:.4f}) '
'Group acc: {train_CLD_acc.val:.4f} ({train_CLD_acc.avg:.4f})'.format(
epoch, batch_idx, len(trainloader), lr, batch_time=batch_time,
data_time=data_time, train_loss=train_loss, train_cld_loss=train_CLD_loss,
train_CLD_acc=train_CLD_acc))
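# --- Hedged illustration (not part of the original script): the CLD term above scores one view's
# features against the other view's centroids and trains them to predict the other view's cluster
# assignments, e.g. (sizes and temperature below are illustrative only):
#     f1, c2 = torch.randn(256, 128), torch.randn(10, 128)   # view-1 features, view-2 centroids
#     labels2 = torch.randint(0, 10, (256,))                  # view-2 cluster assignments
#     cld = nn.CrossEntropyLoss()(torch.mm(f1, c2.t()) / 0.07, labels2)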
num_files = glob.glob(args.save_dir + '/' + args.dataset + '_acc_train_cld*')
acc_file_name = args.dataset + '_acc_train_cld' + '_' + 'epochs_200' + '_' + str(len(num_files)) + '.txt'
for epoch in range(start_epoch, start_epoch+200):
train(epoch)
if epoch % 1 == 0:
if args.dataset == 'stl10-full':
acc1, acc5 = kNN(epoch, net, lemniscate, labeledTrainloader, testloader, 200, args.nce_t, True)
else:
acc1, acc5 = kNN(epoch, net, lemniscate, trainloader, testloader, 200, args.nce_t, args.recompute_memory)
write_log(args, acc_file_name, epoch, key='Acc', top1=acc1, top5=acc5)
if acc1 > best_acc1 or (epoch+1) % args.save_interval==0:
print('Saving..')
state = {
'net': net.state_dict(),
'lemniscate': lemniscate,
'acc': acc1,
'epoch': epoch,
}
if (epoch+1) % args.save_interval == 0:
file_name = "ckpt_{}_nce_t_{}_nce_k_{}_epoch_{}.t7".format(
args.dataset, str(args.nce_t), str(args.nce_k), str(epoch+1))
torch.save(state, os.path.join(args.save_dir,file_name))
if acc1 > best_acc1:
file_name = "ckpt_{}_nce_t_{}_nce_k_{}.t7".format(
args.dataset, str(args.nce_t), str(args.nce_k))
torch.save(state, os.path.join(args.save_dir,file_name))
best_acc1 = acc1
best_acc5 = acc5
print('best accuracy: {:.2f} {:.2f}'.format(best_acc1*100, best_acc5*100))
print(args)
if args.dataset == 'stl10-full':
acc1, acc5 = kNN(epoch, net, lemniscate, labeledTrainloader, testloader, 200, args.nce_t, True)
else:
acc1, acc5 = kNN(epoch, net, lemniscate, trainloader, testloader, 200, args.nce_t, True)
write_log(args, acc_file_name, epoch, key='Acc-best', top1=best_acc1, top5=best_acc5)
print('last accuracy: {:.2f} {:.2f}'.format(acc1*100, acc5*100))
print('best accuracy: {:.2f} {:.2f}'.format(best_acc1*100, best_acc5*100))
print(args)
``` |
{
"source": "john-montgomery2003/APIdesktopimage",
"score": 2
} |
#### File: APIdesktopimage/app/main.py
```python
from fastapi import FastAPI
from mangum import Mangum
app = FastAPI(title='Serverless Lambda FastAPI')
@app.get("/")
def main_endpoint_test():
return {"message": "Welcome CI/CD Pipeline with GitHub Actions!"}
@app.get("/haa")
def haa_endpoint_test():
return {"message": "Welcome CI/CD Pipeline with GitHub Actions!"}
handler = Mangum(app=app)
``` |
{
"source": "john-montgomery2003/badFormatter",
"score": 3
} |
#### File: badFormatter/badFormatter/forms.py
```python
def read_file(filename):
    # Read a file and return its lines with trailing newlines stripped.
    with open(filename, 'r') as f:
        return [line.replace('\n', '') for line in f]
def formatter(text):
    # Deliberately "bad" formatter: strip braces and trailing semicolons out of each line
    # and re-append them at its end (or at the previous non-empty line if the line empties).
    form = '{:<' + str(max([len(line) for line in text]) + 5) + '}'
for idx, line in enumerate(text):
line = form.format(line)
to_add = []
new_line = []
for charidx, char in enumerate(line):
if char in '{}':
to_add.append(char)
new_line.append(' ')
elif char == ';' and len(set(line[charidx:])) == 2:
to_add.append(char)
new_line.append(' ')
else:
new_line.append(char)
if len(set(new_line)) == 1 or len(set(new_line)) == 0:
found, i = False, 1
while not found:
if len(text[idx - i]) > 1:
break
i += 1
text[idx - i] = ''.join(list(text[idx - i]) + to_add)
text[idx] = ''
else:
new_line += to_add
text[idx] = ''.join(new_line)
result = ''
for line in text:
if line:
result += line + '\n' if line else ''
return result
def shitify_print(filename):
print(formatter(read_file(filename)))
def shitify_return(filename):
return formatter(read_file(filename))
def shitify_print_text(text):
text = [line.replace('\n', '') for line in text.split('\n')]
print(formatter(text))
def shitify_return_text(text):
text = [line.replace('\n', '') for line in text.split('\n')]
return formatter(text)
``` |
{
"source": "john-montgomery2003/mrkdwn2html",
"score": 3
} |
#### File: john-montgomery2003/mrkdwn2html/mrkdwn2html copy.py
```python
import re
def mrkdwnEval(stringValue):
if re.search("^#.*", stringValue):
return "Head"
elif re.search("^\* ", stringValue):
return "List"
elif re.search(".*\[.*\]\(.*\).*", stringValue):
return "Link"
elif re.search(".*\\*\\*.*\\*\\*.*", stringValue) or re.search(".*__.*__.*", stringValue):
return "Bold"
else:
return "Para"
def mrkdwnTitle2html(stringValue):
for i in range(6,0,-1):
if re.search( (f"\A{i*'#'}" ), stringValue):
return f"<h{i}> {mrkdwn2html(stringValue.replace('#',''))} </h{i}>"
def mrkdwnLink2html(stringValue):
if re.search("^[.*](.*)$", stringValue):
text,link = [string.replace("[","").replace(")","") for string in stringValue.split("](")]
return f"<a href={link}>{text}</a>"
else:
before = stringValue.split("[")
text,link = before[1].split(")")[0].split("](")
after = stringValue.replace(f"{before[0]}[{text}]({link})","")
if mrkdwnEval(before[0]) != "Para":
before[0] = mrkdwn2html(before[0])
if mrkdwnEval(after) != "Para":
after = mrkdwn2html(after)
if mrkdwnEval(text) != "Para":
text = mrkdwn2html(text)
return f"{before[0]} <a href={link}>{text}</a> {after}"
def mrkdwnBold2html(stringValue):
if re.search("^\\*\\*.*\\*\\*$", stringValue) or re.search("^__.*__$", stringValue):
return f"<b>{stringValue.replace('*','').replace('__','')}</b>"
marker = "**" if "*" in stringValue else "__"
before, text, after = stringValue.split(marker,2)
if mrkdwnEval(before) != "Para":
before = mrkdwn2html(before)
if mrkdwnEval(after) != "Para":
after = mrkdwn2html(after)
if mrkdwnEval(after) != "Para":
after = mrkdwn2html(after)
return f"{before}<b>{text}</b>{after}"
def mrkdwn2html(mrkdwn):
html = ""
for string in mrkdwn.splitlines():
if string:
type = mrkdwnEval(string)
if type == "Para":
html = html + f"<p>{string}</p>"
elif type == "Link":
html = html + mrkdwnLink2html(string)
elif type == "Head":
html = html + mrkdwnTitle2html(string)
elif type == "Bold":
html = html + mrkdwnBold2html(string)
else:
html = html + f"<li>{string.replace('* ','')}</li>"
return html
```
#### File: john-montgomery2003/mrkdwn2html/mrkdwn2html.py
```python
import re
def mrkdwnEval(stringValue):
if re.search("^#.*", stringValue):
#The # is used to denote a header
return "Head"
elif re.search("^\* ", stringValue):
        #The * is used to denote a UL - note that the * character has to be escaped with \ as it is a regex metacharacter
return "List"
elif re.search(".*\[.*\]\(.*\).*", stringValue):
#The []() is used to denote a link - note that this can be inside of another Paragraph or other item
        #The function's only goal is to identify the presence of a link in the string
return "Link"
elif re.search(".*\\*\\*.*\\*\\*.*", stringValue) or re.search(".*__.*__.*", stringValue):
#The ** or __ is used to denote bold - note that this can be inside of another Paragraph
        #The function's only goal is to identify the presence of bold markers in the string
return "Bold"
else:
#If none of these exist in the string, we can reasonably assume that it is a simple Paragraph
return "Para"
def mrkdwnTitle2html(stringValue):
#Due to the complexity with 6 possible heading sizes, Title conversion has a dedicated function
    #Check heading levels in descending order (###### down to #) so the longest marker matches first
for i in range(6,0,-1):
if re.search( (f"\A{i*'#'}" ), stringValue):
return f"<h{i}> {mrkdwn2html(stringValue.replace('#',''))} </h{i}>"
def mrkdwnLink2html(stringValue):
#First test to see if the entire string is a link
if re.search("^[.*](.*)$", stringValue):
text,link = [string.replace("[","").replace(")","") for string in stringValue.split("](")]
return f"<a href={link}>{text}</a>"
    #In this case we can assume that the link is embedded in some other text
else:
#Work out the string before the link
before = stringValue.split("[")
#Work out href and link text
text,link = before[1].split(")")[0].split("](")
#Work out the string after the link
after = stringValue.replace(f"{before[0]}[{text}]({link})","")
#Check for additional formatting
if mrkdwnEval(before[0]) != "Para":
before[0] = mrkdwn2html(before[0])
if mrkdwnEval(after) != "Para":
after = mrkdwn2html(after)
if mrkdwnEval(text) != "Para":
text = mrkdwn2html(text)
#Return final string
return f"{before[0]} <a href={link}>{text}</a> {after}"
def mrkdwnBold2html(stringValue):
#Similar to the link function
#First test to see if the entire string is in bold
if re.search("^\\*\\*.*\\*\\*$", stringValue) or re.search("^__.*__$", stringValue):
return f"<b>{stringValue.replace('*','').replace('__','')}</b>"
    #In this case we can assume that the bold is embedded in some other text
marker = "**" if "*" in stringValue else "__"
#Due to 2 possible markers for bold
#Work out before, bold and after text
before, text, after = stringValue.split(marker,2)
    #Check the new strings for more markers and convert
if mrkdwnEval(before) != "Para":
before = mrkdwn2html(before)
if mrkdwnEval(after) != "Para":
after = mrkdwn2html(after)
if mrkdwnEval(after) != "Para":
after = mrkdwn2html(after)
return f"{before}<b>{text}</b>{after}"
def mrkdwn2html(mrkdwn):
#Generate an empty string
html = ""
#Loop every line in the string
for string in mrkdwn.splitlines():
#Ensure line is not blank, an empty line can be passed
if string:
            #Evaluate the string's type
type = mrkdwnEval(string)
#Perform the conversion
if type == "Para":
html = html + f"<p>{string}</p>"
elif type == "Link":
html = html + mrkdwnLink2html(string)
elif type == "Head":
html = html + mrkdwnTitle2html(string)
elif type == "Bold":
html = html + mrkdwnBold2html(string)
else:
html = html + f"<li>{string.replace('* ','')}</li>"
#Return final html
    #NOTE - This does not include <html>/<head> wrappers and other document-level formatting
return html
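# --- Hedged usage example (illustrative; not part of the original module) ---
if __name__ == "__main__":
    sample = "# Title\nSome **bold** text\n* item one\nSee [docs](https://example.com) here"
    # Prints a flat HTML fragment with the heading, bold span, list item and link converted.
    print(mrkdwn2html(sample))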
``` |
{
"source": "JohnMorrisonn/DS-Unit-3-Sprint-2-SQL-and-Databases",
"score": 3
} |
#### File: DS-Unit-3-Sprint-2-SQL-and-Databases/module1-introduction-to-sql/rpg_queries.py
```python
import pandas as pd
import sqlite3
import numpy
import sys
# ## The RPG Dataset
# In[105]:
conn = sqlite3.connect('rpg_db (1).sqlite3')
# In[106]:
c = conn.cursor()
c.execute('SELECT * FROM charactercreator_character').fetchall()
conn.commit()
# In[11]:
query = 'SELECT * FROM charactercreator_character'
pd.read_sql(query, conn)
# ### How many characters are in the dataset?
# In[19]:
# How many characters are in the dataset
c.execute('SELECT COUNT(character_id) FROM charactercreator_character')
result = c.fetchone()
result
# ### How many tables are in the dataset?
# In[40]:
def get_sql_tables(db_con):
c = db_con.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table';")
return c.fetchall()
get_sql_tables(conn)
# ### How many characters of each subclass?
# In[75]:
# How many of each specific subclass
sub_class = ['charactercreator_cleric',
'charactercreator_fighter',
'charactercreator_mage',
'charactercreator_thief',]
def get_subclass_count(db_con, tables):
c = db_con.cursor()
c.execute('SELECT COUNT(character_ptr_id) FROM {}'.format(tables))
return c.fetchone()
for DB in sub_class:
print(DB, get_subclass_count(conn, DB))
# In[72]:
# Alternative way
query_cleric = 'SELECT COUNT(*) FROM charactercreator_character AS cc INNER JOIN charactercreator_cleric AS cleric ON cc.character_id = cleric.character_ptr_id'
c.execute(query_cleric)
result = c.fetchone()
print('cleric', result)
query_mage = 'SELECT COUNT(*) FROM charactercreator_character AS cc INNER JOIN charactercreator_mage AS mage ON cc.character_id = mage.character_ptr_id'
c.execute(query_mage)
result = c.fetchone()
print('mage', result)
query_fighter = 'SELECT COUNT(*) FROM charactercreator_character AS cc INNER JOIN charactercreator_fighter AS fighter ON cc.character_id = fighter.character_ptr_id'
c.execute(query_fighter)
result = c.fetchone()
print('fighter', result)
query_thief = 'SELECT COUNT(*) FROM charactercreator_character AS cc INNER JOIN charactercreator_thief AS thief ON cc.character_id = thief.character_ptr_id'
c.execute(query_thief)
result = c.fetchone()
print('thief', result)
# ### How many items are there total?
# In[83]:
# How many total items?
query = 'SELECT COUNT(*) FROM armory_item'
c.execute(query).fetchone()
# ### And Weapons?
# In[103]:
# How many are weapons and how many are not
query_weapon = 'SELECT item_id FROM armory_item INNER JOIN armory_weapon ON item_id = item_ptr_id'
print('Weapon count', len(c.execute(query_weapon).fetchall()))
query_non_weapon = 'SELECT item_id FROM armory_item EXCEPT SELECT item_ptr_id FROM armory_weapon'
print('Not weapon', len(c.execute(query_non_weapon).fetchall()))
# ### How many items and weapons per character?
# In[82]:
# How many items does each character have
query = 'SELECT character_id as char_id, COUNT(item_id) FROM charactercreator_character_inventory GROUP BY character_id LIMIT 20'
df = pd.read_sql(query, conn)
df
# In[117]:
# How many Weapons does each character have
query = 'SELECT character_id, COUNT(item_id) FROM charactercreator_character_inventory INNER JOIN armory_weapon ON item_id = item_ptr_id GROUP BY character_id LIMIT 20 '
df = pd.read_sql(query, conn)
df
# ### What is the average number of items and weapons carried?
# In[124]:
# How many items on Average does each character have
query = '''SELECT AVG(item_count) FROM
(SELECT character_id, COUNT(item_id) as item_count FROM charactercreator_character_inventory GROUP BY character_id) '''
df = pd.read_sql(query, conn)
df
# In[125]:
# How many weapons on Average does each character have
# This results in the average from those WITH weapons. Not those without weapons
query = '''SELECT AVG(item_count) FROM
(SELECT character_id, COUNT(item_id) as item_count FROM charactercreator_character_inventory INNER JOIN armory_weapon ON item_id = item_ptr_id GROUP BY character_id) '''
df = pd.read_sql(query, conn)
df
# In[126]:
# How many weapons on Average does each character have, INCLUDING those without weapons
query = '''SELECT AVG(wc) FROM (SELECT cci.character_id as `Character Id`, COUNT(aw.item_ptr_id) as wc
FROM charactercreator_character_inventory as cci INNER JOIN armory_item as ai ON cci.item_id = ai.item_id
LEFT JOIN armory_weapon as aw ON ai.item_id = aw.item_ptr_id
GROUP BY cci.character_id)
'''
df = pd.read_sql(query,conn)
df
``` |
{
"source": "JohnMorrisonn/DS-Unit-3-Sprint-3-Productization-and-Cloud",
"score": 3
} |
#### File: Sprint-challenge/aq_app/aq_dashboard.py
```python
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
from aq_app import openaq
from aq_app import APP, DB
# Set the api
api = openaq.OpenAQ()
# function to pull pm25 air values with utc date per city
def get_pm25(api, city):
measurements = api.measurements(city=city, parameter='pm25')
results = measurements[1]['results']
values = []
for i in range(len(results)):
utc = results[i]['date']['utc']
value = results[i]['value']
values.append((utc, value))
return values
# function to add new records to DB
def add_current_record(api, city):
measurements = api.measurements(city=city, parameter='pm25')
results = measurements[1]['results']
for i in range(len(results)):
utc = results[i]['date']['utc']
value = results[i]['value']
new_record = Record(datetime=str(utc), value=str(value))
DB.session.add(new_record)
# root currently displays filtered value lists
@APP.route('/')
def root():
print('Something here')
filter_1 = Record.query.filter(Record.value > 10).all()
filter_2 = Record.query.filter(Record.value < 2.5).all()
return render_template('record.html', filter_1=filter_1, filter_2=filter_2)
# raw file from Part 2
@APP.route('/raw')
def raw():
return str(get_pm25(api, 'Los Angeles'))
# refresh data
@APP.route('/refresh')
def refresh():
DB.drop_all()
DB.create_all()
add_current_record(api, 'Los Angeles')
DB.session.commit()
return ('Data Refreshed!')
class Record(DB.Model):
id = DB.Column(DB.Integer, primary_key=True)
datetime = DB.Column(DB.String(25))
value = DB.Column(DB.Float, nullable=False)
def __repr__(self):
        return '<Time {}, Value {}>'.format(self.datetime, self.value)
``` |
{
"source": "JohnMorrisonn/JM-Data-Engineer2",
"score": 2
} |
#### File: JohnMorrisonn/JM-Data-Engineer2/app.py
```python
from flask import Flask, render_template, request, jsonify
from decouple import config
from functions import get_query, custom_stats, predict_proba
from visualizations import make_visuals
from mysql.connector.cursor import MySQLCursorPrepared
import os
import pandas as pd
import mysql.connector
import pickle
# Remove later ##
flipped = {0: 'Space Exploration',
1: 'Wearables',
2: 'Hardware',
3: 'Software',
4: 'Web',
5: 'Sound',
6: "Children's Books",
7: 'Calendars',
8: 'Art Books',
9: 'Fiction',
10: 'Nature',
11: 'People',
12: 'Letterpress',
13: 'Literary Journals',
14: 'Nonfiction',
15: 'Footwear',
16: 'Jewelry',
17: 'Pet Fashion',
18: 'Ready-to-wear',
19: 'Apparel',
20: 'Animation',
21: 'Comedy',
22: 'Documentary',
23: 'Action',
24: 'Textiles',
25: 'Sculpture',
26: 'Public Art',
27: 'Performance Art',
28: 'Crafts',
29: 'DIY',
30: 'Woodworking',
31: 'Knitting',
32: 'Candles',
33: 'Quilts',
34: 'Glass',
35: 'Embroidery',
36: 'Crochet',
37: 'Pottery',
38: 'Product Design',
39: 'Graphic Design',
40: 'Design',
41: 'Typography',
42: 'Interactive Design',
43: 'Civic Design',
44: 'Architecture',
45: 'Shorts',
46: 'Narrative Film',
47: 'Film & Video',
48: 'Webseries',
49: 'Thrillers',
50: 'Family',
51: 'Experimental',
52: 'Science Fiction',
53: 'Fantasy',
54: 'Music Videos',
55: 'Horror',
56: 'Movie Theaters',
57: 'Drama',
58: 'Romance',
59: 'Television',
60: 'Festivals',
61: 'Food',
62: 'Small Batch',
63: "Farmer's Markets",
64: 'Restaurants',
65: 'Farms',
66: 'Drinks',
67: 'Events',
68: 'Food Trucks',
69: 'Cookbooks',
70: 'Vegan',
71: 'Spaces',
72: 'Community Gardens',
73: 'Bacon',
74: 'Fashion',
75: 'Accessories',
76: 'Couture',
77: 'Childrenswear',
78: 'Places',
79: 'Digital Art',
80: 'Flight',
81: 'Graphic Novels',
82: 'Dance',
83: 'R&B',
84: 'Performances',
85: 'Gaming Hardware',
86: 'Mobile Games',
87: 'Gadgets',
88: 'Young Adult',
89: 'Illustration',
90: 'Translations',
91: 'Zines',
92: 'Weaving',
93: 'Ceramics',
94: 'Radio & Podcasts',
95: 'Immersive',
96: 'Technology',
97: 'Blues',
98: 'DIY Electronics',
99: 'Jazz',
100: 'Electronic Music',
101: 'Apps',
102: 'Camera Equipment',
103: 'Robots',
104: '3D Printing',
105: 'Workshops',
106: 'Poetry',
107: 'Photobooks',
108: 'Photography',
109: 'World Music',
110: 'Mixed Media',
111: 'Residencies',
112: 'Fine Art',
113: 'Classical Music',
114: 'Printing',
115: 'Webcomics',
116: 'Animals',
117: 'Publishing',
118: 'Kids',
119: 'Academic',
120: 'Periodicals',
121: 'Anthologies',
122: 'Indie Rock',
123: 'Comic Books',
124: 'Games',
125: 'Tabletop Games',
126: 'Installations',
127: 'Conceptual Art',
128: 'Playing Cards',
129: 'Puzzles',
130: 'Metal',
131: 'Video Games',
132: 'Photo',
133: 'Pop',
134: 'Rock',
135: 'Country & Folk',
136: 'Print',
137: 'Video',
138: 'Latin',
139: 'Faith',
140: 'Art',
141: 'Painting',
142: 'Video Art',
143: 'Makerspaces',
144: 'Hip-Hop',
145: 'Music',
146: 'Stationery',
147: 'Punk',
148: 'Fabrication Tools',
149: 'Chiptune',
150: 'Musical',
151: 'Theater',
152: 'Comics',
153: 'Plays',
154: 'Journalism',
155: 'Audio',
156: 'Literary Spaces',
157: 'Live Games',
158: 'Taxidermy'}
# Create the app
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
# Load in the baseline model and the tfidf pickle
with open('model.pkl', 'rb') as f:
    model = pickle.load(f)
with open('tfidf.pkl', 'rb') as f:
    tfidf = pickle.load(f)
# Create routes to post the prediction
@app.route('/', methods=['POST'])
def predict():
"""
Uses randomforest/NLP to classify if the user's input
will succeed or not and adds to the json dict output.
"""
# User input from front-end
data = request.get_json(force=True)
# Change json to dataframe
data.update((x, [y]) for x, y in data.items())
data_df = pd.DataFrame.from_dict(data)
    # Drop free-text fields that the model was not trained on
drop_columns = ['campaignName', 'description']
data_df.drop(columns = drop_columns, inplace=True)
# Results for RF/NLP model
model_result = model.predict(data_df)
# --------------------------------------------------------------
# Create connection and cursor for querying custom/general stats
mydb = mysql.connector.connect(
host = config('hostname'),
user = config('username'),
passwd = config('password'),
db = config('database_name'),
use_pure=True
)
cursor = mydb.cursor(cursor_class=MySQLCursorPrepared)
# Filter out category and monetaryGoal from user data
category = data_df['categories'].map(flipped)[0]
goal = int(data_df['monetaryGoal'][0])
# Custom stats
custom_results = custom_stats(category, goal, cursor)
# --------------------------------------------------------------
probability = predict_proba(model, data_df)
# Final output dict
output = {'results': int(model_result[0]),
'custom_stats': {
'raising_more_success' : custom_results[0],
'category_success' : custom_results[1],
'category_average' : custom_results[2],
'average_duration' : custom_results[3],
'average_backers' : custom_results[4],
'average_over' : custom_results[5]
},
'prediction_results': int(probability)
}
return jsonify(output)
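# --- Hedged client sketch (illustrative only; field names beyond those used above depend on the
# pickled model's training features):
#     import requests
#     payload = {"campaignName": "My game", "description": "...", "categories": 125, "monetaryGoal": 5000}
#     requests.post("http://localhost:5000/", json=payload).json()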
@app.route('/visualizations', methods=['POST'])
def visualizations():
# User input from front-end
data = request.get_json(force=True)
# Change json to dataframe
data.update((x, [y]) for x, y in data.items())
data_df = pd.DataFrame.from_dict(data)
    # Drop free-text fields that the model was not trained on
drop_columns = ['campaignName', 'description']
data_df.drop(columns = drop_columns, inplace=True)
return make_visuals(data_df)
if __name__ == "__main__":
app.run(debug=True)
``` |
{
"source": "johnmous/sfaira",
"score": 2
} |
#### File: dataloaders/databases/super_group.py
```python
from typing import Union
from sfaira.data.dataloaders.base.dataset_group import DatasetGroup, DatasetSuperGroup
from sfaira.data.dataloaders.databases.cellxgene import DatasetSuperGroupCellxgene
class DatasetSuperGroupDatabases(DatasetSuperGroup):
def __init__(
self,
data_path: Union[str, None] = None,
meta_path: Union[str, None] = None,
cache_path: Union[str, None] = None,
cache_metadata: bool = False,
):
dataset_super_groups = [
DatasetSuperGroupCellxgene(
data_path=data_path,
meta_path=meta_path,
cache_path=cache_path,
cache_metadata=cache_metadata,
),
]
super().__init__(dataset_groups=dataset_super_groups)
```
#### File: loaders/d10_1038_s41586_020_2922_4/human_lung_2020_x_travaglini_001.py
```python
import anndata
import os
import scipy.sparse
import numpy as np
def load(data_dir, sample_fn, **kwargs):
fn = os.path.join(data_dir, sample_fn)
if sample_fn.split("_")[0] == "droplet":
norm_const = 10000
sf_key = "nUMI"
else:
norm_const = 1000000
sf_key = "nReads"
adata = anndata.read(fn)
adata.X = scipy.sparse.csc_matrix(adata.X)
    # the stored matrix is log1p-normalised; undo the log and rescale by the per-cell size factor
    adata.X = np.expm1(adata.X)
adata.X = adata.X.multiply(scipy.sparse.csc_matrix(adata.obs[sf_key].values[:, None])).multiply(1 / norm_const)
return adata
```
#### File: loaders/d10_1101_2020_10_12_335331/human_blood_2020_10x_hao_001.py
```python
import anndata
import gzip
import os
import pandas as pd
import scipy.io
import tarfile
def load(data_dir, **kwargs):
fn = os.path.join(data_dir, "GSE164378_RAW.tar")
adatas = []
with tarfile.open(fn) as tar:
samples = ['GSM5008737_RNA_3P', 'GSM5008738_ADT_3P']
for sample in samples:
with gzip.open(tar.extractfile(sample + '-matrix.mtx.gz'), 'rb') as mm:
x = scipy.io.mmread(mm).T.tocsr()
obs = pd.read_csv(tar.extractfile(sample + '-barcodes.tsv.gz'), compression='gzip',
header=None, sep='\t', index_col=0)
obs.index.name = None
var = pd.read_csv(tar.extractfile(sample + '-features.tsv.gz'), compression='gzip',
header=None, sep='\t').iloc[:, :1]
var.columns = ['names']
var.index = var['names'].values
adata = anndata.AnnData(X=x, obs=obs, var=var)
adata.var_names_make_unique()
adatas.append(adata)
tar.close()
adata = adatas[0]
protein = adatas[1]
meta = pd.read_csv(os.path.join(data_dir, 'GSE164378_sc.meta.data_3P.csv.gz'), index_col=0)
adata.obs = adata.obs.join(meta)
adata.obsm['protein_expression'] = pd.DataFrame(protein.X.A, columns=protein.var_names, index=protein.obs_names)
return adata
```
#### File: loaders/d10_1126_science_abe6474/homosapiens_x_2022_10x5transcriptionprofiling_zheng_001.py
```python
import anndata
import gzip
import os
import pandas as pd
import tarfile
import scanpy as sc
import scipy.sparse
def ir_read_from_chain_gz(fn, sep, barcodes, id_col, locus_col, v_call_col, d_call_col, j_call_col, c_call_col,
productive_col, junction_col, junction_aa_col, consensus_count_col, duplicate_count_col):
"""
Util function to read the VDJ files with custom record format
Replaces ir.io.read_airr.
ToDo: Can this code be moved to scirpy? This file format does not seem very rare.
E.g. of the form d10_1126_science_abe6474::GSE156728_10X_VDJ.merge.txt.gz.
Issues with ir.io.read_airr on this file are:
- gzip compression
- non-standard column names
- selected extraction of cells into AIRR format to save time
"""
from scirpy.io._io import from_airr_cells, AirrCell, DEFAULT_AIRR_CELL_ATTRIBUTES, DEFAULT_AIRR_FIELDS
tab = pd.read_csv(fn, sep=sep, compression="gzip")
airr_cells = {}
for bc in barcodes:
airr_cells[bc] = AirrCell(
cell_id=bc,
cell_attribute_fields=DEFAULT_AIRR_CELL_ATTRIBUTES,
)
# Get all chains for this barcode:
tab_i = tab.loc[tab[id_col].values == bc, :]
for i in range(tab_i.shape[0]):
chain = {
"productive": tab_i[productive_col].values[i],
"locus": tab_i[locus_col].values[i],
"v_call": tab_i[v_call_col].values[i],
"d_call": tab_i[d_call_col].values[i],
"j_call": tab_i[j_call_col].values[i],
"c_call": tab_i[c_call_col].values[i],
"junction": tab_i[junction_col].values[i],
"junction_aa": tab_i[junction_aa_col].values[i],
"consensus_count": tab_i[consensus_count_col].values[i],
"duplicate_count": tab_i[duplicate_count_col].values[i],
"sequence_id": f"{bc}_{i}",
"sequence": None,
"rev_comp": None,
"sequence_alignment": None,
"germline_alignment": None,
"v_cigar": None,
"d_cigar": None,
"j_cigar": None,
}
airr_cells[bc].add_chain(chain)
return from_airr_cells(airr_cells.values(), include_fields=DEFAULT_AIRR_FIELDS)
def load(data_dir, sample_fn, **kwargs):
import scirpy as ir
fn = os.path.join(data_dir, sample_fn + ".counts.txt.gz")
# Some of the count matrices are in a tar archive, the rest is given as individual downloads
if sample_fn.startswith("GSE156728_RAW/"):
fn_tar = os.path.join(data_dir, "GSE156728_RAW.tar")
with tarfile.open(fn_tar) as tar:
with gzip.open(tar.extractfile(sample_fn.split("GSE156728_RAW/")[-1] + ".counts.txt.gz"), "rb") as f:
tab = pd.read_csv(f, sep="\t", index_col=0).T
adata = anndata.AnnData(tab)
else:
adata = sc.read(fn).transpose()
adata.X = scipy.sparse.csr_matrix(adata.X)
fn_meta = os.path.join(data_dir, "GSE156728_metadata.txt.gz")
tab_meta = pd.read_csv(fn_meta, sep="\t", compression="gzip")
tab_meta.index = tab_meta["cellID"].values
del tab_meta["cellID"]
adata.obs = tab_meta.loc[adata.obs_names, :]
fn_vdj = os.path.join(data_dir, "GSE156728_10X_VDJ.merge.txt.gz")
adata_tcr = ir_read_from_chain_gz(
fn=fn_vdj,
barcodes=adata.obs_names,
sep="\t",
id_col="barcode",
locus_col="chain",
v_call_col="v_gene",
d_call_col="d_gene",
j_call_col="j_gene",
c_call_col="c_gene",
productive_col="productive",
junction_col="cdr3_nt",
junction_aa_col="cdr3",
consensus_count_col="umis",
duplicate_count_col="reads",
)
ir.pp.merge_with_ir(adata, adata_tcr)
return adata
```
#### File: loaders/d10_1126_science_abj4008/homosapiens_blood_2022_10x3v3_schmidt_001.py
```python
import os
import pandas as pd
import scanpy as sc
def load(data_dir, sample_fn, **kwargs):
adata = sc.read_10x_mtx(data_dir, prefix="GSE190604_")
fn_meta = os.path.join(data_dir, "GSE190604_cellranger-guidecalls-aggregated-unfiltered.txt.gz")
tab_meta = pd.read_csv(fn_meta, compression="gzip", sep="\t")
tab_meta.index = tab_meta["cell_barcode"].values
del tab_meta["cell_barcode"]
adata.obs = pd.concat([adata.obs, tab_meta], axis=1)
return adata
```
#### File: dataloaders/loaders/super_group.py
```python
import pydoc
import os
from typing import List, Union
from warnings import warn
from sfaira.data.dataloaders.base.dataset_group import DatasetSuperGroup, DatasetGroupDirectoryOriented
class DatasetSuperGroupLoaders(DatasetSuperGroup):
dataset_groups: List[DatasetGroupDirectoryOriented]
def __init__(
self,
data_path: Union[str, None] = None,
meta_path: Union[str, None] = None,
cache_path: Union[str, None] = None,
):
"""
Class that sits on top of a directory of data set directories that each contain a data set group.
        Directories are sub-selected by the internal prefix defined below (`dir_prefix`).
:param data_path:
:param meta_path:
:param cache_path:
"""
        # Directory choice hyperparameters:
dir_prefix = "d"
dir_exclude = []
# Collect all data loaders from files in directory:
dataset_groups = []
cwd = os.path.dirname(__file__)
for f in os.listdir(cwd):
if os.path.isdir(os.path.join(cwd, f)): # only directories
if f[:len(dir_prefix)] == dir_prefix and f not in dir_exclude: # Narrow down to data set directories
                    path_dsg = pydoc.locate(f"sfaira.data.dataloaders.loaders.{f}.FILE_PATH")
                    if path_dsg is not None:
try:
dsg = DatasetGroupDirectoryOriented(
file_base=path_dsg,
data_path=data_path,
meta_path=meta_path,
cache_path=cache_path
)
dsg.collection_id = f
dataset_groups.append(dsg)
except IndexError as e:
raise IndexError(f"{e} for '{cwd}', '{f}', '{path_dsg}'")
else:
warn(f"DatasetGroupDirectoryOriented was None for {f}")
super().__init__(dataset_groups=dataset_groups)
```
#### File: store/carts/utils.py
```python
from typing import Union, Tuple, Dict
import pandas as pd
def split_batch(x: Union[Tuple, Dict]):
"""
Splits retrieval batch into consumption batches of length 1.
Often, end-user consumption batches would be observation-wise, ie yield a first dimension of length 1.
:param x: Tuple or Dict
One of the following:
* Data tuple of length 1 or 2: (input,) or (input, output,), where both input and output are also a tuple,
but of batch-dimensioned tensors.
* Dict
"""
if isinstance(x, tuple):
batch_dim = x[0][0].shape[0]
for i in range(batch_dim):
output = []
for y in x:
if isinstance(y, tuple):
output.append(tuple([z.iloc[[i], :] if isinstance(z, pd.DataFrame) else z[i, :] for z in y]))
else:
output.append(y.iloc[[i], :] if isinstance(y, pd.DataFrame) else y[i, :])
yield tuple(output)
elif isinstance(x, dict):
keys = list(x.keys())
batch_dim = x[keys[0]].shape[0]
for i in range(batch_dim):
yield {key: x[key].iloc[[i], :] if isinstance(x[key], pd.DataFrame) else x[key][i, :] for key in keys}
else:
raise ValueError('Input to split_batch(x) has to be either a Tuple or a Dict')
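# --- Hedged usage sketch (illustrative; not part of the original module) ---
if __name__ == "__main__":
    import numpy as np
    batch = {"x": np.zeros((4, 10)), "obs": np.ones((4, 2))}
    # Yields four single-observation batches, one row per key and draw.
    for single in split_batch(batch):
        assert single["x"].shape == (10,)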
```
#### File: data/utils_scripts/test_store.py
```python
import os
import sys
import time
import warnings
from typing import Dict, List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
import sfaira
# Set global variables.
print(f'sys.argv: {sys.argv}')
N_DRAWS = 320_000
LEN_IDX = 5_000_000
BATCH_SIZE = 1 # must be 0 or 1
OBS_KEYS = ['cell_type', 'cell_line', 'organism', 'organ'] # list of obs_keys to retrieve
RETRIEVAL_BATCH_SIZE = 65536 # number of samples to retrieve at once
path_store_h5ad = str(sys.argv[1])
path_store_dao = str(sys.argv[2])
path_out = str(sys.argv[3])
store_type = ['dao']
if path_store_h5ad.lower() != 'none':
store_type.append("h5ad")
time_measurements_initiate = {'storage_format': [], 'instantiation_time': [], 'run': []}
memory_measurements_initiate = {'storage_format': [], 'memory_usage': [], 'run': []}
time_measurements = {
'scenario': [], 'storage_format': [], 'data_access_type': [], 'varsubset': [], 'avg_time_per_sample': []
}
def _map_fn(x_sample, obs_sample):
gene_expression = np.asarray(x_sample)
obs = tuple(obs_sample[obs_key].to_numpy().reshape((-1, 1)) for obs_key in OBS_KEYS)
x = (gene_expression,)
y = (gene_expression,) + obs
return x, y
def _time_gen(_store: sfaira.data.store.StoreSingleFeatureSpace,
store_format: str,
kwargs_generator: Dict[str, any],
num_draws: int) -> List[float]:
if store_format == "h5ad":
del kwargs_generator["random_access"]
if kwargs_generator["var_subset"]:
gc = sfaira.versions.genomes.genomes.GenomeContainer(organism='Homo sapiens', release='104')
gc.set(symbols=["VTA1", "MLXIPL", "BAZ1B", "RANBP9", "PPARGC1A", "DDX25", "CRYAB"])
_store.genome_container = gc
del kwargs_generator["var_subset"]
_gen = (
_store
.checkout(**kwargs_generator)
.iterator()
)
_measurements = []
for _ in range(num_draws):
_t0 = time.perf_counter()
_ = next(_gen)
_measurements.append(time.perf_counter() - _t0)
return _measurements
def _create_generator_kwargs(index: np.ndarray,
var_subset: bool,
random_batch_access: bool,
random_access: bool):
if random_access and random_batch_access:
raise ValueError('You cannot select "random_access" and "random_batch_access" at the same time')
return {
"idx": index,
"batch_size": BATCH_SIZE,
"retrieval_batch_size": RETRIEVAL_BATCH_SIZE,
"map_fn": _map_fn,
"obs_keys": OBS_KEYS,
"randomized_batch_access": random_batch_access,
"random_access": random_access,
"var_subset": var_subset,
"return_dense": True
}
# check that the h5ad and dao stores contain the same datasets
if path_store_h5ad.lower() != 'none':
store = sfaira.data.load_store(cache_path=path_store_dao, store_format="dao").stores['Homo sapiens']
data_set_lengths_dao = {dataset: len(idx_arr) for dataset, idx_arr in store.indices.items()}
store = sfaira.data.load_store(cache_path=path_store_h5ad, store_format="h5ad").stores['Homo sapiens']
data_set_lengths_h5ad = {dataset: len(idx_arr) for dataset, idx_arr in store.indices.items()}
for dataset in set(list(data_set_lengths_dao.keys()) + list(data_set_lengths_h5ad.keys())):
if dataset not in data_set_lengths_dao:
warnings.warn(f'{dataset} dataset missing in dao storage')
continue
if dataset not in data_set_lengths_h5ad:
warnings.warn(f'{dataset} dataset missing in h5ad storage')
continue
n_cells_dao = data_set_lengths_dao[dataset]
n_cells_h5ad = data_set_lengths_h5ad[dataset]
if n_cells_dao != n_cells_h5ad:
warnings.warn(
f'{dataset} dataset has different lengths in dao (n={n_cells_dao} cells) storage '
f'and h5ad storage (n={n_cells_h5ad} cells)'
)
for store_type_i in store_type:
print(f'Benchmarking {store_type_i} storage')
path_store = path_store_h5ad if store_type_i == "h5ad" else path_store_dao
print('Benchmarking storage instantiation')
for i in range(3):
t0 = time.perf_counter()
store = sfaira.data.load_store(cache_path=path_store, store_format=store_type_i)
# Include initialisation of generator in timing to time overhead generated here.
_ = store.checkout(map_fn=_map_fn, obs_keys=OBS_KEYS).iterator()
time_measurements_initiate['instantiation_time'].append(time.perf_counter() - t0)
time_measurements_initiate['storage_format'].append(store_type_i)
time_measurements_initiate['run'].append(i)
memory_measurements_initiate['memory_usage'].append(np.sum(list(store.adata_memory_footprint.values())))
memory_measurements_initiate['storage_format'].append(store_type_i)
memory_measurements_initiate['run'].append(i)
# Prepare benchmark
store = sfaira.data.load_store(cache_path=path_store, store_format=store_type_i).stores['Homo sapiens']
if BATCH_SIZE == 1:
n_draws = int(N_DRAWS)
else:
n_draws = int(N_DRAWS * RETRIEVAL_BATCH_SIZE)
for scenario in ['seq_idx', 'random_idx']:
print(f'Benchmarking scenario: {scenario}')
if scenario == 'seq_idx':
idx = np.arange(0, min(LEN_IDX, store.n_obs), dtype=int)
elif scenario == 'random_idx':
idx = np.arange(0, min(LEN_IDX, store.n_obs), dtype=int)
np.random.shuffle(idx)
else:
raise ValueError(f'scenario={scenario} is not defined')
for data_access_type in ['sequential', 'random-batch-access', 'random-access']:
for varsubset in [False, True]:
time_measurements['scenario'].append(scenario)
time_measurements['storage_format'].append(store_type_i)
time_measurements['data_access_type'].append(data_access_type)
time_measurements['varsubset'].append(varsubset)
if data_access_type == 'sequential':
random_batch_access_ = False
random_access_ = False
elif data_access_type == 'random-batch-access':
random_batch_access_ = True
random_access_ = False
elif data_access_type == 'random-access':
random_batch_access_ = False
random_access_ = True
else:
raise ValueError(f'data_access_type={data_access_type} is not supported')
kwargs = _create_generator_kwargs(idx, varsubset, random_batch_access_, random_access_)
measurements = _time_gen(store, store_type_i, kwargs, num_draws=min(n_draws, len(idx)))
time_measurements['avg_time_per_sample'].append(np.mean(measurements))
# prepare results
instantiation_time_df = pd.DataFrame(time_measurements_initiate)
memory_usage_df = pd.DataFrame(memory_measurements_initiate)
res_df = pd.DataFrame(time_measurements).assign(avg_time_per_sample=lambda xx: xx.avg_time_per_sample * 10**6)
# save results to csv
res_df.to_csv(os.path.join(path_out, 'data_store_benchmark.csv'))
instantiation_time_df.to_csv(os.path.join(path_out, 'instantiation_time_benchmark.csv'))
memory_usage_df.to_csv(os.path.join(path_out, 'memory_usage_benchmark.csv'))
# create figures
fig, axs = plt.subplots(nrows=3, ncols=2, figsize=(12, 12))
axs[0, 0].set_title('Storage instantiation time')
sb.barplot(
    x='storage_format', y='instantiation_time', data=instantiation_time_df, ax=axs[0, 0]
)
axs[0, 0].set_ylabel('time [s]')
axs[0, 0].set_yscale('log')
axs[0, 1].set_title('Storage memory footprint')
sb.barplot(x='storage_format', y='memory_usage', data=memory_usage_df, ax=axs[0, 1])
axs[0, 1].set_ylabel('memory usage [MB]')
axs[1, 0].set_title('Avg. time per sample [μs] | seq_idx & varsubset=False')
sb.barplot(
x='storage_format',
y='avg_time_per_sample',
hue='data_access_type',
data=res_df[res_df.varsubset.eq(False) & res_df.scenario.eq('seq_idx')],
ax=axs[1, 0]
)
axs[1, 0].set_ylabel('avg. time [μs]')
axs[1, 0].set_yscale('log')
axs[1, 1].set_title('Avg. time per sample [μs] | seq_idx & varsubset=True')
sb.barplot(
x='storage_format',
y='avg_time_per_sample',
hue='data_access_type',
data=res_df[res_df.varsubset.eq(True) & res_df.scenario.eq('seq_idx')],
ax=axs[1, 1]
)
axs[1, 1].set_ylabel('avg. time [μs]')
axs[1, 1].set_yscale('log')
axs[2, 0].set_title('Avg. time per sample [μs] | random_idx & varsubset=False')
sb.barplot(
x='storage_format',
y='avg_time_per_sample',
hue='data_access_type',
data=res_df[res_df.varsubset.eq(False) & res_df.scenario.eq('random_idx')],
ax=axs[2, 0]
)
axs[2, 0].set_ylabel('avg. time [μs]')
axs[2, 0].set_yscale('log')
axs[2, 1].set_title('Avg. time per sample [μs] | random_idx & varsubset=True')
sb.barplot(
x='storage_format',
y='avg_time_per_sample',
hue='data_access_type',
data=res_df[res_df.varsubset.eq(True) & res_df.scenario.eq('random_idx')],
ax=axs[2, 1]
)
axs[2, 1].set_ylabel('avg. time [μs]')
axs[2, 1].set_yscale('log')
# set y-scale to same range
y_lims = []
for i in range(1, 3):
for j in range(0, 2):
y_lims.append(axs[i, j].get_ylim()[1])
for i in range(1, 3):
for j in range(0, 2):
axs[i, j].set_ylim(1, max(y_lims))
plt.tight_layout()
plt.savefig(os.path.join(path_out, 'data_store_benchmark.pdf'))
```
#### File: estimators/torch/losses.py
```python
import numpy as np
import torch
class LossLoglikelihoodNb(torch.nn.Module):
def __init__(self, average=True):
super(LossLoglikelihoodNb, self).__init__()
self.average = average
def forward(self, preds, target):
"""Implements the negative log likelihood loss as VAE reconstruction loss"""
x = target
loc, scale = torch.chunk(preds, chunks=2, dim=1)
eta_loc = torch.log(loc)
eta_scale = torch.log(scale)
log_r_plus_mu = torch.log(scale + loc)
ll = torch.lgamma(scale + x)
ll = ll - torch.lgamma(x + torch.ones_like(x))
ll = ll - torch.lgamma(scale)
ll = ll + torch.multiply(x, eta_loc - log_r_plus_mu) + torch.multiply(scale, eta_scale - log_r_plus_mu)
ll = torch.clamp(ll, min=-300, max=300)
neg_ll = -ll
if self.average:
neg_ll = torch.mean(neg_ll)
        else:
            # sum over features, average over batch
            neg_ll = torch.mean(torch.sum(neg_ll, dim=1), dim=0)
return neg_ll
class LossLoglikelihoodGaussian(torch.nn.Module):
def __init__(self, average=True):
super(LossLoglikelihoodGaussian, self).__init__()
self.average = average
def forward(self, preds, target):
"""Implements the gaussian log likelihood loss as VAE reconstruction loss"""
loc, scale = torch.chunk(preds, chunks=2, dim=1)
        ll = -torch.log(scale * np.sqrt(2. * np.pi)) - 0.5 * torch.square((target - loc) / scale)
ll = torch.clamp(ll, min=-300, max=300)
neg_ll = -ll
if self.average:
neg_ll = torch.mean(neg_ll)
else:
# sum over features, average over batch
neg_ll = torch.mean(torch.sum(neg_ll, dim=1), dim=0)
return neg_ll
class LossCrossentropyAgg(torch.nn.Module):
def __init__(self):
super(LossCrossentropyAgg, self).__init__()
def forward(self, preds, target):
""" Modified crossentropy that aggregates allowed output classes into single class. """
preds = torch.clamp(preds, min=1e-10, max=1.)
ll_cce_agg = -torch.log(torch.mean(target * preds, dim=1, keepdim=False))
return ll_cce_agg
class KLLoss(torch.nn.Module):
def __init__(self):
super(KLLoss, self).__init__()
        # register beta as a non-trainable buffer so it moves with the module across devices
        self.register_buffer('beta', torch.tensor(1.))
def forward(self, preds, target):
expected_logqz_x, expected_logpz = torch.chunk(preds, chunks=2, dim=1)
kl_loss = torch.mean(expected_logqz_x - expected_logpz, dim=0)
return self.beta * kl_loss
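# --- Hedged usage sketch (illustrative; not part of the original module) ---
if __name__ == "__main__":
    # `preds` concatenates location and scale along dim 1, so it carries twice as many
    # columns as `target`; the sizes below are arbitrary.
    loss_fn = LossLoglikelihoodNb(average=True)
    preds = torch.ones((8, 200))
    target = torch.ones((8, 100))
    print(loss_fn(preds, target))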
```
#### File: sfaira/models/base.py
```python
import abc
try:
import tensorflow as tf
except ImportError:
tf = None
class BasicModel(abc.ABC):
"""
This base class defines model attributes shared across all models.
"""
_version: str
_topology_id: str
genome_size: int
hyperparam: dict
model_class: str
model_type: str
@property
def version(self):
return self._version
class BasicModelKeras(BasicModel):
"""
This base class defines model attributes shared across all tf.keras models.
"""
    # quoted annotation so importing this module does not require tensorflow to be installed
    training_model: "tf.keras.Model"
```
#### File: models/embedding/vae.py
```python
import numpy as np
try:
import tensorflow as tf
except ImportError:
tf = None
from typing import List, Union, Tuple
from sfaira.models.embedding.output_layers import NegBinOutput, NegBinSharedDispOutput, NegBinConstDispOutput, \
GaussianOutput, GaussianSharedStdOutput, GaussianConstStdOutput
from sfaira.versions.topologies import TopologyContainer
from sfaira.models.embedding.base import BasicModelKerasEmbedding
from sfaira.models.pp_layer import PreprocInput
class Sampling(tf.keras.layers.Layer):
"""Uses (z_mean, z_log_var) to sample z."""
def call(self, inputs, **kwargs):
z_mean, z_log_var = inputs
batch = tf.shape(z_mean)[0]
dim = tf.shape(z_mean)[1]
epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
        # reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I)
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon
class Encoder(tf.keras.layers.Layer):
"""Maps input to embedding space"""
def __init__(
self,
latent_dim: Tuple,
dropout_rate,
l1_coef: float,
l2_coef: float,
batchnorm: bool,
activation: str,
kernel_initializer: str,
name='encoder',
**kwargs
):
super().__init__(name=name, **kwargs)
self.fwd_pass = []
for i, hid_size in enumerate(latent_dim[:-1]):
self.fwd_pass.append(
tf.keras.layers.Dense(
units=hid_size,
activation=activation,
kernel_initializer=kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l1_l2(l1=l1_coef, l2=l2_coef),
name='enc%s' % i
)
)
# At the bottleneck layer:
# 1. a linear activation function is used to not restrict the support of the hidden units
# 2. no batch normalisation is used to not scale the variance of inactive units up and the activity of
# active units down.
# 3. no dropout is used to not confound the required bottleneck dimension with dropout rate.
if batchnorm and i < (len(latent_dim) - 1):
self.fwd_pass.append(
tf.keras.layers.BatchNormalization(
center=True,
scale=True
)
)
if i < (len(latent_dim) - 1) and dropout_rate > 0:
if activation == "selu":
self.fwd_pass.append(tf.keras.layers.AlphaDropout(dropout_rate))
else:
self.fwd_pass.append(tf.keras.layers.Dropout(dropout_rate, noise_shape=None, seed=None))
# final layer
self.dense_mean = tf.keras.layers.Dense(
units=latent_dim[-1],
activation="linear"
)
self.dense_log_var = tf.keras.layers.Dense(
units=latent_dim[-1],
activation="linear"
)
self.sampling = Sampling()
def call(self, inputs, **kwargs):
x = inputs
for layer in self.fwd_pass:
x = layer(x)
# final layer
z_mean = self.dense_mean(x)
z_log_var = self.dense_log_var(x)
z = self.sampling((z_mean, z_log_var))
return z, z_mean, z_log_var
class Decoder(tf.keras.layers.Layer):
"""Maps latent space sample back to output"""
def __init__(
self,
latent_dim: Tuple,
dropout_rate,
l1_coef: float,
l2_coef: float,
batchnorm: bool,
activation: str,
kernel_initializer: str,
name='decoder',
**kwargs
):
super().__init__(name=name, **kwargs)
self.fwd_pass = []
for i, hid_size in enumerate(latent_dim):
self.fwd_pass.append(
tf.keras.layers.Dense(
units=hid_size,
activation=activation,
kernel_initializer=kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l1_l2(l1=l1_coef, l2=l2_coef),
name='dec%s' % i
)
)
if batchnorm:
self.fwd_pass.append(
tf.keras.layers.BatchNormalization(
center=True,
scale=True
)
)
if dropout_rate > 0.0:
if activation == "selu":
self.fwd_pass.append(tf.keras.layers.AlphaDropout(dropout_rate))
else:
self.fwd_pass.append(tf.keras.layers.Dropout(dropout_rate, noise_shape=None, seed=None))
def call(self, inputs, **kwargs):
x = inputs
for layer in self.fwd_pass:
x = layer(x)
return x
class ModelKerasVae(BasicModelKerasEmbedding):
def predict_reconstructed(self, x: np.ndarray):
return np.split(self.training_model.predict(x)[0], indices_or_sections=2, axis=1)[0]
def __init__(
self,
in_dim,
latent_dim=(128, 64, 2, 64, 128),
dropout_rate=0.1,
l1_coef: float = 0.,
l2_coef: float = 0.,
batchnorm: bool = False,
activation='tanh',
init='glorot_uniform',
output_layer="nb"
):
super(ModelKerasVae, self).__init__()
# Check length of latent dim to divide encoder-decoder stack:
if len(latent_dim) % 2 == 1:
n_layers_enc = len(latent_dim) // 2 + 1
else:
raise ValueError("len(latent_dim)=%i should be uneven to provide a defined bottleneck" % len(latent_dim))
inputs_encoder = tf.keras.Input(shape=(in_dim,), name='counts')
inputs_sf = tf.keras.Input(shape=(1,), name='size_factors')
inputs_encoder_pp = PreprocInput()(inputs_encoder)
output_encoder = Encoder(
latent_dim=latent_dim[:n_layers_enc],
dropout_rate=dropout_rate,
l1_coef=l1_coef,
l2_coef=l2_coef,
batchnorm=batchnorm,
activation=activation,
kernel_initializer=init
)(inputs_encoder_pp)
z, z_mean, z_log_var = output_encoder
expected_logqz_x = -0.5 * tf.reduce_sum(1 + z_log_var, axis=1)
expected_logpz = -0.5 * tf.reduce_sum(tf.square(z_mean) + tf.exp(z_log_var), axis=1)
expected_densities = tf.keras.layers.Concatenate(axis=1, name='kl')([
tf.expand_dims(expected_logqz_x, axis=1),
tf.expand_dims(expected_logpz, axis=1)])
output_decoder = Decoder(
latent_dim=latent_dim[n_layers_enc:],
dropout_rate=dropout_rate,
l1_coef=l1_coef,
l2_coef=l2_coef,
batchnorm=batchnorm,
activation=activation,
kernel_initializer=init
)(z)
if output_layer == 'nb':
output_decoder_expfamily = NegBinOutput(original_dim=in_dim)((output_decoder, inputs_sf))
elif output_layer == 'nb_shared_disp':
output_decoder_expfamily = NegBinSharedDispOutput(original_dim=in_dim)((output_decoder, inputs_sf))
elif output_layer == 'nb_const_disp':
output_decoder_expfamily = NegBinConstDispOutput(original_dim=in_dim)((output_decoder, inputs_sf))
elif output_layer == 'gaussian':
output_decoder_expfamily = GaussianOutput(original_dim=in_dim)((output_decoder, inputs_sf))
elif output_layer == 'gaussian_shared_disp':
output_decoder_expfamily = GaussianSharedStdOutput(original_dim=in_dim)((output_decoder, inputs_sf))
elif output_layer == 'gaussian_const_disp':
output_decoder_expfamily = GaussianConstStdOutput(original_dim=in_dim)((output_decoder, inputs_sf))
else:
raise ValueError("tried to access a non-supported output layer %s" % output_layer)
output_decoder_expfamily_concat = tf.keras.layers.Concatenate(axis=1, name="neg_ll")(output_decoder_expfamily)
self.encoder_model = tf.keras.Model(
inputs=[inputs_encoder, inputs_sf],
outputs=[z, z_mean, z_log_var],
name="encoder_model"
)
self.training_model = tf.keras.Model(
inputs=[inputs_encoder, inputs_sf],
outputs=[output_decoder_expfamily_concat, expected_densities],
name="autoencoder"
)
def predict_embedding(self, x, variational=False):
if variational:
return self.encoder_model.predict(x)
else:
return self.encoder_model.predict(x)[1]
class ModelVaeVersioned(ModelKerasVae):
def __init__(
self,
topology_container: TopologyContainer,
override_hyperpar: Union[dict, None] = None
):
hyperpar = topology_container.topology["hyper_parameters"]
if override_hyperpar is not None:
for k in list(override_hyperpar.keys()):
hyperpar[k] = override_hyperpar[k]
super().__init__(
in_dim=topology_container.n_var,
**hyperpar
)
print('passed hyperpar: \n', hyperpar)
self._topology_id = topology_container.topology_id
self.genome_size = topology_container.n_var
self.model_class = "embedding"
self.model_type = topology_container.model_type
self.hyperparam = dict(
list(hyperpar.items()) + # noqa: W504
[
("topology_id", self._topology_id),
("genome_size", self.genome_size),
("model_class", self.model_class),
("model_type", self.model_type)
]
)
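# --- Hedged usage sketch (illustrative; not part of the original module) ---
if __name__ == "__main__":
    # latent_dim must have odd length: the first half (down to the bottleneck of size 2)
    # builds the encoder, the remaining entries build the decoder.
    vae = ModelKerasVae(in_dim=2000, latent_dim=(128, 64, 2, 64, 128), output_layer="nb")
    vae.training_model.summary()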
```
#### File: sfaira/models/made.py
```python
from random import randint
import numpy as np
try:
import tensorflow as tf
except ImportError:
tf = None
# ToDo: we are using a lot of tf.keras.backend modules below, can we use tf core instead?
class MaskingDense(tf.keras.layers.Layer):
""" Just copied code from keras Dense layer and added masking and a few other tricks:
- Direct auto-regressive connections to output
- Allows a second (non-autoregressive) input that is fully connected to first hidden
- Either 1 output or 2 outputs (concatenated) that are separately
auto-regressive wrt to the input
"""
def __init__(self, units, out_units,
hidden_layers=1,
dropout_rate=0.0,
random_input_order=False,
activation='elu',
out_activation='linear',
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
out_kernel_initializer='glorot_uniform',
out_bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
name=None,
batchnorm=False,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(MaskingDense, self).__init__(name=name, **kwargs)
self.input_sel = None
self.random_input_order = random_input_order
self.rate = min(1., max(0., dropout_rate))
self.kernel_sels = []
self.units = units
self.out_units = out_units
self.hidden_layers = hidden_layers
self.activation = tf.keras.activations.get(activation)
self.out_activation = tf.keras.activations.get(out_activation) # None gives linear activation
self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self.bias_initializer = tf.keras.initializers.get(bias_initializer)
self.out_kernel_initializer = tf.keras.initializers.get(out_kernel_initializer)
self.out_bias_initializer = tf.keras.initializers.get(out_bias_initializer)
self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self.activity_regularizer = tf.keras.regularizers.get(activity_regularizer)
self.kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self.bias_constraint = tf.keras.constraints.get(bias_constraint)
self.batchnorm = batchnorm
def dropout_wrapper(self, inputs, training):
if 0. < self.rate < 1.:
def dropped_inputs():
return tf.keras.backend.dropout(inputs, self.rate, noise_shape=None, seed=None)
return tf.keras.backend.in_train_phase(dropped_inputs, inputs, training=training)
return inputs
def build_layer_weights(
self,
input_dim,
units,
use_bias=True,
is_output=False,
id=''
):
kernel_initializer = (self.kernel_initializer if not is_output
else self.out_kernel_initializer)
bias_initializer = (self.bias_initializer if not is_output
else self.out_bias_initializer)
kernel = self.add_weight(shape=(input_dim, units),
initializer=kernel_initializer,
name='kernel' + id,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if use_bias:
bias = self.add_weight(shape=(units,),
initializer=bias_initializer,
name='bias' + id,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
bias = None
return kernel, bias
def build_mask(self, shape, prev_sel, is_output):
if is_output:
if shape[-1] == len(self.input_sel):
input_sel = self.input_sel
else:
input_sel = self.input_sel * 2
else:
# Disallow D-1 because it would violate auto-regressive property
# Disallow unconnected units by sampling min from previous layer
input_sel = [randint(np.min(prev_sel), shape[-1] - 2) for i in range(shape[-1])]
def vals():
in_len = len(self.input_sel)
for x in range(shape[-2]):
for y in range(shape[-1]):
if is_output:
yield 1 if prev_sel[x] < input_sel[y % in_len] else 0
else:
yield 1 if prev_sel[x] <= input_sel[y] else 0
return tf.keras.backend.constant(list(vals()), dtype='float32', shape=shape), input_sel
def build(self, input_shape):
if isinstance(input_shape, list):
if len(input_shape) != 2:
raise ValueError('Only list only supported for exactly two inputs')
input_shape, other_input_shape = input_shape
# Build weights for other (non-autoregressive) vector
other_shape = (other_input_shape[-1], self.units)
self.other_kernel, self.other_bias = self.build_layer_weights(*other_shape, id='_h')
assert len(input_shape) >= 2
assert self.out_units == input_shape[-1] or self.out_units == 2 * input_shape[-1]
self.kernels, self.biases = [], []
self.kernel_masks, self.kernel_sels = [], []
self.batch_norms = []
shape = (input_shape[-1], self.units)
self.input_sel = np.arange(input_shape[-1])
if self.random_input_order:
np.random.shuffle(self.input_sel)
prev_sel = self.input_sel
for i in range(self.hidden_layers):
# Hidden layer
kernel, bias = self.build_layer_weights(*shape, id=str(i))
self.kernels.append(kernel)
self.biases.append(bias)
# Hidden layer mask
kernel_mask, kernel_sel = self.build_mask(shape, prev_sel, is_output=False)
self.kernel_masks.append(kernel_mask)
self.kernel_sels.append(kernel_sel)
prev_sel = kernel_sel
shape = (self.units, self.units)
self.batch_norms.append(tf.keras.layers.BatchNormalization(center=True, scale=True))
# Direct connection between input/output
if self.hidden_layers > 0:
direct_shape = (input_shape[-1], self.out_units)
self.direct_kernel, _ = self.build_layer_weights(
*direct_shape,
use_bias=False,
is_output=True,
id='_direct')
self.direct_kernel_mask, self.direct_sel = self.build_mask(direct_shape, self.input_sel,
is_output=True)
# Output layer
out_shape = (self.units, self.out_units)
self.out_kernel, self.out_bias = self.build_layer_weights(
*out_shape,
is_output=True,
id='_out')
self.out_kernel_mask, self.out_sel = self.build_mask(out_shape, prev_sel, is_output=True)
self.built = True
def call(self, inputs, training=None):
other_input = None
if isinstance(inputs, list):
assert len(inputs) == 2
assert self.hidden_layers > 0, "other input not supported if no hidden layers"
assert hasattr(self, 'other_kernel')
inputs, other_input = inputs
output = inputs
if other_input is not None:
other = tf.keras.backend.dot(other_input, self.other_kernel)
other = tf.keras.backend.bias_add(other, self.other_bias)
other = self.activation(other)
# Hidden layer + mask
for i in range(self.hidden_layers):
# i=0: input_dim -> masking_dim
# i>0: masking_dim -> masking_dim
weight = self.kernels[i] * self.kernel_masks[i]
output = tf.keras.backend.dot(output, weight)
# "other" input
if i == 0 and other_input is not None:
output = output + other
output = tf.keras.backend.bias_add(output, self.biases[i])
output = self.activation(output)
if self.batchnorm:
output = self.batch_norms[i](output)
output = self.dropout_wrapper(output, training)
# out_act(bias + (V dot M_v)h(x) + (A dot M_a)x + (other dot M_other)other)
# masking_dim -> input_dim
output = tf.keras.backend.dot(output, self.out_kernel * self.out_kernel_mask)
# Direct connection
if self.hidden_layers > 0:
# input_dim -> input_dim
direct = tf.keras.backend.dot(inputs, self.direct_kernel * self.direct_kernel_mask)
output = output + direct
output = tf.keras.backend.bias_add(output, self.out_bias)
output = self.out_activation(output)
output = self.dropout_wrapper(output, training)
return output
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
return (input_shape[0], self.out_units)
```
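For orientation, here is a minimal sketch of wiring a layer like `MaskingDense` into a Keras model. It assumes the constructor accepts the `units`, `out_units`, `hidden_layers` and `out_activation` keyword arguments initialised above and that `out_units` matches the input dimension; all values are illustrative, not taken from the original project.
```python
import tensorflow as tf

D = 10  # input dimensionality (illustrative)
made_layer = MaskingDense(units=32, out_units=D, hidden_layers=2,
                          out_activation='sigmoid')

inputs = tf.keras.Input(shape=(D,))
outputs = made_layer(inputs)  # masks and weights are built on first call
model = tf.keras.Model(inputs, outputs)
model.compile(optimizer='adam', loss='binary_crossentropy')
```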
#### File: data_for_tests/loaders/utils.py
```python
import anndata
import scipy.sparse
import numpy as np
import os
import pandas as pd
import pathlib
from sfaira.data.store.stores.multi import StoresAnndata
from sfaira.versions.genomes import GenomeContainer
from sfaira.unit_tests.directories import DIR_DATA_LOADERS_CACHE, DIR_DATA_LOADERS_STORE_DAO, \
DIR_DATA_LOADERS_STORE_H5AD, save_delete
from .consts import RELEASE_HUMAN, RELEASE_MOUSE
from .loaders import DatasetSuperGroupMock
MATCH_TO_RELEASE = {"Homo sapiens": RELEASE_HUMAN,
"Mus musculus": RELEASE_MOUSE}
def _create_adata(celltypes, ncells, ngenes, assembly) -> anndata.AnnData:
"""
    Used by mock data loaders.
"""
gc = GenomeContainer(organism=" ".join(assembly.split(".")[0].split("_")), release=assembly.split(".")[-1])
gc.set(biotype="protein_coding")
genes = gc.ensembl[:ngenes]
x = scipy.sparse.csc_matrix(np.random.randint(low=0, high=100, size=(ncells, ngenes)))
var = pd.DataFrame(index=genes)
obs = pd.DataFrame({}, index=["cell_" + str(i) for i in range(ncells)])
if len(celltypes) > 0:
obs["free_annotation"] = [celltypes[i] for i in np.random.choice(len(celltypes), size=ncells, replace=True)]
# Create random embedding
obsm = {"X_umap": np.random.random(size=(ncells, 2))}
adata = anndata.AnnData(X=x, obs=obs, obsm=obsm, var=var)
return adata
def _load_script(dsg, rewrite: bool, match_to_release):
dsg.load(allow_caching=True, load_raw=rewrite)
dsg.streamline_features(remove_gene_version=True, match_to_release=match_to_release)
dsg.streamline_metadata(schema="sfaira", clean_obs=True, clean_var=True, clean_uns=True, clean_obs_names=True)
return dsg
class PrepareData:
CLS_DSG = DatasetSuperGroupMock
def prepare_dsg(self, rewrite: bool = False, load: bool = True, match_to_release=None):
"""
Prepares data set super group of mock data and returns instance.
        Use this for testing involving a data set group.
"""
# Make sure cache exists:
if not os.path.exists(DIR_DATA_LOADERS_CACHE):
pathlib.Path(DIR_DATA_LOADERS_CACHE).mkdir(parents=True, exist_ok=True)
dsg = self.CLS_DSG()
if match_to_release is None:
match_to_release = MATCH_TO_RELEASE
if load:
dsg = _load_script(dsg=dsg, rewrite=rewrite, match_to_release=match_to_release)
return dsg
def prepare_store_anndata(self, match_to_reference=None) -> StoresAnndata:
dsg = self.prepare_dsg(load=True, match_to_release=match_to_reference)
store = StoresAnndata(adatas=dsg.adata_ls)
return store
def prepare_store(self, store_format: str, rewrite: bool = False, rewrite_store: bool = False,
match_to_reference=None) -> str:
"""
Prepares mock data store and returns path to store.
        Use this for testing involving a data set store.
"""
dir_store_formatted = {
"dao": DIR_DATA_LOADERS_STORE_DAO,
"h5ad": DIR_DATA_LOADERS_STORE_H5AD,
}[store_format]
if not os.path.exists(dir_store_formatted):
pathlib.Path(dir_store_formatted).mkdir(parents=True, exist_ok=True)
dsg = self.prepare_dsg(rewrite=rewrite, load=False, match_to_release=match_to_reference)
for k, ds in dsg.datasets.items():
print(k)
if store_format == "dao":
compression_kwargs = {"compressor": "default", "overwrite": True, "order": "C"}
else:
compression_kwargs = {}
if store_format == "dao":
anticipated_fn = os.path.join(dir_store_formatted, ds.doi_cleaned_id)
elif store_format == "h5ad":
anticipated_fn = os.path.join(dir_store_formatted, ds.doi_cleaned_id + ".h5ad")
else:
assert False
if rewrite_store and os.path.exists(anticipated_fn):
# Can't write if h5ad already exists.
                # Delete the existing store before writing if forced.
save_delete(anticipated_fn)
# Only rewrite if necessary
if rewrite_store or not os.path.exists(anticipated_fn):
ds = _load_script(dsg=ds, rewrite=rewrite, match_to_release=MATCH_TO_RELEASE)
ds.write_distributed_store(dir_cache=dir_store_formatted, store_format=store_format, dense=True,
chunks=128, compression_kwargs=compression_kwargs)
return dir_store_formatted
```
#### File: tests_by_submodule/versions/test_universe.py
```python
import os
from sfaira.versions.metadata import CelltypeUniverse, OntologyCl, OntologyUberon
from sfaira.unit_tests import DIR_TEMP
"""
CelltypeUniverse
"""
def test_universe_io():
if not os.path.exists(DIR_TEMP):
os.mkdir(DIR_TEMP)
tmp_fn = os.path.join(DIR_TEMP, "universe_temp.csv")
targets = ["stromal cell", "lymphocyte", "T-helper 1 cell", "T-helper 17 cell"]
leaves_target = ["stromal cell", "T-helper 1 cell", "T-helper 17 cell"]
cl = OntologyCl(branch="v2021-02-01")
uberon = OntologyUberon(branch="2019-11-22")
cu = CelltypeUniverse(cl=cl, uberon=uberon)
cu.write_target_universe(fn=tmp_fn, x=targets)
cu.load_target_universe(fn=tmp_fn)
os.remove(tmp_fn)
leaves = cu.onto_cl.convert_to_name(cu.onto_cl.leaves)
assert set(leaves) == set(leaves_target), (leaves, leaves_target)
``` |
{
"source": "JohnMoutafis/web_expert_system",
"score": 3
} |
#### File: JohnMoutafis/web_expert_system/maximum_example.py
```python
from pyknow import *
class Maximum(KnowledgeEngine):
"""
Implements pyknow example of calculating maximum of list.
"""
@Rule(~Fact(max=W()))
def init(self):
self.declare(Fact(max=0))
@Rule(
Fact(val='val' << W()),
'm' << Fact(max='max' << W()),
TEST(lambda max, val: val > max)
)
def compute_max(self, m, val, max):
self.modify(m, max=val)
@Rule(
'v' << Fact(val=W('val')),
Fact(max=W('max')),
TEST(lambda max, val: val <= max)
)
def remove_val(self, v, max, val):
self.retract(v)
@Rule('v' << Fact(max=W()) & ~Fact(val=W()))
def print_max(self, v):
self.response = v['max']
compute_max = Maximum()
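# Minimal usage sketch, assuming the standard pyknow reset/declare/run workflow
# (the values below are illustrative):
#   compute_max.reset()
#   for v in [3, 7, 2]:
#       compute_max.declare(Fact(val=v))
#   compute_max.run()
#   print(compute_max.response)  # -> 7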
``` |
{
"source": "JohnM-TX/ToxicWords",
"score": 3
} |
#### File: ToxicWords/code/logreg_sanket.py
```python
import numpy as np
import pandas as pd
import nltk
# from playsound import playsound
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score, cross_val_predict
from scipy.sparse import hstack
import timeit
import re
stemmer = nltk.stem.snowball.SnowballStemmer('english')
############### send notification on smartphone
# from urllib.parse import urlencode
# from urllib.request import Request, urlopen
# url = 'https://www.pushsafer.com/api' # Set destination URL here
# post_fields = { # Set POST fields here
# "t" : "Python code execution complete",
# "m" : "task finished" + str(k),
# "d" : "a",
# "u" : url,
# "k" : "*************"
# }
# def notify():
# request = Request(url, urlencode(post_fields).encode())
# json = urlopen(request).read().decode()
# print(json)
# notify when code has completed execution
# def audio():
# playsound('C:\\Users\\<NAME>\\Music\\notification.mp3')
train = pd.read_csv('../input/train.csv').fillna(' ')
test = pd.read_csv('../input/test.csv').fillna(' ')
'''
train['comment_text']=train['comment_text'].apply(lambda x :clean(x))
test['comment_text']=test['comment_text'].apply(lambda x :clean(x))
'''
def cleaned(comment):
comment=comment.lower()
comment=re.sub("\\n"," ",comment)
comment=re.sub("\d{1,}","",comment)
comment=re.sub("\.{1,}",".",comment)
comment=re.sub("\:{1,}","",comment)
comment=re.sub("\;|\=|\%|\^|\_"," ",comment)
comment=re.sub("\""," ",comment)
comment=re.sub("\'{2,}","",comment)
comment=re.sub("\/|\!"," ",comment)
comment=re.sub("\?"," ",comment)
comment=re.sub("\#"," ",comment)
comment=re.sub("\,|\@|\|"," ",comment)
comment=re.sub("\(|\)"," ",comment)
comment=re.sub("\S+jpg"," ",comment)
comment=re.sub("\S*wikip\S+","",comment)
comment=re.sub("\[.*?\]"," ",comment)
comment=re.sub("\-"," ",comment)
'''comment=re.sub("\"|:|@|,|\/|\=|;|\.|\'|\?|\!|\||\+|\~|\-|\#"," ",comment)
comment=re.sub("\.{1,}",".",comment)
comment=re.sub("\[.*?\]","",comment)
comment=re.sub("www\S+","",comment)
comment=re.sub("\_"," ",comment)
comment=re.sub("http","",comment)'''
comment=re.sub(r'[^\x00-\x7F]+',' ', comment) # remove non ascii
comment=re.sub("\s+"," ",comment)
comment = ' '.join( [w for w in comment.split() if len(w)>1])
comment = ' '.join( [stemmer.stem(w) for w in comment.split()])
comment = comment.strip()
return comment
print('cleaning')
train['comment_text']=train['comment_text'].apply(lambda x :cleaned(x))
test['comment_text']=test['comment_text'].apply(lambda x :cleaned(x))
# audio()
class_names = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
train_text = train['comment_text']
test_text = test['comment_text']
all_text = pd.concat([train_text, test_text])
'''
stopwords = nltk.corpus.stopwords.words('english')
mystopwords = "aa abc"
'''
print('words')
word_vectorizer = TfidfVectorizer(
sublinear_tf=True,
strip_accents='unicode',
analyzer='word',
token_pattern=r'\w{1,}',
stop_words='english',
ngram_range=(1, 1),
max_features=5000)
word_vectorizer.fit(all_text)
train_word_features = word_vectorizer.transform(train_text)
test_word_features = word_vectorizer.transform(test_text)
#audio()
print('chars')
char_vectorizer = TfidfVectorizer(
sublinear_tf=True,
strip_accents='unicode',
analyzer='char',
stop_words='english',
ngram_range=(2, 3),
max_features=5000)
char_vectorizer.fit(all_text)
train_char_features = char_vectorizer.transform(train_text)
test_char_features = char_vectorizer.transform(test_text)
# audio()
train_features = hstack([train_char_features, train_word_features])
test_features = hstack([test_char_features, test_word_features])
scores = []
submission = pd.DataFrame.from_dict({'id': test['id']})
predfile = pd.DataFrame.from_dict({'id': train['id']})
for class_name in class_names:
if (class_name in ['toxic']):
train_target = train[class_name]
        classifier = LogisticRegression(C=0.63, solver='sag', class_weight="balanced")  # sag solver suits large datasets; target here is binary
cv_score = np.mean(cross_val_score(classifier, train_features, train_target, cv=3, scoring='roc_auc'))
cv_preds = cross_val_predict(classifier, train_features, train_target, cv=3, method='predict_proba')
predfile[class_name] = cv_preds[:, 1]
scores.append(cv_score)
print('CV score for class {} is {}'.format(class_name, cv_score))
classifier.fit(train_features, train_target)
submission[class_name] = classifier.predict_proba(test_features)[:, 1]
elif(class_name in ["severe_toxic", "insult"]):
train_target = train[class_name]
classifier = LogisticRegression(C=0.38, solver='sag') # sag large datasets and bivariate
cv_score = np.mean(cross_val_score(classifier, train_features, train_target, cv=3, scoring='roc_auc'))
cv_preds = cross_val_predict(classifier, train_features, train_target, cv=3, method='predict_proba')
predfile[class_name] = cv_preds[:, 1]
scores.append(cv_score)
print('CV score for class {} is {}'.format(class_name, cv_score))
classifier.fit(train_features, train_target)
submission[class_name] = classifier.predict_proba(test_features)[:, 1]
elif(class_name in ["threat", "identity_hate"]):
train_target = train[class_name]
classifier = LogisticRegression(C=0.45, solver='sag') # sag large datasets and bivariate
cv_score = np.mean(cross_val_score(classifier, train_features, train_target, cv=3, scoring='roc_auc'))
cv_preds = cross_val_predict(classifier, train_features, train_target, cv=3, method='predict_proba')
predfile[class_name] = cv_preds[:, 1]
scores.append(cv_score)
print('CV score for class {} is {}'.format(class_name, cv_score))
classifier.fit(train_features, train_target)
submission[class_name] = classifier.predict_proba(test_features)[:, 1]
elif(class_name == "obscene"):
train_target = train[class_name]
classifier = Ridge(alpha=20, solver='auto',max_iter=100, random_state=22, tol=0.0005)
cv_score = np.mean(cross_val_score(classifier, train_features, train_target, cv=3, scoring='roc_auc'))
cv_preds = cross_val_predict(classifier, train_features, train_target, cv=3)
predfile[class_name] = cv_preds
scores.append(cv_score)
print('CV score for class {} is {}'.format(class_name, cv_score))
classifier.fit(train_features, train_target)
submission[class_name] = classifier.predict(test_features)
print('Total CV score is {}'.format(np.mean(scores)))
# audio()
#notify()
predfile.to_csv('../ensembles/preds_logreg_sanket.csv', index=False)
submission.to_csv('../ensembles/test_logreg_sanket.csv', index=False)
```
#### File: ToxicWords/Eric_models/word_net.py
```python
from nltk.corpus import wordnet
from nltk import word_tokenize, pos_tag
from nltk.stem import WordNetLemmatizer
def get_wordnet_pos(treebank_tag):
if treebank_tag.startswith('V'):
return wordnet.VERB
elif treebank_tag.startswith('J'):
return wordnet.ADJ
elif treebank_tag.startswith('N'):
return wordnet.NOUN
elif treebank_tag.startswith('R'):
return wordnet.ADV
else:
return None
def lemmatize_sentence(sentence):
res = []
lemmatizer = WordNetLemmatizer()
for word, pos in pos_tag(word_tokenize(sentence)):
wordnet_pos = get_wordnet_pos(pos) or wordnet.NOUN
res.append(lemmatizer.lemmatize(word, pos=wordnet_pos))
res=" ".join(res)
return res
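# e.g. lemmatize_sentence("the cats are running") -> "the cat be run"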
import pandas as pd
import numpy as np
import pickle
from multiprocessing import Pool
num_partitions = 8 #number of partitions to split dataframe
num_cores = 4 #number of cores on your machine
def multiply_columns_lemmatize_sentence(data):
data=data.apply(lambda x:lemmatize_sentence(x))
return data
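# The call below relies on a parallelize_dataframe helper that is not defined in
# this snippet; this is the standard split / Pool.map / concat recipe implied by
# num_partitions and num_cores above (treat it as a reconstruction).
def parallelize_dataframe(df, func):
    df_split = np.array_split(df, num_partitions)
    pool = Pool(num_cores)
    df = pd.concat(pool.map(func, df_split))
    pool.close()
    pool.join()
    return df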
# Parallelised across 4 cores this takes roughly 13 minutes; it only needs to run
# once unless the preprocessing changes (the `corpus` variable is assumed to be
# loaded earlier in the original script).
corpus= parallelize_dataframe(corpus, multiply_columns_lemmatize_sentence)
#store it back to the disk
pickle.dump(corpus,open("tmp.pkl", "wb"))
corpus=pickle.load(open("tmp.pkl", "rb"))
``` |
{
"source": "JohnMulligan/Herschels",
"score": 3
} |
#### File: Herschels/resources/formatSweepFile.py
```python
import numpy as np
import re
import csv
def dec_format(deg,m):
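    # Appears to convert an angle given as (degrees, minutes) measured from the
    # celestial pole into a declination string: dec = 90d - angle, borrowing
    # minutes when needed. e.g. dec_format(30, 15) -> '59d45m0.00s'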
degnum=int(deg)
degnum=90-degnum
minnum=int(m)
if minnum!=0:
if degnum>0:
degnum-=1
minnum=60-minnum
else:
minnum=-minnum
if minnum<0 or degnum<0:
negative_str='-'
else:
negative_str=''
degstr=negative_str+tp(str(abs(degnum)))+"d"+tp(str(abs(minnum)))+"m0.00s"
return(degstr)
def tp(s):
if len(s)==1:
s='0'+s
return s
def date_format(datestr):
m,d,y=datestr.split('-')
d=tp(d)
m=tp(m)
return('-'.join([y,m,d]))
def formatSweepFile(inputSweeps, startInt):
a = open(inputSweeps, 'r')
b = open('default_sweep.ini','w')
outputstr=''
index=startInt
with open(inputSweeps) as csvfile:
reader=csv.DictReader(csvfile)
for row in reader:
date=date_format(row['date'])+"T20:40:00.0"
startRA=tp(row['startRAh'])+'h'+tp(row['startRAm'])+'m'+'0.00s'
endRA=tp(row['endRAh'])+'h'+tp(row['endRAm'])+'m'+'0.00s'
startDec=dec_format(row['startDecd'],row['startDecm'])
endDec=dec_format(row['endDecd'],row['endDecm'])
name='Sweep'+row['name']
block_dict={'name':name,'startRA':startRA,'endRA':endRA,'startDec':startDec,'endDec':endDec,'date':date}
for k in block_dict:
outputstr+='\n'+str(index)+'\\'+k+'='+block_dict[k]
index+=1
headers='[General]\narrow_scale=1.5\nsweep_count=%d\nsweeps_version=2.0\nuse_decimal_degrees=false\nuse_semi_transparency=false\n\n[sweep]' %(index+1)
b.write(headers+outputstr)
a.close()
b.close()
formatSweepFile('sweep_data.csv', 0)
``` |
{
"source": "JohnMulligan/u-app",
"score": 3
} |
#### File: core/tests/test_models.py
```python
from django.test import TestCase
##there is a get user model helper in django
from django.contrib.auth import get_user_model
#define a custom model for tests, building on the TestCase model
#and presumably we'll be using the native user model he defined
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
email = '<EMAIL>'
password = '<PASSWORD>'
user=get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email,email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
        # Test that the email for a new user is normalized by our user class function
email='<EMAIL>'
user=get_user_model().objects.create_user(email,'test')
        self.assertEqual(user.email, email.lower())
``` |
{
"source": "johnmurphy007/moviedata",
"score": 2
} |
#### File: dockerfile/wsgi/app.py
```python
from flask import Flask
from flask import request, render_template
import re
import sys
import pymongo
from pymongo import MongoClient
import urlparse
import requests
import logging
import os
from flask import json
from bson.objectid import ObjectId
import ast # to convert unicode to dict
#import scanForFilms
# coding: utf-8
import paho.mqtt.client as mqtt
#mqtt info:
def mqtt_publish(topic, payload):
host_mqtt = '192.168.1.71'
port_mqtt = 1883 # SSL/TLS = 8883
mqttc = mqtt.Client('python_pub')
mqttc.connect(host_mqtt, port_mqtt)
mqttc.publish(topic, payload)
mqttc.loop(2) #timeout = 2s
return
app = Flask(__name__)
app.logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_formatter = logging.Formatter('[%(asctime)s] [%(module)s:%(lineno)d][%(levelname)s] %(message)s')
stream_handler.setFormatter(stream_formatter)
app.logger.addHandler(stream_handler)
# global variable (not used at present)
config_file = "config.json"
if ('DB_PORT_27017_TCP_ADDR' in os.environ):
host = os.environ['DB_PORT_27017_TCP_ADDR']
else:
host = '172.17.0.1'
client = MongoClient(host, 27017)
db = client.movies # db = client.primer
def convert(input):
if isinstance(input, dict):
return dict((convert(key), convert(value)) for key, value in input.iteritems())
elif isinstance(input, list):
return [convert(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
@app.route('/', methods=["GET"])
def route_getbase():
app.logger.info('/ GET url')
genres, directors, films = getBasicMetadata()
posts = db.movies.find()
return render_template('home.html', genres=genres, directors=directors, posts=films)
# Work in Progress - requires wsgi Container to have visibility on folders that videos are in.
@app.route('/movieinfo/scan', methods=["GET"])
def route_getmoviescan():
app.logger.info('/movieinfo/scan GET url')
# Call scanForFilms to scan/add movies to mongodB:
mqtt_topic = 'hello/world'
mqtt_payload = 'scanForFilms'
mqtt_publish(mqtt_topic, mqtt_payload)
# Insert mqtt call to trigger python call:
#scanForFilms.main()
page = 1
pagesize = 25
skip = page * pagesize
posts = db.movies.find().sort(('Title'), pymongo.ASCENDING).limit(pagesize).skip(skip)
return render_template('movieinfoall.html', posts=posts, page=page)
# @app.route('/movieinfo/delete/', methods=["GET"])
# def route_getmoviedelete():
# app.logger.info('/movieinfo/delete GET url')
# empty = db.movies.remove({"Title":""})
# app.logger.info("deleted an item?")
#
# page = 1
# pagesize = 25
# skip = page * pagesize
# posts = db.movies.find().sort(('Title'), pymongo.ASCENDING).limit(pagesize).skip(skip)
#
# return render_template('movieinfoall.html', posts=posts, page=page)
@app.route('/movieinfo/delete/<imdbid>/<page>', methods=["GET"])
def route_getmoviedeleteimdbid(imdbid, page):
app.logger.info('/movieinfo/delete/<imdbid>/<page> GET url')
if imdbid:
app.logger.info(imdbid)
# Remove record:
post = db.movies.delete_one({'_id': ObjectId(imdbid)})
if page:
page = int(page)
else:
page = 1
pagesize = 25
skip = page * pagesize
posts = db.movies.find().sort(('Title'), pymongo.ASCENDING).limit(pagesize).skip(skip)
return render_template('movieinfoall.html', posts=posts, page=page)
@app.route('/movieinfo/all', methods=["GET"])
def route_getmovieinfoall():
app.logger.info('/movieinfo/all GET url')
url = request.values # Get value from GET(/POST) request
page = 1
if 'page' in url:
page = int(url['page'])
pagesize = 25
skip = page * pagesize
app.logger.info(skip)
posts = db.movies.find().sort(('Title'), pymongo.ASCENDING).limit(pagesize).skip(skip)
return render_template('movieinfoall.html', posts=posts, page=page)
@app.route('/movieinfo/film', methods=["GET"])
def route_getmovieinfofilm():
app.logger.info('/movieinfo/film GET url')
url = request.values # Get value from GET(/POST) request
if 'moviename' in url:
search = url['moviename']
# Get matching entries
posts = db.movies.find({'Title': {'$regex': search, "$options": "$i"}}).sort(('Title'), pymongo.DESCENDING)
else:
# Get all entries
posts = db.movies.find().sort(('Title'), pymongo.DESCENDING)
return render_template('movieinfofilm.html', posts=posts)
@app.route('/movieinfo/genre', methods=["GET"])
def route_getmoviegenre():
app.logger.info('/movieinfo/genre GET url')
url = request.values # Get value from GET(/POST) request
genres, directors, posts = getBasicMetadata()
if url.keys(): # Get keys of url and add them to array
genrelist = url.keys()
app.logger.info(genrelist)
search = '|'.join(genrelist)
app.logger.info(search)
posts = db.movies.find({'Genre': {'$regex': search, "$options": "$i"}}).sort(('imdbRating'), pymongo.DESCENDING)
return render_template('movieinfogenre.html', posts=posts, genres=genres)
@app.route('/movieinfo/director', methods=["GET"])
def route_getmoviedirector():
app.logger.info('/movieinfo/director GET url')
url = request.values # Get value from GET(/POST) request
genres, directors, posts = getBasicMetadata()
if 'director' in url:
# Get matching entries
search = url['director']
app.logger.info(search)
posts = db.movies.find({'Director': {'$regex': search, "$options": "$i"}}).sort(('Title'), pymongo.DESCENDING)
else:
# Get all entries
posts = db.movies.find().sort(('Title'), pymongo.DESCENDING)
return render_template('movieinfodirector.html', posts=posts, directors=directors)
@app.route('/movieinfo/imdb', methods=["GET"])
def route_getmovieimdb():
app.logger.info('/movieinfo/imdb GET url')
url = request.values # Get value from GET(/POST) request
if 'sortby' in url:
if url['sortby'] == "asc":
operator = "$gte"
elif url['sortby'] == "desc":
operator = "$lte"
else:
operator = "$eq"
if 'imdbrating' in url:
imdbrating = url['imdbrating']
if 'optsortby' in url:
opt_operator = ''
if url['optsortby'] == "asc":
opt_operator = "$gte"
elif url['optsortby'] == "desc":
opt_operator = "$lte"
elif url['optsortby'] == "equal":
opt_operator = "$eq"
if opt_operator:
app.logger.info(opt_operator)
else:
app.logger.warn("Not defined!")
if 'optimdbrating' in url:
opt_imdbrating = url['optimdbrating']
app.logger.info(opt_imdbrating)
if 'sort' in url:
sort = url['sort']
app.logger.info(sort)
if opt_operator and opt_imdbrating:
# posts = db.movies.find({"imdbRating": {operator: imdbrating, "$ne": "N/A", opt_operator: opt_imdbrating}}).sort(('imdbRating'), pymongo.DESCENDING).limit(pagesize).skip(page*pagesize)
if sort == "DESCENDING":
posts = db.movies.find({"imdbRating": {operator: imdbrating, "$ne": "N/A", opt_operator: opt_imdbrating}}).sort(('imdbRating'), pymongo.DESCENDING)
else:
posts = db.movies.find({"imdbRating": {operator: imdbrating, "$ne": "N/A", opt_operator: opt_imdbrating}}).sort(('imdbRating'), pymongo.ASCENDING)
else:
if sort == "DESCENDING":
# posts = db.movies.find({"imdbRating": {operator: imdbrating, "$ne": "N/A"}}).sort(('imdbRating'), pymongo.DESCENDING).limit(pagesize).skip(page*pagesize)
posts = db.movies.find({"imdbRating": {operator: imdbrating, "$ne": "N/A"}}).sort(('imdbRating'), pymongo.DESCENDING)
else:
posts = db.movies.find({"imdbRating": {operator: imdbrating, "$ne": "N/A"}}).sort(('imdbRating'), pymongo.ASCENDING)
return render_template('movieinfoimdb.html', posts=posts)
@app.route('/movieinfo', methods=["GET"])
def route_getexample():
    app.logger.info('/movieinfo GET url')
#url = request.values # Get value from GET(/POST) request
url = request.args.get('add')
url = url[1:len(url)-1]
url = convert(url)
app.logger.info(url)
app.logger.info(type(url))
if 'moviename' in url:
app.logger.info('moviename found in url')
posts = db.movies.find({"Title": url['moviename']}).sort(('Title'), pymongo.DESCENDING)
found = posts.count()
return render_template('index.html', posts=posts, found=found)
if url:
moviejson = {}
interim = ast.literal_eval(url)
for item in interim:
moviejson[item] = interim[item]
#temp1 = url[0] # url[i] is unicode
#app.logger.info("get json! = " + str(temp1))
app.logger.info(moviejson)
# Strip '[' & ']' from temp, use ast to convert unicode dict string to real dict.
#moviejson = ast.literal_eval(temp[1:len(temp)-1])
app.logger.info(type(moviejson))
app.logger.info(moviejson)
posts = db.movies.insert_one(moviejson)
posts = db.movies.find({"Title": moviejson['Title']})
found = 1
return render_template('index.html', posts=posts, found=found)
posts = json.dumps({'text': '1234'})
found = 0
return render_template('index.html', posts=posts, found=found)
@app.route('/movieinfo', methods=["POST"])
def route_postexample():
app.logger.info('/movieinfo POST url')
httpsearch = request.form['text']
app.logger.info(httpsearch)
posts = db.movies.find({"Title": httpsearch})
app.logger.info(posts.count())
if posts.count() > 0:
found = 1
return render_template('index.html', posts=posts, found=found)
else:
posts = getmatch(httpsearch)
if posts:
found = "yes"
else:
posts = {"Title": "X-men"} # Dummy data
found = 0
return render_template('index.html', posts=posts, found=found)
@app.route('/image', methods=["GET"])
def route_getimage():
app.logger.info('/image GET url')
genres, directors, films = getBasicMetadata()
moviejson = db.movies.find({"Title": "Fargo"}).limit(1)
app.logger.info(moviejson)
getPoster(moviejson)
posts = db.movies.find()
return render_template('home.html', genres=genres, directors=directors, posts=films)
def getBasicMetadata():
alltype = db.movies.find()
genres = []
directors = []
films = []
for film in alltype:
if "Genre" in film:
genrefile = film['Genre'].split(",")
for i in genrefile:
genres.append(i.strip())
if "Director" in film:
dirfile = film['Director'].split(",")
for i in dirfile:
directors.append(i.strip())
if "Title" in film:
films.append(film['Title'])
gen = list(set(genres))
dirs = list(set(directors))
return gen, dirs, list(set(films))
def getPoster(cursor):
for moviejson in cursor:
app.logger.info(moviejson)
if "Poster" in moviejson:
app.logger.info(moviejson['Poster'])
image = requests.get(moviejson['Poster'])
poster = str(moviejson['Poster'])
index = poster.rfind('.')
ext = poster[index + 1:]
name = str(moviejson['Title'])
try:
with open(name + '.' + ext, "wb") as code1:
#app.logger.info(image.content)
code1.write(image.content)
code1.close()
except:
pass
return
def getmatch(film):
movielist = []
baseUrl = "http://www.omdbapi.com/"
try:
r = requests.get(baseUrl + "?t="+film+"&y=&plot=long&r=json")
app.logger.info(r.status_code)
moviejson = r.json()
#app.logger.info(moviejson)
if 'Awards' in moviejson:
app.logger.info("Found Awards in moviejson")
del moviejson['Awards']
app.logger.info(moviejson)
except requests.exceptions.RequestException as e:
app.logger.warn(e)
sys.exit(1)
app.logger.info(moviejson)
movielist.append(moviejson)
return movielist # str(db.users.find().pretty())
###########################################
# WIP Stuff
###########################################
def writeConfig(json_to_write):
with open(config_file, mode='w') as out:
res = json.dump(
json_to_write,
out,
sort_keys=True,
indent=4,
separators=(
',',
': '))
out.close()
return
def readConfig():
with open(config_file, mode='r') as out:
input_json = json.load(out)
out.close()
return input_json
@app.route('/options', methods=["GET"])
def route_getoptions():
app.logger.info('/options GET url')
genres, directors, posts = getBasicMetadata()
url = request.values # Get value from GET(/POST) request
posts = {"Title": "X-men"}
app.logger.info(url)
if len(url) == 1:
query = {}
value = url.values() # Get values from dict
query['Genre'] = value[0]
posts = db.movies.find(query).sort(('imdbRating'), pymongo.DESCENDING)
app.logger.info(value[0])
else:
query = []
for u in url:
querydict = {}
querydict['Genre'] = url[u]
query.append(querydict)
app.logger.info(query)
posts = db.movies.find({'$or': query}).sort(('imdbRating'), pymongo.DESCENDING)
app.logger.info(posts)
page = 1
if 'page' in url:
page = int(url['page'])
pagesize = 20
if 'pagesize' in url:
        pagesize = int(url['pagesize'])
#posts = db.movies.find({"Genre": "Adventure"})
posts = db.movies.find().sort(('Title'), pymongo.DESCENDING).limit(pagesize).skip(page*pagesize)
#for f in posts:
# app.logger.info(f)
# result = db.test.delete_one({'x': 1})
# directors = getDirector()
return render_template('displayOptions.html', genres=genres, directors=directors, posts=posts, page=page, pagesize=pagesize)
@app.route('/options', methods=["POST"])
def route_postoptions():
app.logger.info('/options POST url')
text1 = request.form['0']
app.logger.info(text1)
    genres, directors, _ = getBasicMetadata()  # getGenre() is not defined; reuse the metadata helper
# directors = getDirector()
# bb
return render_template('displayOptions.html', genres=genres, directors=directors)
# List of reference accesses via pymongo that I've tried:
# posts = db.movies.find({'Title': '/.*Sup.*/'})
# posts = db.movies.find({"Genre": {"$elemMatch": {"$eq": "Action", "$eq": "Comedy"}}})
# posts = db.movies.find({"$or": [{"Genre": {"$in": genrelist}}]})
# posts = db.movies.find({"$where": 'function() {var genre = this.Genre.split(","); for (i = 0; i < genre.length; i++) { if (genre == "Action") return this.genre; } }'})
# db.inventory.find( { $or: [ { quantity: { $lt: 20 } }, { price: 10 } ] })
# posts = db.movies.find({"Genre": "Action, Adventure, Drama"})
# posts = db.movies.find({"Genre": { $elemMatch: {"$in": genrelist}}})
# posts = db.movies.find({"Genre": {"$elemMatch": {"Genre": genrelist}}})
# posts = db.movies.find()
# posts = db.movies.find({"Genre": { "$in": genrelist}})
# posts = db.movies.find({"Genre": { "$in": genrelist}})
# posts = db.movies.find({"Genre": { $elemMatch: {"$in": genrelist}}})
# posts = db.movies.find()
# resultdb = db.movies.insert_one(moviejson)
# moviejson = db.movies.find({"Title": "Fargo"}).limit(1)\
def getlink(full_path_file_name, return_type):
path_file_name = full_path_file_name.split('/')
if len(path_file_name) > 1:
filename = path_file_name[len(path_file_name)-1]
path = path_file_name[0]
for p in range(1, len(path_file_name)-1):
path = path + '/' + path_file_name[p]
else:
filename = path_file_name[0]
path = ''
if return_type == "filename":
return filename
else:
return path
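# e.g. getlink('/foo/bar/baz.mp4', 'filename') -> 'baz.mp4'
#      getlink('/foo/bar/baz.mp4', 'path')     -> '/foo/bar'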
```
#### File: dockerfile/wsgi/readFilmListAndAddToDB.py
```python
import os
import re
import sys
import time
from pymongo import MongoClient
import requests
import logging
from flask import json
from flask import Flask
# Setting static_folder=None disables built-in static handler.
app = Flask(__name__) # static_url_path='')
app.logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_formatter = logging.Formatter('[%(asctime)s] [%(module)s:%(lineno)d] [%(levelname)s] %(message)s')
stream_handler.setFormatter(stream_formatter)
app.logger.addHandler(stream_handler)
# global variable
config_file = "config.json"
# readConfig() is defined further down in this file, so load the config directly
# here to avoid a NameError at import time.
with open(config_file) as cfg_file:
    configjson = json.load(cfg_file)
if ('DB_PORT_27017_TCP_ADDR' in os.environ):
host = os.environ['DB_PORT_27017_TCP_ADDR']
else:
host = configjson["db_host"]
client = MongoClient(host, 27017)
db = client.movies
def searchformovies(path1):
'''
Recursively search the folder location ('path1') for films that match
the film formats as specified in the 'config.json' file. The config.json
uses a key:value pair, where:
key = format of movie file
value = if a value other than '/' is specified, then the path is
truncated by this value.
For example, VOB files typically are found in a subfolder called:
"VIDEO_TS". Want to get the name of the folder that this folder
is contained in. Specifying a key: value pair of
"VOB": "/VIDEO_TS" will remove the /VIDEO_TS from the path.
'''
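    # Illustrative config.json fragment (key names are taken from this module;
    # the values are assumptions):
    #   {
    #     "movie_file_format": {"mp4": "/", "avi": "/", "VOB": "/VIDEO_TS"},
    #     "path_to_search": "/media/movies",
    #     "apikey": "<your omdbapi key>",
    #     "db_host": "172.17.0.1"
    #   }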
# avi, mov, mp4, .mkv, .vob, .ogg, .wmv, .mp2
# if .vob: folder will be VIDEO_TS (need to filter back for this)
films_to_search_for = configjson['movie_file_format']
filmformats = films_to_search_for.keys()
result = []
app.logger.info("Search for Movies")
app.logger.info(path1)
for path, dirs, files in os.walk(path1):
        # print(path, dirs, files)  # debug leftover; the sys.exit(0) that followed stopped the scan after the first directory
if files:
for indfile in files:
for formattype in filmformats:
if indfile.endswith("."+str(formattype)):
# next test is geared towards VOB files which are
# in a subfolder called VIDEO_TS
moviename = path.split("/")
if path.find(films_to_search_for[formattype]):
movie = moviename[len(moviename)-2]
else:
movie= moviename[len(moviename)-1]
result.append(movie)
app.logger.info("Finished scanning")
app.logger.info(result)
scannedmovies = set(result) # extract unique values
return scannedmovies
def processdir(dirname):
'''
Process the 'dirname' for the name of the film and the year of the
    film (if it is there). The method returns the name and year of the film.
'''
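    # e.g. "Fargo (1996)" -> ("Fargo", "1996"); "Inception" -> ("Inception", '')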
name = ""
year = ''
if len(dirname) > 6:
if dirname[len(dirname) - 1] == ")":
if dirname[len(dirname) - 6] == "(":
year = dirname[len(dirname)-5:len(dirname) - 1]
name = dirname[:len(dirname) - 6]
name = name.strip()
elif dirname[len(dirname)-5] == " ":
try:
year = int(dirname[len(dirname)-4:])
name = dirname[:len(dirname) - 5]
name = name.strip()
if name[len(name) - 1] == "-":
name = name[:len(name-1)].strip()
except:
name = dirname
pass
else:
name = dirname
return name, year
def getfilmdata(film, year, fullpathtomovie):
'''
Get film metadata from omdbapi.com
'''
# Read config.py file
apikey = configjson["apikey"]
baseUrl = "http://www.omdbapi.com/?apikey=" + str(apikey)
try:
if year:
year = str(year)
else:
year = ''
except:
year = ''
try:
r = requests.get(baseUrl + "&t="+film+"&y="+year+"&plot=full&r=json")
app.logger.info(r.status_code)
moviejson = r.json() # capture json data
except requests.exceptions.RequestException as e:
app.logger.warn(e)
if r.status_code == 200:
app.logger.info("Match found on omdbapi")
moviejson['naslocation'] = fullpathtomovie
resultdb = db.movies.insert_one(moviejson)
app.logger.info("Adding New Film: Name = "+str(name)+": "+str(resultdb.inserted_id))
return None
return fullpathtomovie
def main():
with open('movies.txt','r') as f:
movies = f.read().splitlines()
movielisterror = []
for movie in movies:
name, year = processdir(movie)
app.logger.info(name)
app.logger.info(year)
# Check for match in MongoDB:
if year:
_items = db.movies.find_one({"Title": name, "Year": year})
else:
_items = db.movies.find_one({"Title": name})
if _items is None:
# If movie not found in MongoDB, get metdata from omdbapi:
newfilmresult = getfilmdata(name, year, movie)
if newfilmresult is not None:
movielisterror.append(newfilmresult)
else:
app.logger.info("Film in database already")
time.sleep(90)
if movielisterror:
try:
with open("errorfile.txt", 'w') as outfile:
                outfile.writelines(line + "\n" for line in movielisterror)
outfile.close()
except EnvironmentError:
app.logger.warn('error writing to file')
return
def readConfig():
with open(config_file, mode='r') as out:
input_json = json.load(out)
out.close()
return input_json
if __name__ == '__main__':
main()
```
#### File: dockerfile/wsgi/scanForFilms.py
```python
import os
import re
import sys
import time
from pymongo import MongoClient
import requests
import logging
from flask import json
from flask import Flask
import paho.mqtt.client as mqtt
# mqtt info - TODO: publish a status message at the end of main(): a success payload if the scan completes, or "FAIL" if the try/except catches an error.
def mqtt_publish(topic, payload):
host_mqtt = '192.168.1.71'
port_mqtt = 1883 # SSL/TLS = 8883
mqttc = mqtt.Client('python_pub')
mqttc.connect(host_mqtt, port_mqtt)
mqttc.publish(topic, payload)
mqttc.loop(2) #timeout = 2s
return
# Setting static_folder=None disables built-in static handler.
app = Flask(__name__) # static_url_path='')
app.logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_formatter = logging.Formatter('[%(asctime)s] [%(module)s:%(lineno)d] [%(levelname)s] %(message)s')
stream_handler.setFormatter(stream_formatter)
app.logger.addHandler(stream_handler)
# global variable
config_file = "config.json"
def searchformovies(path1):
'''
Recursively search the folder location ('path1') for films that match
the film formats as specified in the 'config.json' file. The config.json
uses a key:value pair, where:
key = format of movie file
value = if a value other than '/' is specified, then the path is
truncated by this value.
For example, VOB files typically are found in a subfolder called:
"VIDEO_TS". Want to get the name of the folder that this folder
is contained in. Specifying a key: value pair of
"VOB": "/VIDEO_TS" will remove the /VIDEO_TS from the path.
'''
# avi, mov, mp4, .mkv, .vob, .ogg, .wmv, .mp2
# if .vob: folder will be VIDEO_TS (need to filter back for this)
configjson = readConfig()
films_to_search_for = configjson['movie_file_format']
filmformats = films_to_search_for.keys()
result = []
app.logger.info("Search for Movies")
app.logger.info(path1)
for path, dirs, files in os.walk(path1):
if files:
for indfile in files:
for formattype in filmformats:
if indfile.endswith("."+str(formattype)):
# next test is geared towards VOB files which are
# in a subfolder called VIDEO_TS
moviename = path.split("/")
if path.find(films_to_search_for[formattype]):
movie = moviename[len(moviename)-2]
else:
movie= moviename[len(moviename)-1]
result.append(movie)
app.logger.info("Finished scanning")
app.logger.info(result)
scannedmovies = set(result) # extract unique values
return scannedmovies
def main():
    # TODO: wrap the body in a try/except and publish an mqtt message at the end: a success payload on completion, a failure payload otherwise.
# Read config.py file
configjson = readConfig()
path_to_search = configjson['path_to_search']
app.logger.info(path_to_search)
# scan folders for movies. Array/list returned and captured in 'movies'
movies = searchformovies(path_to_search)
with open('movies.txt', 'w') as f:
for item in movies:
f.write("%s\n" % item)
def readConfig():
#config_file # = "config.json"
with open(config_file, mode='r') as out:
input_json = json.load(out)
out.close()
return input_json
if __name__ == '__main__':
main()
``` |
{
"source": "johnmusiu/prime_tdd",
"score": 4
} |
#### File: johnmusiu/prime_tdd/prime.py
```python
def isPrime(number):
# if number in
for i in range(2, number):
if number % i == 0:
return False
return True
def prime_tdd(number):
"""
input
number(int) -> this is a positive number
outputs:
results(list) -> list of numbers
"""
if not isinstance(number, int):
return "Unexpected non integer input"
if number < 0:
return "error"
results = []
for i in range(2, number+1):
if isPrime(i):
results.append(i)
return results
```
#### File: johnmusiu/prime_tdd/prime_tdd.py
```python
from prime import prime_tdd
import unittest
class TestPrime(unittest.TestCase):
def test_negative(self, number=-20):
results = prime_tdd(number)
assert(results == "error")
def test_small_positive_numbers(self, number=2):
results = prime_tdd(number)
assert(results == [2])
def test_middle_positive(self, number=20):
results = prime_tdd(number)
assert(results == [2, 3, 5, 7, 11, 13, 17, 19])
# def test_large_numbers(self, number=1000):
# results = prime_tdd(number)
# assert(results == [])
def test_unexpected_input(self, number="string"):
results = prime_tdd(number)
assert(results == "Unexpected non integer input")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "johnmuth81/cloud-compare",
"score": 2
} |
#### File: management/commands/scrape.py
```python
import os
import httplib
import zlib
import json
import time
from django.core.management.base import BaseCommand, CommandError
# Slurp a price file from the internet into memory.
# returns raw data (typically JSON)
def get_price_data(host, path):
conn = httplib.HTTPSConnection(host)
# get root index
conn.request('GET', path)
res = conn.getresponse()
if res.status != httplib.OK:
# TBD #### Logging
raise CommandError('root index: unexpected status %d' % (res.status))
# Read in raw data
roots = res.read()
# Get content encoding from header. decode is gzip
enc = res.getheader('content-encoding', 'text')
if enc == 'gzip':
roots = zlib.decompress(roots, 16+zlib.MAX_WBITS)
conn.close()
return roots
# Convert a pathname to a flat file name
# eg. /foo/bar.x -> foo.bar.x
def path2name(path):
npath = path.replace('/', '.')
if npath[0] == '.':
npath = npath[1:]
return npath
# Host and path for AWS pull
AWS_HOST = 'pricing.us-east-1.amazonaws.com'
AWS_PATH = '/offers/v1.0/aws/index.json'
def pull_aws(dir):
host = AWS_HOST
path = AWS_PATH
data = get_price_data(host, path)
# should be JSON, attempt decode
js = None
try:
js = json.loads(data)
except:
# TBD #### Logging
pass
tstr = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
meta = {
'cloudProvider': "Amazon",
'host': host,
'path': path,
'time': tstr
}
# Write data to index
f = open('%s/%s' % (dir, path2name(path)), "w")
f.write(data)
f.close()
# Loop through offers
for o in js['offers']:
od = js['offers'][o]
print path2name(od['currentVersionUrl'])
data = get_price_data(host, od['currentVersionUrl'])
f = open('%s/%s' % (dir, path2name(od['currentVersionUrl'])), "w")
f.write(data)
f.close()
print path2name(od['versionIndexUrl'])
data = get_price_data(host, od['versionIndexUrl'])
f = open('%s/%s.history.json' %
(dir, path2name(od['versionIndexUrl'])), "w")
f.write(data)
f.close()
# Write out metadata
f = open('%s/META' % (dir), 'w')
f.write(json.dumps(meta))
f.close()
GCP_HOST = 'cloudpricingcalculator.appspot.com'
GCP_PATH = '/static/data/pricelist.json'
def pull_gcp(dir):
host = GCP_HOST
path = GCP_PATH
tstr = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
meta = {
'cloudProvider': "Google",
'host': host,
'path': path,
'time': tstr
}
data = get_price_data(host, path)
# should be JSON, attempt decode
js = None
try:
js = json.loads(data)
except:
# TBD #### Logging
pass
print path2name(path)
# Write data into directory
f = open('%s/%s' % (dir, path2name(path)), 'w')
f.write(data)
f.close()
# Write out metadata
f = open('%s/META' % (dir), 'w')
f.write(json.dumps(meta))
f.close()
pass
class Command(BaseCommand):
help = 'Scrapes pricing data from AWS or GCP to local files'
def add_arguments(self, parser):
parser.add_argument("directory", nargs=1,
help='Directory to place scraped data')
parser.add_argument('--aws', required=False, action='store_true',
help='Scrape data from Amazon AWS')
parser.add_argument('--gcp', required=False, action='store_true',
help='Scrape data from Google GCP')
def handle(self, *args, **options):
# Only one of --aws or --gcp is required
if options['aws'] == options['gcp']:
raise CommandError('one (and only one) of --aws or --gcp required')
directory = options['directory'][0]
# make sure the directory exists
if not os.path.isdir(directory):
raise CommandError('%s is not a directory' % directory)
# spool-in aws if needed
if options['aws']:
pull_aws(directory)
# spool-in gcp if needed
if options['gcp']:
pull_gcp(directory)
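        # Example invocation (sketch):
        #   python manage.py scrape --aws ./price_data
        #   python manage.py scrape --gcp ./price_data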
``` |
{
"source": "johnmwangi/Gallery_jpG",
"score": 3
} |
#### File: Gallery_jpG/photoapp/tests.py
```python
from django.test import TestCase
from .models import *
# Create your tests here.
class ImageTest(TestCase):
    # Class instance setup for the tests
    def setUp(self):
        self.nairobi = Location(name='nairobi')
        self.nairobi.save()

        self.nature = Category(name='nature')
        self.nature.save()

        self.new_image = Image(name="New Image", description="An Image", location=self.nairobi, category=self.nature)
        self.new_image.save()

    # Test case for an instance of the Image class
def test_instance(self):
self.new_image.save()
self.assertTrue(isinstance(self.new_image, Image))
#
# def test_delete_image(self):
# self.drinks.save()
# self.drinks.delete()
# self.assertTrue(len(Image.objects.all()) == 0)
#
# def test_update(self):
# self.drinks.save()
# self.drinks.name = 'MoreDrinks'
# self.assertTrue(self.drinks.name == 'MoreDrinks')
#
# def test_all_images(self):
# self.drinks.save()
# images = Image.all_images()
# self.assertTrue(len(images) > 0)
#
# def test_search_by_category(self):
# self.drinks.save()
# images = Image.search_by_category('fun')
# self.assertTrue(len(images) > 0)
#
# def test_view_location(self):
# self.drinks.save()
# location = Image.view_location(self.nairobi)
# self.assertTrue(len(location) > 0)
#
# def test_view_category(self):
# self.drinks.save()
# categories = Image.view_category(self.music)
# self.assertTrue(len(categories) > 0)
class CategoriesTest(TestCase):
def setUp(self):
self.nature = Category(name='nature')
def test_instance(self):
self.nature.save()
self.assertTrue(isinstance(self.nature, Category))
def test_save(self):
self.nature.save_category()
categories = Category.objects.all()
self.assertTrue(len(categories)>0)
class LocationTest(TestCase):
def setUp(self):
self.nairobi = Location(name='nairobi')
def test_instance(self):
self.nairobi.save()
self.assertTrue(isinstance(self.nairobi, Location))
def test_save(self):
self.nairobi.save_location()
locations = Location.objects.all()
self.assertTrue(len(locations)>0)
``` |
{
"source": "johnmwangi/Moringa_Tribune",
"score": 2
} |
#### File: Moringa_Tribune/news/views.py
```python
from .forms import NewArticleForm, NewsLetterForm
from django.shortcuts import render,redirect
from django.http import HttpResponse, Http404
import datetime as dt
from .models import Article,NewsLetterRecipients
from django.http import HttpResponse, Http404,HttpResponseRedirect
from .email import send_welcome_email
from django.contrib.auth.decorators import login_required
from rest_framework.response import Response
from rest_framework.views import APIView
from .models import MoringaMerch
from .serializer import MerchSerializer
from rest_framework import status
from .permision import IsAdminOrReadOnly
from rest_framework import status
from django.http import JsonResponse
def news_today(request):
if request.method == 'POST':
form = NewsLetterForm(request.POST)
if form.is_valid():
name = form.cleaned_data['your_name']
email = form.cleaned_data['email']
#.................
return render(request, 'all-news/today-news.html', {"date": date,"news":news,"letterForm":form})
def newsletter(request):
name = request.POST.get('your_name')
email = request.POST.get('email')
recipient = NewsLetterRecipients(name=name, email=email)
recipient.save()
send_welcome_email(name, email)
data = {'success': 'You have been successfully added to mailing list'}
return JsonResponse(data)
# Create your views here.
# def past_days_news(request, past_date):
# try:
# # Converts data from the string Url
# date = dt.datetime.strptime(past_date, '%Y-%m-%d').date()
# except ValueError:
# # Raise 404 error when ValueError is thrown
# raise Http404()
# assert False
#
# if date == dt.date.today():
# return redirect(news_today)
#
# news = Article.days_news(date)
# return render(request, 'all-news/past-news.html',{"date": date,"news":news})
def news_of_day(request):
date = dt.date.today()
news = Article.objects.all()
form = NewsLetterForm()
return render(request, 'all-news/today-news.html', {"date": date, "news": news, "letterForm": form})
def past_days_news(request,past_date):
try:
# Converts data from the string Url
date = dt.datetime.strptime(past_date,'%Y-%m-%d').date()
except ValueError:
# Raise 404 error when ValueError is thrown
raise Http404()
    # assert False  # debug aid from the tutorial; uncomment to inspect the request in Django's error page
if date == dt.date.today():
return redirect(news_of_day)
return render(request, 'all-news/past-news.html', {"date":date})
def search_results(request):
if 'article' in request.GET and request.GET["article"]:
search_term = request.GET.get("article")
searched_articles = Article.search_by_title(search_term)
message = f"{search_term}"
return render(request, 'all-news/search.html',{"message":message,"articles": searched_articles})
else:
message = "You haven't searched for any term"
return render(request, 'all-news/search.html',{"message":message})
@login_required(login_url='/accounts/login/')
def new_article(request):
current_user = request.user
if request.method == 'POST':
form = NewArticleForm(request.POST, request.FILES)
if form.is_valid():
article = form.save(commit=False)
article.editor = current_user
article.save()
return redirect('NewsToday')
else:
form = NewArticleForm()
return render(request, 'new_article.html', {"form": form})
def article(request,article_id):
try:
article = Article.objects.get(id = article_id)
    except Article.DoesNotExist:
raise Http404()
return render(request,"all-news/article.html", {"article":article})
if request.method == 'POST':
form = NewsLetterForm(request.POST)
if form.is_valid():
name = form.cleaned_data['your_name']
email = form.cleaned_data['email']
recipient = NewsLetterRecipients(name = name,email =email)
recipient.save()
HttpResponseRedirect('news_today')
else:
form = NewsLetterForm()
return render(request, 'all-news/today-news.html', {"date": date,"news":news,"letterForm":form})
def newsletter(request):
name = request.POST.get('your_name')
email = request.POST.get('email')
recipient = NewsLetterRecipients(name=name, email=email)
recipient.save()
send_welcome_email(name, email)
data = {'success': 'You have been successfully added to mailing list'}
return JsonResponse(data)
class MerchList(APIView):
def get(self, request, format=None):
all_merch = MoringaMerch.objects.all()
serializers = MerchSerializer(all_merch, many=True)
return Response(serializers.data)
def post(self, request, format=None):
serializers = MerchSerializer(data=request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data, status=status.HTTP_201_CREATED)
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
    permission_classes = (IsAdminOrReadOnly,)
``` |
{
"source": "johnmwangi/News-Highlight",
"score": 4
} |
#### File: News-Highlight/app/models.py
```python
class Article:
'''
    Class that instantiates news article objects of the news source
'''
def __init__(self,author,description,time,url,image,title):
self.author = author
self.description = description
self.time = time
self.url = url
self.image = image
self.title = title
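    # Sketch of mapping a newsapi.org response item onto this class (field names
    # follow the newsapi schema; treat this as an assumption):
    #   a = Article(item['author'], item['description'], item['publishedAt'],
    #               item['url'], item['urlToImage'], item['title'])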
class Category:
'''
    Class that instantiates news category objects of the news source
'''
def __init__(self,author,description,time,url,image,title):
self.author = author
self.description = description
self.time = time
self.url = url
self.image = image
self.title = title
class Source:
'''
    Source class that instantiates objects of the news source
'''
def __init__(self,id,name,description,url):
self.id = id
self.name = name
self.description = description
self.url = url
class Headline:
'''
    Class that instantiates news headline objects of the news source
'''
def __init__(self,author,description,time,url,image,title):
self.author = author
self.description = description
self.time = time
self.url = url
self.image = image
self.title = title
``` |
{
"source": "johnmwangi/PitcH",
"score": 3
} |
#### File: PitcH/tests/test_pitch.py
```python
import unittest
from app.models import Pitches
from app import db
class PitchTest(unittest.TestCase):
def setUp(self):
self.new_pitch=Pitches(pitch="android app",author="john",title="galaxy",id=12)
def test_instance(self):
self.assertTrue(isinstance(self.new_pitch,Pitches))
def test_init(self):
        self.assertEqual(self.new_pitch.pitch, "android app")
        self.assertEqual(self.new_pitch.author, "john")
        self.assertEqual(self.new_pitch.title, "galaxy")
        self.assertEqual(self.new_pitch.id, 12)
``` |
{
"source": "johnmwangi/Py-Password-Locker",
"score": 4
} |
#### File: johnmwangi/Py-Password-Locker/credentials_test.py
```python
import unittest # Importing the unittest module
import pyperclip#helps in copy and paste functions
from credentials import Credentials
class TestCredentials(unittest.TestCase):
'''
Test class that defines test cases for the credentials class behaviours
Args:
unittest.TestCase: Testcase class that helps create test cases
'''
def setUp(self):
'''
Function to create social media account credentials before each test
'''
self.new_credentials = Credentials('iyaz', 'facebook','iyaz2','account')
def tearDown(self):
'''
tearDown method that executes a set of instructions after every test
'''
#User.users_list = []
Credentials.credentials_list = []
def test__init__(self):
'''
Confirm that instance of credentials creation is as expected
'''
self.assertEqual(self.new_credentials.user_name,'iyaz')
self.assertEqual(self.new_credentials.social_media,'facebook') # self.assertEqual(self.new_credentials.password,'account')
# def test_confirm_user(self):
# '''
# Function to confirm login details to active user
# '''
# # self.assertEqual(self.new_credentials.,"John")
# # self.assertEqual(self.new_credentials.second_name,"Mwangi")
# self.new_user = User('John','Mwangi','locker')
# self.new_user.save_user()
# userX= User('John','Mwangi', 'locker')
# userX.save_user()
# #active_from credentials import Credentials
# user = Credential.confirm_user('John','locker')
# self.assertTrue(active_user)
def test_save_credentials(self):
'''
Test and confirm that the new credential information is being saved
'''
self.new_credentials.save_credentials()
self.assertEqual(len(Credentials.credentials_list),1)
def test_display_credentials(self):
'''
Test to confirm user can view the correct credential details
'''
self.new_credentials.save_credentials()
facebook = Credentials('iyaz','facebook','iyaz2','<PASSWORD>')
facebook.save_credentials()
self.assertEqual(Credentials.display_credentials(),Credentials.credentials_list)
def test_search_social_media(self):
'''
Test to confirm if the method returns the correct social media credential
'''
self.new_credentials.save_credentials()
facebook = Credentials('iyaz','Facebook','iyaz2','account')
facebook.save_credentials()
credential_exists = Credentials.search_social_media('Facebook')
self.assertEqual(credential_exists,facebook)
def test_copy_password(self):#uses pyperclip
'''
Test to check if the copy password method will copy the correct password from social media site specified
'''
self.new_credentials.save_credentials()
facebook = Credentials('iyaz','facebook','iyaz2','account')
facebook.save_credentials()
Credentials.copy_password('<PASSWORD>')
self.assertEqual(self.new_credentials.password,pyperclip.paste())
if __name__ == '__main__':
unittest.main()
```
#### File: johnmwangi/Py-Password-Locker/passwrdrun.py
```python
import pyperclip
from user import User
from credentials import Credentials
def create_user(firstname,lastname,password):
'''
Function to create user ac
'''
new_user = User(firstname,lastname,password)
return new_user
def save_user(user):
'''
Function to save new users
'''
User.save_user(user)
def authenticate_user(first_name,password):
'''
Function to verify user is enabled before launching the credentials
'''
confirm_user = Credentials.confirm_user(first_name,password)
return confirm_user
def generate_password():
'''
Function to automatically gen password
'''
gen_pwd = Credentials.generate_password()
return gen_pwd
def create_credentials(user_name,social_media,account_name,password):
'''
Function creating new credentials
'''
new_credential = Credentials(user_name,social_media,account_name,password)
return new_credential
def save_credentials(credential):
'''
Saves new credentials
'''
    Credentials.save_credentials(credential)
def display_credentials():
'''
Function to display_credentials saved by user
'''
return Credentials.display_credentials()
def copy_password(social_media):
'''
Function to copy credential details and paste then in clipboard
'''
    return Credentials.copy_password(social_media)
def main():
print(' ')
print('Password Locker.')
while True:
print(' ')
print("*"*70)
print('Use these codes to navigate: \n cn-Create Password Locker Account \n ln-Log Into Password Locker to access your credentials \n ex-Exit')
short_code = input('Enter an option: ').lower().strip()
if short_code == 'ex':
break
elif short_code == 'cn':
print("*"*70)
print(' ')
print('To create a new password locker account:')
first_name = input('Enter your first name - ').strip()
last_name = input('Enter your last name - ').strip()
password = input('Enter your password - ').strip()
save_user(create_user(first_name,last_name,password))
print(" ")
print(f'New Password Locker Account Created for: {first_name} {last_name} using password: {password}')
elif short_code == 'ln':
print("*"*70)
print(' ')
print('To login, enter your password locker account details:')
user_name = input('Enter your first name - ').strip()
password = str(input('Enter your password - '))
user_exists = authenticate_user(user_name,password)
if user_exists == user_name:
print(" ")
print(f'Welcome {user_name}. Please choose an option to continue.')
print(' ')
while True:
print("*"*70)
print('Navigation codes: \n cm-Create Social Media Credentials\n dc-Display Credentials \n copy-Copy Social Media Password \n ex-Exit')
short_code = input('Choose an option: ').lower().strip()
print("*"*70)
if short_code == 'ex':
print(" ")
print(f'Goodbye {user_name}')
break
elif short_code == 'cm':
print(' ')
print('Enter your credential details:')
social_media = input('Enter the social media name- ').strip()
account_name = input('Enter your social media handle - ').strip()
while True:
print(' ')
print("*"*70)
print('Please choose an option for entering a password: \n ep-enter existing password \n gp-generate a password \n ex-exit')
psw_options = input('Enter an option: ').lower().strip()
print("*"*70)
if psw_options == 'ep':
print(" ")
password = input('Enter your password: ').strip()
break
elif psw_options == 'gp':
password = generate_password()
break
elif psw_options == 'ex':
break
else:
print('Sorry! Incorrect option entered. Try again.')
                        save_credentials(create_credentials(user_name,social_media,account_name,password))
print(' ')
print(f'Credential Created: social media Name: {social_media} - Social Media Handle: {account_name} - Password: {password}')
print(' ')
elif short_code == 'dc':
print(' ')
if display_credentials():
print('Here is a list of all your social media credentials')
print(' ')
for credential in display_credentials():
print(f'Social Media Account: {credential.social_media} - Social Media Handle: {credential.account_name} - Password: {credential.password}')
print(' ')
else:
print(' ')
print("Sorry, no credentials. cc to add.")
print(' ')
elif short_code == 'copy':
print(' ')
choose = input('Enter the social_media name for the credential password to copy: ')
copy_password(choose)
print('Password copied succesfully')
else:
print('Sorry! Incorrect option entered. Try again.')
else:
print(' ')
print('Sorry! Incorrect details entered. Try again or Create an Account.')
else:
print("* "*70)
print(' ')
print('Sorry! Incorrect option entered. Try again.')
if __name__ == '__main__':
main()
```
#### File: johnmwangi/Py-Password-Locker/user.py
```python
import pyperclip
import string
import random
class User:
'''
    Class to create new user accounts and save them to help in accessing the password locker
'''
users_list = []
def __init__(self,first_name,last_name,password):
'''
Method to define the properties of the object
'''
self.first_name = first_name
self.last_name = last_name
self.password= password
def save_user(self):
'''
save user details method into users_list
'''
User.users_list.append(self)
``` |
{
"source": "johnmwright/GaragePi",
"score": 3
} |
#### File: GaragePi/Python/garagePi.py
```python
import RPi.GPIO as GPIO
import time
from datetime import datetime, timedelta
from pymongo import MongoClient
import SonicController as Sonic
import LedController as LED
import PhotocellController as Lux
import TempSensorController as Temperature
GPIO.setmode(GPIO.BCM)
DISTANCE_TO_CLOSED_DOOR = 70 #cm - is actually about 60 but get readings up to 68 sometimes
MAX_LIGHT_WHEN_LIGHT_IS_ON = 300
SAMPLE_SPEED = 5 #seconds
# GPIO pin numbers
LED_RED = 12
LED_GREEN = 26
LED_BLUE = 13
TRIG = 23
ECHO = 24
LED_OPEN = LED_RED
LED_RUN = 25
PHOTO = 17
LED_LIGHT = LED_GREEN
def checkGarageDoor():
distance = sensor.readDistance()
if distance < DISTANCE_TO_CLOSED_DOOR:
print(" - Door open")
return True
else:
print(" - Door closed")
return False
def checkLight():
lightLevel = lux.resistanceTime()
if lightLevel < MAX_LIGHT_WHEN_LIGHT_IS_ON:
print(" - Light is on")
return True
else:
print(" - Light is off")
return False
ledRun = LED.LedController(LED_RUN, "Running Indicator")
ledDoor = LED.LedController(LED_OPEN, "Door Open")
ledLight = LED.LedController(LED_LIGHT, "Light On")
sensor = Sonic.SonicController(TRIG, ECHO)
lux = Lux.PhotocellController(PHOTO)
tempSensor = Temperature.TempSensorController(0)
# this is just to make sure the led is initialized to off
ledBlue = LED.LedController(LED_BLUE, "blue - unused")
lastRecord = None
try:
ledRun.turnOn()
dbclient = MongoClient()
db = dbclient.garagePi_database
while True:
timestamp = datetime.utcnow()
print("Beginning Sensor Checks {}".format( timestamp))
garageDoorIsOpen = checkGarageDoor()
lightIsOn = checkLight()
temp_c, temp_f = tempSensor.readTemp()
if (garageDoorIsOpen):
ledDoor.turnOn()
else:
ledDoor.turnOff()
if (not garageDoorIsOpen) and lightIsOn:
ledLight.turnOn()
else:
ledLight.turnOff()
record = { "doorOpen" : garageDoorIsOpen,
"lightOn" : lightIsOn,
"temp_F" : temp_f,
"timestamp": timestamp,
"sourceLanguage": "python"
}
shouldSaveRecord = False
if lastRecord is None:
print(" + lastRecord is None")
shouldSaveRecord = True
else:
if garageDoorIsOpen or garageDoorIsOpen != lastRecord["doorOpen"]:
print(" + garageDoorIsOpen differs from lastRecord {}".format(lastRecord["doorOpen"]))
shouldSaveRecord = True
if lightIsOn or lightIsOn != lastRecord["lightOn"]:
print(" + lightIsOn differs from lastRecord {}".format(lastRecord["lightOn"]))
shouldSaveRecord = True
alreadyRecordedForThisMinute = timestamp.minute == lastRecord["timestamp"].minute
if not alreadyRecordedForThisMinute and (timestamp.minute == 0 or timestamp.minute == 15 or timestamp.minute == 30 or timestamp.minute == 45):
print(" + recording due to 15 minute period")
shouldSaveRecord = True
if shouldSaveRecord:
readingId = db.readings.insert(record)
print(" readings posted to db with id {}".format(readingId))
lastRecord = record
time.sleep(SAMPLE_SPEED)
except KeyboardInterrupt:
print("keyboard interrupt caught")
finally:
sensor.teardown()
lux.teardown()
ledDoor.teardown()
ledLight.teardown()
ledRun.teardown()
GPIO.cleanup()
print("exiting")
```
#### File: GaragePi/Python/PhotocellController.py
```python
import RPi.GPIO as GPIO
import time
import os
#
# Based, in part, on https://learn.adafruit.com/basic-resistor-sensor-reading-on-raspberry-pi/basic-photocell-reading
#
class PhotocellController:
def __init__(self, pin):
self.pin = pin
print("Initializing Photocell")
def resistanceTime(self):
print(" Reading photocell")
timeout = 1000
reading = 0
GPIO.setup(self.pin, GPIO.OUT)
GPIO.output(self.pin, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(self.pin, GPIO.IN)
# This takes about 1 millisecond per loop cycle (really, it's longer)
while (GPIO.input(self.pin) == GPIO.LOW):
reading += 1
if reading > timeout:
break
print(" Light Reading: {}".format(reading))
return reading
def teardown(self):
pass
```
#### File: GaragePi/Python/SonicController.py
```python
import RPi.GPIO as GPIO
import time
class SonicController:
SPEED_OF_SOUND = 34000 #cm/s
def __init__(self, triggerPin, echoPin):
self.triggerPin = triggerPin
self.echoPin = echoPin
print("Initializing Ultrasonic Range Finder")
GPIO.setup(self.triggerPin, GPIO.OUT, pull_up_down = GPIO.PUD_DOWN)
GPIO.setup(self.echoPin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
GPIO.output(self.triggerPin, False)
print("Waiting For Sensor To Settle")
time.sleep(2)
def _readDistanceOnce(self):
print(" Distance Measurement In Progress")
READING_TIMEOUT = 2 #sec
maxTime = time.time() + READING_TIMEOUT
GPIO.output(self.triggerPin, True)
time.sleep(0.00001)
GPIO.output(self.triggerPin, False)
pulse_start = time.time()
while GPIO.input(self.echoPin)==0 and pulse_start < maxTime:
pulse_start = time.time()
pulse_end = time.time()
while GPIO.input(self.echoPin)==1 and pulse_end < maxTime:
pulse_end = time.time()
if pulse_end > maxTime:
print(" PULSE READ TIMED OUT")
pulse_duration = pulse_end - pulse_start
roundtrip_duration = pulse_duration * self.SPEED_OF_SOUND
one_way_distance = roundtrip_duration/2
print(" Distance: {0:0.2f} cm".format(one_way_distance))
return one_way_distance
def readDistance(self):
#
        # Take multiple readings in order to counter the effects of
# bad data due to non-realtime OS. Take a bunch of readings,
# throw out the min and max, then average the rest.
#
numReadingsToTake = 8
print(" Taking {} Distance Measurements".format(numReadingsToTake))
measurements = []
for x in range(0, numReadingsToTake):
thisReading = self._readDistanceOnce()
measurements.append(thisReading)
maxReading = max(measurements)
minReading = min(measurements)
measurements.remove(maxReading)
measurements.remove(minReading)
average = sum(measurements)/len(measurements)
print(" Average Distance: {0:0.2f} cm".format(average))
return average
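    # Illustrative worked example of the trimmed-mean approach described above
    # (not part of the original module; the readings are hypothetical): given
    # eight samples [60, 61, 59, 60, 62, 5, 60, 200], the outliers 5 and 200
    # are discarded and the remaining six values average to roughly 60.3 cm.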
def teardown(self):
print("Tearing down Ultrasonic Range Finder")
GPIO.output(self.triggerPin, False)
``` |
{
"source": "Johnn3y/ipfs-pin-steem",
"score": 3
} |
#### File: ipfs-pin-steem/ipfspinsteem/__init__.py
```python
import argparse,ipfspinsteem.strings as s
from ipfspinsteem.ipfspinsteem2 import Steem,Parser,IPFS
import ipfshttpclient
parser = argparse.ArgumentParser(description='Extracts IPFS Hashes to Dtube/Dsound, creates an IPFS Object and pins it to an IPFS node.')
parser.add_argument('url', type=str, nargs='+',help='Dtube Video-Url. Example: account/permlink')
parser.add_argument('--api', dest='api', default='/ip4/127.0.0.1/tcp/5001/http',help='IPFS API IP. Default:/ip4/127.0.0.1/tcp/5001/http')
parser.add_argument('--exclude', dest='exclude', nargs='+',help='Exclude something. Example: videohash')
parser.add_argument('--object', dest='object',action='store_true',help='Will wrap all hashes to a single IPFS Object.')
parser.add_argument('--no-pin', dest='nopin',action='store_true',help='Will not pin anything to IPFS')
parser.add_argument('--quiet', dest='quiet',action='store_true',help='Will only print hash(es) and nothing else when successfully finished.')
args=parser.parse_args()
def main():
'''
Connect to IPFS and Steem api.
'''
ipfs=IPFS(args.api)
steem=Steem(steemd_nodes=None)
'''
Get user and permlinks from url argument.
'''
try:
parser=Parser()
info = parser.parseURL(urls=args.url)
except ValueError:
print("Invalid URL. Aborted")
exit(0)
'''
Get IPFS hashes from Steem by content(user and permlink)
'''
try:
hashes=steem.getHashesByContentList(info)
except SyntaxError:
print("SyntaxError. Probablly unavailable user and/or permlink. Aborted")
exit(0)
except KeyboardInterrupt:
print("Interrupted by user")
if hashes is None:
print('No hashes found')
exit(0)
'''
Remove invalid stuff
'''
opts={s.donotadd:args.exclude}
hashes=Steem.removeInvalid(hashes,opts)
'''
Link all Hashes to a new IPFS Object or just add Hashes to list without creating object
'''
#print(ipfs.parseHash("QmbWPdc526RQxMVijVNQEyvz6s1Mh5hT3r4CZ4xhUh6xZk","QmbWPdc526RQxMVijVNQEyvz6s1Mh5hT3r4CZ4xhUh6xZk"))
#objopts={s.donotadd:args.exclude}#Object creation options
liste=[]
for h in hashes:
for e in h[s.permlinks]:
for q in e:
for p in q[s.links]:
if args.quiet == False:
print('extracted',h[s.user],'/',q[s.permlink],'/',p[s.Name],'/',p[s.Hash])
if args.object == False:
liste.append(p[s.Hash])
#ipfs.pin(p[s.Hash])
#print('pinned',p[s.Hash],'recursively')
if args.object == True:
try:
            obj=ipfs.createNewSingleObject(hashes)
except KeyboardInterrupt:
print('interrupted by user')
exit(0)
#obj=obj[0]
        liste.append(obj)
if args.quiet == False:
            print('created object',obj)
#obj=obj[0]
#if args.quiet==False:
'''
Pin List "liste" to IPFS Node
'''
if args.nopin==False:
try:
ipfs.pin(liste)
if args.quiet==False:
for i in liste:
print('pinned',i,'recursively')
else:
for i in liste:
print(i)
except ipfshttpclient.exceptions.DecodingError as e:
print('pinning failed')
print(e)
except KeyboardInterrupt:
print('Pinning Interrupted by user')
elif args.object == True:
if args.quiet== True:
print(liste[0])
``` |
{
"source": "johnna123/pqlearn",
"score": 3
} |
#### File: johnna123/pqlearn/snake_train.py
```python
import joblib
import matplotlib.pyplot as plt
import numpy as np
from pqlearn import QLearn
from snake_game import SnakeGame
def train(iters, warm_start=False, verbose=False, learning_rate=0.8, gamma=0.8, epsilon=0.2,
dont_repeat=False, name="snake_ai.pkl"):
"""
QLearn usage example training in the Snake environment
"""
if warm_start:
ai = joblib.load(name)
else:
ai = QLearn([0, 1, 2, 3])
ai.learning_rate = learning_rate
ai.gamma = gamma
ai.epsilon = epsilon
ai.verbose = verbose
ai.no_repeat = dont_repeat
evals = []
bu_iter = 100
for i in range(1, iters + 1):
game = SnakeGame()
ai = game.demo(ai, light_mode=True)
evals.append(np.sum(np.array([v for v in ai.memory.values()])))
plt.plot(evals, c="b")
plt.pause(0.05)
if not i % bu_iter:
joblib.dump(ai, name)
joblib.dump(ai, name)
if __name__ == '__main__':
train(1000, epsilon=0.8, dont_repeat=True)
``` |
{
"source": "johnnadratowski/examples",
"score": 3
} |
#### File: python/clipboard/clipboard.py
```python
import argparse
import sys
import log
import json
import os
import time
import threading
import pyperclip
class ClipboardWatcher(threading.Thread):
def __init__(self, history):
super(ClipboardWatcher, self).__init__()
self._history = history
self._stopping = False
def run(self):
recent = ""
while not self._stopping:
tmp = pyperclip.paste()
if tmp != recent:
recent = tmp
print("Found clipboard item: ", tmp)
self._history.append(tmp)
time.sleep(1)
def stop(self):
self._stopping = True
def load_history(history_conf):
if os.path.exists(history_conf):
try:
            history_file = open(history_conf, 'r')
except:
log.exception("Couldn't read history file")
sys.exit(1)
try:
history_entries = json.load(history_file)
except:
log.exception("An error occurred loading history file")
sys.exit(1)
else:
history_entries = []
return history_entries
def cleanup(watcher, entries, args):
    with open(os.path.expanduser(args.history), 'w') as history_file:
        json.dump(entries, history_file)
watcher.stop()
def main(args):
    entries = load_history(os.path.expanduser(args.history))
watcher = ClipboardWatcher(entries)
watcher.start()
while True:
try:
print("Waiting for changed clipboard...")
time.sleep(10)
except KeyboardInterrupt:
cleanup(watcher, entries, args)
sys.exit(0)
except:
log.exception("An exception occurred during processing")
cleanup(watcher, entries, args)
sys.exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('--history', default="~/.cliphistory",
help='the clipboard history to load')
args = parser.parse_args()
main(args)
```
#### File: python/concurrency/process_pool_5.py
```python
import pprint
import requests
import sys
from multiprocessing import Pool
def get_extract(idx, title, id):
print("RUNNING PROCESS %s" % idx)
print("GETTING PAGE %s EXTRACT" % idx)
content = requests.get('https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&exintro=&explaintext=&titles='+str(title))
data = content.json()
extract = {'title': title, 'extract': data['query']['pages'][str(id)]['extract']}
print("FINISHED PAGE %s EXTRACT" % idx)
print("FINISHED PROCESS %s" % idx)
return extract
def get_extracts(limit=5):
pages = get_pages(limit=limit)
pages_list = pages['query']['random']
print("CREATING POOL")
# Create a pool of processes to pick up stuff
pool = Pool(limit)
print("POOL CREATED")
extracts = []
for idx, page in enumerate(pages_list):
args = (idx, page['title'], page['id'])
# Apply the function asynchronously to the pool - NON BLOCKING
extracts.append(pool.apply_async(get_extract, args))
pool.close()
pool.join()
return extracts
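# Minimal sketch of the same Pool / apply_async / AsyncResult.get() pattern
# used above, with a trivial worker in place of the Wikipedia calls.
# The names _square and _pool_pattern_demo are illustrative additions and are
# not part of the original script.
def _square(n):
    return n * n

def _pool_pattern_demo():
    pool = Pool(4)
    # apply_async returns immediately with an AsyncResult handle (non-blocking)
    results = [pool.apply_async(_square, (n,)) for n in range(8)]
    pool.close()
    pool.join()
    # .get() blocks until each individual result is available
    return [r.get() for r in results]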
def get_pages(limit=5):
pages = requests.get('https://en.wikipedia.org/w/api.php?action=query&list=random&rnnamespace=0&format=json&rnlimit='+str(limit))
return pages.json()
if __name__ == "__main__":
import time
start = time.time()
try:
limit = 10
if len(sys.argv) > 1:
            limit = int(sys.argv[1])
extracts = get_extracts(limit=limit)
all_extracts = []
for extract in extracts:
# Consume extracts from pool AsyncResult
all_extracts.append(extract.get())
pprint.pprint(all_extracts)
except:
import traceback
traceback.print_exc()
print("TOTAL TIME: ", time.time() - start)
```
#### File: python/profiling/gen_file_1.py
```python
import sys, traceback
DATA = b'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
def gen_file(size, filename):
output = []
outfile = open(filename, mode='w')
while True:
append_data(output, outfile, size)
if len(output) >= size:
outfile.write(str(bytes(output)))
return
def append_data(output, outfile, size):
for x in DATA:
output.append(x)
if len(output) >= size:
return
if __name__ == "__main__":
try:
gen_file(int(sys.argv[1]), sys.argv[2])
sys.exit(0)
except Exception as e:
traceback.print_exc()
sys.exit(1)
``` |
{
"source": "johnnadratowski/git-reviewers",
"score": 3
} |
#### File: git-reviewers/git_reviewers/cli.py
```python
import argparse
from os.path import abspath
from git_reviewers.reviewers import get_git_branches, get_reviewers
def run():
parser = argparse.ArgumentParser(description="Get the suggested reviewers for a commit")
parser.add_argument('--branch', '-b',
required=False,
help="Check for a PR against a specific branch")
parser.add_argument('--contributor', '-c',
required=False,
help="View lines of code for a specific contributor")
parser.add_argument('--output', '-o',
required=False,
default="default",
help="The output format: default|raw. Raw dumps json in-memory data structures for debugging and"
"consumption by other applications.")
parser.add_argument('files', metavar='file', type=str, nargs='*',
help='Only show reviewers for certain files. If none specified, shows reviewers for all files')
args = parser.parse_args()
if args.branch:
branch = args.branch
elif 'develop' in get_git_branches():
branch = 'develop'
else:
branch = 'master'
if args.files:
args.files = [abspath(path) for path in args.files]
get_reviewers(args.contributor, branch, args.files, args.output)
``` |
{
"source": "johnnadratowski/postman-repl",
"score": 2
} |
#### File: postman-repl/examples/middleware.py
```python
def authentication_authenticate(run, kwargs, env):
result = run(kwargs)
try:
env.token = result.json()["token"]
except:
print("Couldnt set token!")
return result
```
#### File: postman-repl/postman_repl/postman_repl_test.py
```python
import unittest
import urllib
import postman_repl as pmr
import json
class TestO(unittest.TestCase):
def test_init(self):
test = pmr.O(x=1, y=2)
self.assertDictEqual(test.__dict__, {'x': 1, 'y': 2})
def test_get(self):
test = pmr.O(x=1, y=2)
self.assertEqual(test["x"], 1)
self.assertEqual(test.x, 1)
def test_set(self):
test = pmr.O(x=1, y=2)
test.x = 2
self.assertEqual(test["x"], 2)
self.assertEqual(test.x, 2)
def test_del(self):
test = pmr.O(x=1, y=2)
del test.x
self.assertEqual(test.x, None)
self.assertDictEqual(test.__dict__, {'y': 2})
def test_iter(self):
test = pmr.O(x=1, y=2)
for k in test:
self.assertTrue(k == "x" or k == "y")
self.assertTrue(test[k] == 1 or test[k] == 2)
def test_todict(self):
test = pmr.O(x=1, y=2)
self.assertDictEqual(test.__dict__, {'x': 1, 'y': 2})
def test_todict_recursive(self):
test = pmr.O(x=1, y=2, z=pmr.O(x=1, y=2))
self.assertDictEqual(test._to_dict_recursive(),
{'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}})
def test_tojson(self):
test = pmr.O(x=1, y=2, z=pmr.O(x=1, y=2))
self.assertDictEqual(json.loads(test._to_json()),
{'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}})
def test_new_recursive(self):
expect = pmr.O(x=1, y=2, z=pmr.O(x=1, y=2))
test = {'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}}
test = pmr.new_recursive(**test)
self.assertEqual(test._to_dict_recursive(), expect._to_dict_recursive())
def test_new_recursive_list(self):
expect = [
{'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}},
{'x': 1, 'y': 2, 'z': {'x': 1, 'y': 2}}]
test = [
pmr.O(
x=1, y=2, z=pmr.O(
x=1, y=2)), pmr.O(
x=1, y=2, z=pmr.O(
x=1, y=2))]
test = pmr.new_recursive_list(*test)
self.assertListEqual([x._to_dict_recursive() for x in test], expect)
class TestPostmanRepl(unittest.TestCase):
def setUp(self):
self.coll_file = "../examples/JIRA.json.postman_collection"
self.env_file = "../examples/test.env"
self.mw_file = "../examples/middleware.py"
self.collection = pmr.load_collection(self.coll_file)
self.env = pmr.load_environment(self.env_file)
self.mw = pmr.load_middleware(self.mw_file)
def tearDown(self):
pmr.H.history = []
pmr.R = None
pmr.J = None
pmr.D = None
pmr.MW = pmr.O()
pmr.E = pmr.O()
pmr.P = None
def test_load_collection(self):
self.assertTrue("sprints" in self.collection)
self.assertTrue("META" in self.collection["sprints"])
self.assertTrue("rapidview" in self.collection["sprints"])
self.assertTrue("sprint" in self.collection["sprints"])
self.assertTrue("sprint_issues" in self.collection["sprints"])
self.assertTrue("users" in self.collection)
self.assertTrue("META" in self.collection["users"])
self.assertTrue("search_username" in self.collection["users"])
def test_load_environment(self):
self.assertDictEqual(self.env._to_dict_recursive(), {
"host": "localhost",
"protocol": "http",
"port": "8081",
"username": "user",
"password": "password",
})
def test_middleware(self):
called = [False]
def middleware(run, kwargs, env):
called[0] = True
middlewares = pmr.O(sprints_sprint=middleware)
self.collection["sprints"]["sprint"](env=self.env, middlewares=middlewares)
self.assertTrue(called[0])
def test_get_default_data(self):
expect = {'password': '', 'username': ''}
test = self.collection["sprints"]["sprint"].default_data().__dict__
self.assertDictEqual(expect, test)
def test_call(self):
called = [False, False, False]
i = [0]
def middleware(run, kwargs, env):
called[i[0]] = True
i[0] += 1
middlewares = pmr.O(sprints_sprint=middleware,
sprints_sprint_issues=middleware,
sprints_rapidview=middleware)
self.collection["sprints"]["sprint"](params={'includeHistoricSprints': 'false'}, env=self.env, middlewares=middlewares)
self.collection["sprints"]["sprint_issues"](env=self.env, middlewares=middlewares)
self.collection["sprints"]["rapidview"](env=self.env, middlewares=middlewares)
self.assertTrue(all(called))
url = urllib.parse.urlparse(pmr.H.history[0].url)
self.assertEqual(url.path, "/rest/greenhopper/latest/sprintquery/")
self.assertDictEqual(urllib.parse.parse_qs(url.query), {'includeHistoricSprints': ['false'], 'includeFutureSprints': ['true']})
self.assertEqual(pmr.H.history[1].url, "https://unified.jira.com/rest/greenhopper/latest/rapid/charts/sprintreport")
self.assertEqual(pmr.H.history[2].url, "https://unified.jira.com/rest/greenhopper/latest/rapidviews/list")
def test_history(self):
called = [False, False, False, False]
i = [0]
def middleware(run, kwargs, env):
called[i[0]] = True
i[0] += 1
middlewares = pmr.O(sprints_sprint=middleware,
sprints_sprint_issues=middleware,
sprints_rapidview=middleware)
self.collection["sprints"]["sprint"](env=self.env, middlewares=middlewares)
self.collection["sprints"]["sprint_issues"](env=self.env, middlewares=middlewares)
self.collection["sprints"]["rapidview"](env=self.env, middlewares=middlewares)
self.assertEqual(len(pmr.H.history), 3)
pmr.H(0)
self.assertTrue(all(called))
def test_help(self):
expect = """Sprints / Sprint:
GET https://unified.jira.com/rest/greenhopper/latest/sprintquery/{{rapidViewId}}?includeHistoricSprints=true&includeFutureSprints=true
Get a specific sprint from Jira
Default Headers:
Authorization: Basic am9objphbkdlbDgz
Default Data:
{
"username": "{{username}}",
"password": "{{password}}"
}"""
self.assertEqual(expect, self.collection["sprints"]["sprint"].__doc__)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "John-Nagle-IV/Python-DND-5e-Scraper",
"score": 3
} |
#### File: Python-DND-5e-Scraper/scrape5e/docparser.py
```python
import BeautifulSoup
NO_TABLE = {
"default": [
{}
]
}
def extract_table_data(table_element):
# type: (BeautifulSoup.Tag) -> dict
current_fieldnames = []
field_obj = {}
current_title = "default"
for table_row in table_element.findAll("tr"):
row = [table_header.text for table_header in table_row.findAll("th")]
if len(row) == 1:
current_title = row[0]
elif len(row) > 1:
current_fieldnames = row
else:
data = [table_data.text for table_data in table_row.findAll("td")]
field_obj.setdefault(current_title, []).append(dict(zip(current_fieldnames, data)))
caption = table_element.find("caption")
if current_title == "default" and caption:
field_obj[caption.text.replace("Table: ", "")] = field_obj["default"]
del field_obj["default"]
return field_obj
def extract_all_tables_from_page(page):
# type: (BeautifulSoup) -> list
all_tables = [extract_table_data(table) for table in page.findAll("table")]
return list(filter(lambda data: data != NO_TABLE, all_tables))
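# Minimal usage sketch (an illustrative addition, not part of the original
# module). It assumes the legacy BeautifulSoup 3 API provided by the import
# above, where the parser class lives at BeautifulSoup.BeautifulSoup.
def _example_usage():
    html = (
        "<table>"
        "<tr><th>Level</th><th>Proficiency Bonus</th></tr>"
        "<tr><td>1</td><td>+2</td></tr>"
        "</table>"
    )
    page = BeautifulSoup.BeautifulSoup(html)
    # Expected result (one dict per table):
    # [{'default': [{'Level': '1', 'Proficiency Bonus': '+2'}]}]
    return extract_all_tables_from_page(page)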
``` |
{
"source": "JohnNay/forecastVeg",
"score": 3
} |
#### File: JohnNay/forecastVeg/logger.py
```python
import sys, time
logfile = None
def log(category, message, console = True):
global logfile
categories = ['INFO','WARNING','ERROR','SUCCESS']
cat_len = max(map(len,categories))
logtext = '%*s %s\n' % (-(cat_len + 1),category+':',message)
if console:
sys.stderr.write(logtext)
if logfile:
logfile.write(logtext)
def init_logfile(filename):
global logfile
logfile = open(filename,'a')
logfile.write("""\
*******************************************************
Initializing Log File: %s
*******************************************************
""" % time.ctime())
def check_logfile():
global logfile
if logfile:
return logfile.name
else:
return None
def close_logfile():
global logfile
if logfile:
logfile.close()
logfile = None
``` |
{
"source": "johnneerdael/python-bloxone",
"score": 2
} |
#### File: lib/bloxone/bloxone.py
```python
import logging
import configparser
import requests
import os
import re
import json
# ** Global Vars **
__version__ = '0.7.4'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__doc__ = 'https://python-bloxone.readthedocs.io/en/latest/'
__license__ = 'BSD'
# Custom Exceptions
class IniFileSectionError(Exception):
'''
Exception for missing section in ini file
'''
pass
class IniFileKeyError(Exception):
'''
Exception for missing key in ini file
'''
pass
class APIKeyFormatError(Exception):
'''
Exception for API key format mismatch
'''
pass
# ** Facilitate ini file for basic configuration including API Key
def read_b1_ini(ini_filename):
'''
Open and parse ini file
Parameters:
ini_filename (str): name of inifile
Returns:
config (dict): Dictionary of BloxOne configuration elements
Raises:
IniFileSectionError
IniFileKeyError
APIKeyFormatError
FileNotFoundError
'''
# Local Variables
cfg = configparser.ConfigParser()
config = {}
ini_keys = ['url', 'api_version', 'api_key']
# Check for inifile and raise exception if not found
if os.path.isfile(ini_filename):
# Attempt to read api_key from ini file
try:
cfg.read(ini_filename)
except configparser.Error as err:
logging.error(err)
# Look for BloxOne section
if 'BloxOne' in cfg:
for key in ini_keys:
# Check for key in BloxOne section
if key in cfg['BloxOne']:
config[key] = cfg['BloxOne'][key].strip("'\"")
logging.debug('Key {} found in {}: {}'
.format(key, ini_filename, config[key]))
else:
logging.error('Key {} not found in BloxOne section.'
.format(key))
raise IniFileKeyError('Key "' + key + '" not found within'
'[BloxOne] section of ini file {}'.format(ini_filename))
else:
logging.error('No BloxOne Section in config file: {}'
.format(ini_filename))
raise IniFileSectionError('No [BloxOne] section found in ini file {}'
.format(ini_filename))
# Verify format of API Key
if verify_api_key(config['api_key']):
logging.debug('API Key passed format verification')
else:
logging.debug('API Key {} failed format verification'
.format(config['api_key']))
raise APIKeyFormatError('API Key {} failed format verification'
.format(config['api_key']))
else:
raise FileNotFoundError('ini file "{}" not found.'.format(ini_filename))
return config
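# Illustrative layout of the ini file parsed above (the values are
# placeholders, not real credentials; the api_key only needs to satisfy
# verify_api_key(), i.e. 32 or 64 alphanumeric characters):
#
#   [BloxOne]
#   url = 'https://csp.infoblox.com'
#   api_version = 'v1'
#   api_key = 'abcdefabcdefabcdefabcdefabcdef12'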
def verify_api_key(apikey):
'''
Verify format of API Key
Parameters:
apikey (str): api key
Returns:
bool: True is apikey passes format validation
'''
if re.fullmatch('[a-z0-9]{32}|[a-z0-9]{64}', apikey, re.IGNORECASE):
status = True
else:
status = False
return status
class b1:
'''
Parent Class to simplify access to the BloxOne APIs for subclasses
    Can also be used to generically access the API
Raises:
IniFileSectionError
IniFileKeyError
APIKeyFormatError
FileNotFoundError
'''
def __init__(self, cfg_file='config.ini'):
'''
Read ini file and set attributes
        Parameters:
cfg_file (str): Override default ini filename
'''
self.cfg = {}
# Read ini file
self.cfg = read_b1_ini(cfg_file)
# Define generic header
self.api_key = self.cfg['api_key']
self.headers = ( {'content-type': 'application/json',
'Authorization': 'Token ' + self.api_key} )
# Create base URLs
self.base_url = self.cfg['url']
self.api_version = self.cfg['api_version']
self.anycast_url = self.base_url + '/api/anycast/' + self.cfg['api_version']
self.authn_url = self.base_url + '/api/authn/' + self.cfg['api_version']
self.bootstrap_url = self.base_url + '/bootstrap-app/' + self.cfg['api_version']
self.cdc_url = self.base_url + '/api/cdc-flow/' + self.api_version
self.diagnostics_url = self.base_url + '/diagnostic-service/' + self.api_version
self.ddi_url = self.base_url + '/api/ddi/' + self.api_version
self.host_url = self.base_url + '/api/host_app/' + self.cfg['api_version']
self.notifications_url = self.base_url + '/atlas-notifications-config/'+ self.api_version
self.sw_url = self.base_url + '/api/upgrade_policy/' + self.cfg['api_version']
self.ztp_url = self.base_url + '/atlas-host-activation/' + self.cfg['api_version']
self.tdc_url = self.base_url + '/api/atcfw/' + self.cfg['api_version']
self.tdep_url = self.base_url + '/api/atcep/' + self.cfg['api_version']
self.tddfp_url = self.base_url + '/api/atcdfp/' + self.cfg['api_version']
self.tdlad_url = self.base_url + '/api/atclad/' + self.cfg['api_version']
self.tide_url = self.base_url + '/tide/api'
self.dossier_url = self.base_url + '/tide/api/services/intel/lookup'
self.threat_enrichment_url = self.base_url + '/tide/threat-enrichment'
# List of successful return codes
self.return_codes_ok = [200, 201, 204]
return
def _add_params(self, url, first_param=True, **params):
# Add params to API call URL
if len(params):
for param in params.keys():
if first_param:
url = url + '?'
first_param = False
else:
url = url + '&'
url = url + param + '=' + params[param]
return url
def _apiget(self, url):
# Call BloxOne API
try:
response = requests.request("GET",
url,
headers=self.headers)
# Catch exceptions
except requests.exceptions.RequestException as e:
logging.error(e)
logging.debug("url: {}".format(url))
raise
# Return response code and body text
# return response.status_code, response.text
return response
def _apipost(self, url, body, headers=""):
# Set headers
if not headers:
headers = self.headers
# Call BloxOne API
try:
response = requests.request("POST",
url,
headers=headers,
data=body)
# Catch exceptions
except requests.exceptions.RequestException as e:
logging.error(e)
logging.debug("url: {}".format(url))
logging.debug("body: {}".format(body))
raise
# Return response code and body text
return response
def _apidelete(self, url, body=""):
# Call BloxOne API
try:
response = requests.request("DELETE",
url,
headers=self.headers,
data=body)
# Catch exceptions
except requests.exceptions.RequestException as e:
logging.error(e)
logging.debug("URL: {}".format(url))
logging.debug("BODY: {}".format(body))
raise
# Return response code and body text
return response
def _apiput(self, url, body):
# Call BloxOne API
try:
response = requests.request("PUT",
url,
headers=self.headers,
data=body)
# Catch exceptions
except requests.exceptions.RequestException as e:
logging.error(e)
logging.debug("url: {}".format(url))
logging.debug("body: {}".format(body))
raise
# Return response code and body text
return response
def _apipatch(self, url, body):
# Call BloxOne API
try:
response = requests.request("PATCH",
url,
headers=self.headers,
data=body)
# Catch exceptions
except requests.exceptions.RequestException as e:
logging.error(e)
logging.debug("url: {}".format(url))
logging.debug("body: {}".format(body))
raise
# Return response code and body text
return response
def _use_obj_id(self, url, id="", action=""):
'''
Update URL for use with object id
Parameters:
id (str): Bloxone Object id
            action (str): optional object action, e.g. "nextavailableip"
Returns:
string : Updated url
'''
# Check for id and next available IP
if id:
url = url + '/' + id
if action:
url = url + '/' + action
else:
if action:
logging.debug("Action {} not supported without "
"a specified ovject id.")
return url
# Public Generic Methods
def get(self, url, id="", action="", **params):
'''
Generic get object wrapper
Parameters:
url (str): Full URL
id (str): Optional Object ID
action (str): Optional object action, e.g. "nextavailableip"
Returns:
response object: Requests response object
'''
# Build url
url = self._use_obj_id(url,id=id)
url = self._add_params(url, **params)
logging.debug("URL: {}".format(url))
response = self._apiget(url)
return response
def create(self, url, body=""):
'''
Generic create object wrapper
Parameters:
url (str): Full URL
body (str): JSON formatted data payload
Returns:
response object: Requests response object
'''
# Build url
logging.debug("URL: {}".format(url))
# Make API Call
response = self._apipost(url, body)
return response
def delete(self, url, id=""):
'''
Generic delete object wrapper
Parameters:
url (str): Full URL
id (str): Object id to delete
Returns:
response object: Requests response object
'''
# Build url
url = self._use_obj_id(url,id=id)
logging.debug("URL: {}".format(url))
# Make API Call
response = self._apidelete(url)
return response
def update(self, url, id="", body=""):
'''
        Generic update object wrapper
Parameters:
url (str): Full URL
body (str): JSON formatted data payload
Returns:
response object: Requests response object
'''
# Build url if needed
url = self._use_obj_id(url, id=id)
logging.debug("URL: {}".format(url))
# Make API Call
response = self._apiput(url, body)
return response
def replace(self, url, id="", body=""):
'''
        Generic replace object wrapper
Parameters:
url (str): Full URL
body (str): JSON formatted data payload
Returns:
response object: Requests response object
'''
# Build url
url = self._use_obj_id(url, id=id)
logging.debug("URL: {}".format(url))
# Make API Call
response = self._apipatch(url, body)
return response
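    # Illustrative usage sketch (not part of the class): the endpoint path
    # shown is only an example and assumes a valid config.ini is present.
    #
    #   b1_api = b1(cfg_file='config.ini')
    #   response = b1_api.get(b1_api.ddi_url + '/ipam/ip_space')
    #   if response.status_code in b1_api.return_codes_ok:
    #       print(response.json())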
```
#### File: lib/bloxone/dhcputils.py
```python
__version__ = '0.2.7'
__author__ = '<NAME>'
__author_email__ = '<EMAIL>'
import logging
import ipaddress
import os
import yaml
import binascii
import bloxone
from pprint import pprint
# ** Global Vars **
# DHCP Encoding Utils
class dhcp_encode():
'''
Class to assist with Hex Encoding of
DHCP Options and sub_options
'''
def __init__(self) -> None:
self.opt_types = [ 'string',
'ipv4_address',
'ipv6_address',
'boolean',
'int8',
'uint8',
'int16',
'uint16',
'int32',
'uint32',
'fqdn',
'binary',
'empty' ]
self.fqdn_re, self.url_re = bloxone.utils.buildregex()
return
def validate_ip(self, ip):
'''
Validate input data is a valid IP address
(Supports both IPv4 and IPv6)
Parameters:
ip (str): ip address as a string
Returns:
bool: Return True for valid and False otherwise
'''
try:
ipaddress.ip_address(ip)
result = True
except ValueError:
result = False
return result
# IP encondings
def ip_to_hex(self, ip):
'''
Encode an IPv4 or IPv6 address to hex
Parameters:
ip (str): IPv4 or IPv6 address as a string
Returns:
hex encoding as string
'''
if self.validate_ip(ip):
ip = ipaddress.ip_address(ip)
hex_value = '{:x}'.format(ip)
else:
logging.error(f'{ip} not a valid IP')
hex_value = ''
return hex_value
# Methods for IPv4 and IPv6
def ipv4_address_to_hex(self, ipv4):
'''
Encode an IPv4 address to hex
Parameters:
ipv4 (str): IPv4 address as a string
Returns:
hex encoding as string
'''
return self.ip_to_hex(ipv4)
def ipv6_address_to_hex(self, ipv6):
'''
Encode an IPv6 address to hex
Parameters:
ipv6 (str): IPv4 or IPv6 address as a string
Returns:
hex encoding as string
'''
return self.ip_to_hex(ipv6)
# String/text encoding
def string_to_hex(self, string):
'''
Encode a text string to hex
Parameters:
string (str): text string
Returns:
hex encoding as string
'''
s = str(string).encode('utf-8')
return s.hex()
# Boolean encoding
def boolean_to_hex(self, flag):
'''
Encode boolean value as single hex byte
Parameters:
flag (bool/str): True or False as bool or text
Returns:
hex encoding as string
'''
# Handle Bool or str
if not isinstance(flag, bool):
if isinstance(flag, str):
if flag.casefold() == 'true':
flag = True
else:
flag = False
else:
flag = False
# Set hex value
if flag:
hex = '01'
else:
hex = '00'
return hex
# integer encodings
def int_to_hex(self, i, size = 8):
'''
Encode integer of specified size as signed int in hex
Parameters:
i (int): integer value to encode
size (int): size in bits [8, 16, 32]
Returns:
hex encoding as string
'''
hex_value = ''
i = int(i)
i_sizes = [ 8, 16, 32 ]
if size in i_sizes:
max_bits = size - 1
no_bytes = size // 4
fmt = f'{{:0{no_bytes}x}}'
            if i >= 0 and i < (2**max_bits):
hex_value = fmt.format(int(i))
elif abs(i) <= (2**max_bits):
hex_value = fmt.format(int(abs(i) + (2**max_bits)))
else:
raise TypeError(f'{i} is out of range for int{size} type')
return hex_value
def uint_to_hex(self, i, size = 8):
'''
Encode integer of specified size as unsigned int in hex
Uses 2's compliment if supplied with negative number
Parameters:
i (int): integer value to encode
size (int): size in bits [8, 16, 32]
Returns:
hex encoding as string
'''
i = int(i)
i_sizes = [ 8, 16, 32 ]
if size in i_sizes:
max_size = 2**size
no_octets = size // 4
fmt = f'{{:0{no_octets}x}}'
if i < 0 and abs(i) < max_size:
hex_str = fmt.format(i + (2**size))
elif i < max_size:
hex_str = fmt.format(i)
else:
raise TypeError(f'{i} is out of range for uint{size} type')
return hex_str
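    # Worked examples for the integer encodings above (illustrative only):
    #   int8_to_hex(22)   -> '16'
    #   int8_to_hex(-22)  -> '96'   (sign bit set, magnitude 22)
    #   uint8_to_hex(200) -> 'c8'
    #   uint16_to_hex(33) -> '0021'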
# Methods for intX and uintX
def int8_to_hex(self, value):
return self.int_to_hex(value, size=8)
def uint8_to_hex(self, value):
return self.uint_to_hex(value, size=8)
def int16_to_hex(self, value):
return self.int_to_hex(value, size=16)
def uint16_to_hex(self, value):
return self.uint_to_hex(value, size=16)
def int32_to_hex(self, value):
return self.int_to_hex(value, size=32)
def uint32_to_hex(self, value):
return self.uint_to_hex(value, size=32)
# FDQN Encoding
def fqdn_to_hex(self, fqdn):
'''
        Encode an fqdn in RFC 1035 Section 3.1 formatted hex
Parameters:
fqdn (str): hostname to encode
Returns:
hex encoding as string
'''
hex = ''
hex_label = ''
# Validate FQDN
if bloxone.utils.validate_fqdn(fqdn, self.fqdn_re):
if fqdn.endswith("."):
# strip exactly one dot from the right, if present
fqdn = fqdn[:-1]
# Encode labels
for label in fqdn.split("."):
hex_label = self.string_to_hex(label)
hex_len = self.hex_length(hex_label)
hex += hex_len + hex_label
# Terminate with null byte
hex += '00'
else:
logging.error(f'{fqdn} is not a valid FQDN')
return hex
# Binary Encoding
def binary_to_hex(self, data):
'''
Format hex string of binary/hex encoded data
Parameters:
data (str): data to format
Returns:
hex encoding as string
'''
hex_value = ''
base = 16
# Check for binary
if data[:2] == '0b':
base = 2
# Force hex encoding without 0x using base
hex_value = '{:02x}'.format(int(data, base))
return hex_value
# Empty Encoding
def empty_to_hex(self, data):
'''
        Return empty hex string ''
Parameters:
data (str): Data not to encode (should be empty)
Returns:
Empty String ''
'''
if data:
data = ''
return data
# Code and Length encoding
def optcode_to_hex(self, optcode):
'''
Encode Option Code in hex (1-octet)
Parameters:
optcode (str/int): Option Code
Returns:
hex encoding as string
'''
hex_opt = '{:02x}'.format(int(optcode))
return hex_opt
def hex_length(self, hex_string):
'''
Encode Option Length in hex (1-octet)
Parameters:
hex_string (str): Octet Encoded Hex String
Returns:
Number of Hex Octects as hex encoded string
'''
hex_len = '{:02x}'.format(int(len(hex_string) / 2))
return hex_len
# Encoding Methods
def encode_data(self, sub_opt, padding=False, pad_bytes=1):
'''
Encode the data section of a sub_option definition
Parameters:
sub_opt (dict): Dict containing sub option details
Must include 'data' and 'type' keys
padding (bool): Whether extra 'null' termination bytes are req.
pad_bytes (int): Number of null bytes to append
Returns:
Hex encoded data for specified data-type as string
'''
hex_data = ''
if sub_opt['type'] in self.opt_types:
type_to_hex = eval('self.' + sub_opt['type'] + '_to_hex')
else:
logging.error(f'Unsupported Option Type {sub_opt["type"]}')
logging.info('Unsupported option type, ' +
'attempting to process as string')
type_to_hex = eval('self.string_to_hex')
# Check for array attribute
if 'array' in sub_opt.keys():
array = sub_opt['array']
else:
logging.debug(f'No array attribute for option: {sub_opt["code"]}')
array = False
# Encode data
if array:
for item in sub_opt['data'].split(','):
hex_data += type_to_hex(item)
else:
hex_data = type_to_hex(sub_opt['data'])
if padding:
hex_data += pad_bytes * '00'
return hex_data
def encode_sub_option(self, sub_opt,
data_only=False,
padding=False,
pad_bytes=1):
'''
Encode individual sub option
Parameters:
sub_opt (dict): Sub Option Definition, as dict.
data_only (bool): Encode data portion only if True
(Note the sub_opt dict is also checked for
the 'data-only' key)
padding (bool): Whether extra 'null' termination bytes are req.
pad_bytes (int): Number of null bytes to append
Returns:
Encoded suboption as a hex string
'''
# Local variables
hex_value = ''
hex_opt = ''
hex_len = ''
hex_data = ''
# Check whether 'data-only' is defined in sub_option
if 'data-only' in sub_opt.keys():
data_only = sub_opt['data-only']
# Check whether to encode only the data or whole sub option
if data_only:
hex_data = self.encode_data(sub_opt)
hex_value = hex_data
else:
if int(sub_opt['code']) in range(0, 256):
hex_opt = self.optcode_to_hex(sub_opt['code'])
hex_data = self.encode_data(sub_opt)
hex_len = self.hex_length(hex_data)
hex_value = hex_opt + hex_len + hex_data
else:
# Log error (or potentially raise exception or something)
logging.error(f'Option Code: {sub_opt["code"]} out of range.')
hex_value = ''
return hex_value
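    # Worked example of the code/length/data layout produced above
    # (illustrative only):
    #   encode_sub_option({'code': '1', 'type': 'string', 'data': 'abc'})
    #     -> '01' (code 1) + '03' (3 data octets) + '616263' ('abc')
    #     == '0103616263'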
def encode_dhcp_option(self,
sub_opt_defs=[],
padding=False,
pad_bytes=1,
encapsulate=False,
id=None,
prefix='' ):
'''
Encode list of DHCP Sub Options to Hex
Parameters:
sub_opt_defs (list): List of Sub Option definition dictionaries
padding (bool): Whether extra 'null' termination bytes are req.
pad_bytes (int): Number of null bytes to append
encapsulate (bool): Add id and total length as prefix
id (int): option code to prepend
prefix (str): String value to prepend to encoded options
Returns:
Encoded suboption as a hex string
'''
hex_value = ''
# Encode sub_options
for opt in sub_opt_defs:
hex_value += self.encode_sub_option(opt)
if encapsulate:
            total_len = self.hex_length(hex_value)
main_opt = self.optcode_to_hex(id)
hex_value = main_opt + total_len + hex_value
if prefix:
            hex_value = prefix + hex_value
return hex_value
def tests(self):
'''
Run through encoding methods and output example results
'''
test_data = [ { 'code': '1', 'type': 'string',
'data': 'AABBDDCCEEDD-aabbccddeeff' },
{ 'code': '2', 'type': 'ipv4_address',
'data': '10.10.10.10' },
{ 'code': '3', 'type': 'ipv4_address',
'data': '10.10.10.10,11.11.11.11', 'array': True },
{ 'code': '4', 'type': 'boolean', 'data': True },
{ 'code': '5', 'type': 'int8', 'data': '22' },
{ 'code': '5', 'type': 'int8', 'data': '-22' },
{ 'code': '6', 'type': 'uint8', 'data': '22' },
{ 'code': '7', 'type': 'int16', 'data': '33'},
{ 'code': '8', 'type': 'int16', 'data': '33'},
{ 'code': '9', 'type': 'uint16', 'data': '33'},
{ 'code': '10', 'type': 'int32', 'data': '44'},
{ 'code': '11', 'type': 'uint32', 'data': '-44'},
{ 'code': '12', 'type': 'uint32', 'data': '44'},
{ 'code': '13', 'type': 'fqdn',
'data': 'www.infoblox.com' },
{ 'code': '14', 'type': 'binary', 'data': 'DEADBEEF' },
{ 'code': '15', 'type': 'empty', 'data': ''},
{ 'code': '16', 'type': 'ipv6_address',
'data': '2001:DB8::1' },
{ 'code': '17', 'type': 'ipv6_address',
'data': '2001:DB8::1,2001:DB8::2', 'array': True } ]
print(f'Encoding types supported: {self.opt_types}')
print()
print('Data tests:')
for data_test in test_data:
result = self.encode_data(data_test)
hex_len = self.hex_length(result)
print(f'Type: {data_test["type"]}: {data_test["data"]}, ' +
f'Encoded: {result}, Length(hex): {hex_len}')
print()
# Padding Test
test_data = { 'code': '99', 'type': 'string', 'data': 'AABBCCDD' }
result = self.encode_data(test_data, padding=True)
print(f'Padding test (1 byte), type string: {test_data["data"]}' +
f' {result}')
# Full encode test
test_data = [ { 'code': '1', 'type': 'string',
'data': 'AABBDDCCEEDD-aabbccddeeff' },
{ 'code': '2', 'type': 'ipv4_address',
'data': '10.10.10.10' },
{ 'code': '3', 'type': 'ipv4_address',
'data': '10.10.10.10,11.11.11.11', 'array': True },
{ 'code': '4', 'type': 'boolean', 'data': True },
{ 'code': '5', 'type': 'int8', 'data': '22' } ]
result = self.encode_dhcp_option(test_data)
print(f'Full encoding of sample: {result}')
return
# *** Class to handle Vendor Option Definitions Dictionary in YAML ***
class DHCP_OPTION_DEFS():
'''
Class to load and handle DHCP Option Defs
'''
def __init__(self, cfg='vendor_dict.yaml'):
'''
Initialise Class Using YAML config
Parameters:
cfg (str): Config file to load, default vendor_dict.yaml
Raises:
FileNotFoundError is yaml file is not found
'''
self.config = {}
# Check for yaml file and raise exception if not found
if os.path.isfile(cfg):
# Read yaml configuration file
try:
self.config = yaml.safe_load(open(cfg, 'r'))
except yaml.YAMLError as err:
logging.error(err)
raise
else:
logging.error(f'No such file {cfg}')
raise FileNotFoundError(f'YAML config file "{cfg}" not found.')
return
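    # Illustrative shape of the YAML this class expects, based on the keys
    # accessed by the methods below (the vendor name, codes and values are
    # hypothetical):
    #
    #   version: 0.1
    #   vendors:
    #     SampleVendor:
    #       description: Example vendor definition
    #       prefix: ''
    #       option-def:
    #         parent-option:
    #           code: 43
    #           name: vendor-encapsulated-options
    #         sub-options:
    #           - code: 1
    #             name: config-server
    #             type: ipv4_address
    #             data: 192.168.1.10
    #             array: false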
def version(self):
'''
Returns:
str containing config file version or 'Version not defined'
'''
if 'version' in self.config.keys():
version = self.config['version']
else:
version = 'Version not defined'
return version
def keys(self):
'''
Returns:
list of top level keys
'''
return self.config.keys()
def vendors(self):
'''
Returns:
list of defined vendors
'''
return self.config['vendors'].keys()
def vendor_keys(self, vendor):
'''
Returns vendor top level keys
Parameters:
vendor (str): Vendor Identifier
Returns:
list of keys defined for a vendor
'''
if self.included(vendor):
response = self.config['vendors'][vendor].keys()
else:
response = None
return response
def count(self):
'''
        Get number of defined vendors
Returns:
int
'''
return len(self.config['vendors'])
def included(self, vendor):
'''
Check whether this vendor is configured
Parameters:
vendor (str): Vendor Identifier
Returns bool
'''
status = False
if vendor in self.vendors():
status = True
else:
status = False
return status
def vendor_description(self, vendor):
'''
Get description of vendor
Parameters:
vendor (str): Vendor Identifier
'''
desc = None
if self.included(vendor):
desc = self.config['vendors'][vendor]['description']
else:
desc = None
return desc
def vendor_prefix(self, vendor):
'''
Get the prefix is present as a string
Parameters:
vendor (str): Vendor Identifier
Returns:
string containing defined prefix or '' if none
'''
prefix = ''
if self.included(vendor):
if 'prefix' in self.vendor_keys(vendor):
prefix = self.config['vendors'][vendor]['prefix']
return prefix
def option_def(self, vendor):
'''
Returns option definition as dict
Parameters:
vendor (str): Vendor Identifier
Returns:
Dict containing both parent and sub option definitions
'''
opt_def = {}
if self.included(vendor):
if 'option-def' in self.vendor_keys(vendor):
opt_def = self.config['vendors'][vendor]['option-def']
else:
logging.error(f'No option definition for vendor {vendor}')
else:
logging.error(f'Vendor: {vendor} not defined')
return opt_def
def parent_opt_def(self, vendor):
'''
Returns parent-option definition as dict
Parameters:
vendor (str): Vendor Identifier
Returns:
dict containing parent option definition
'''
opt_def = {}
parent_def = {}
if self.included(vendor):
opt_def = self.option_def(vendor)
if 'parent-option' in opt_def.keys():
parent_def = opt_def['parent-option']
else:
logging.error(f'No parent-option for vendor {vendor}')
else:
logging.error(f'Vendor: {vendor} not defined')
return parent_def
def sub_options(self, vendor):
'''
Returns list of sub-option definitions
Parameters:
vendor (str): Vendor Identifier
Returns:
list of dict
'''
opt_def = {}
sub_opt_defs = []
if self.included(vendor):
opt_def = self.option_def(vendor)
if 'sub-options' in opt_def.keys():
sub_opt_defs = opt_def['sub-options']
else:
logging.error(f'No parent-option for vendor {vendor}')
else:
logging.error(f'Vendor: {vendor} not defined')
return sub_opt_defs
def dump_vendor_def(self, vendor):
'''
Returns the vendor definition as a dict
Parameters:
vendor (str): Vendor Identifier
Returns:
dict containing vendor definition
'''
vendor_def = {}
if self.included(vendor):
vendor_def = self.config['vendors'][vendor]
return vendor_def
# DHCP Decoding Utils
class dhcp_decode():
'''
    Class to assist with Hex Decoding of
DHCP Options and sub_options
'''
def __init__(self) -> None:
self.opt_types = [ 'string',
'ip',
'array_of_ip',
'ipv4_address',
'ipv6_address',
'boolean',
'int8',
'uint8',
'int16',
'uint16',
'int32',
'uint32',
'fqdn',
'binary',
'empty' ]
self.fqdn_re, self.url_re = bloxone.utils.buildregex()
return
def hex_string_to_list(self, hex_string):
'''
Take a hex string and convert in to a list
Parameters:
hex_string (str): Hex represented as string
Returns:
list of hex bytes
'''
hex_list = []
# Remove colons if present
hex_string = hex_string.replace(':','')
# Turn hex_string into a list
for h in range(0, len(hex_string), 2):
hex_list.append(hex_string[h:h+2])
return hex_list
def hex_to_suboptions(self, hex_string, encapsulated=False):
'''
Extract the sub-options from the hex data
'''
hex_list = []
index = 0
subopt = {}
suboptions = []
opt_len = 0
opt_data = ''
opt_code = ''
# Turn hex_string into a list
hex_list = self.hex_string_to_list(hex_string)
# If encapsulated assume first two bytes
if encapsulated:
index = 2
while index <= (len(hex_list) - 2 ):
opt_code = hex_list[index]
opt_len = int(hex_list[index+1], 16)
# Get option data
for i in range(index + 2, (index + opt_len + 2)):
if i >= len(hex_list):
logging.error(f'Data encoding error, non-standard format')
break
else:
opt_data += hex_list[i]
# Build sub_opt and add to list of suboptions
sub_opt = { 'code': self.hex_to_optcode(opt_code),
'data_length': opt_len,
'data': opt_data }
suboptions.append(sub_opt)
# Reset opt_data
opt_data = ''
# Move index
index = index + opt_len + 2
return suboptions
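    # Worked example of the TLV walk above (illustrative only):
    #   hex_to_suboptions('0103616263')
    #     -> [{'code': 1, 'data_length': 3, 'data': '616263'}]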
def validate_ip(self, ip):
'''
Validate input data is a valid IP address
(Supports both IPv4 and IPv6)
Parameters:
ip (str): ip address as a string
Returns:
bool: Return True for valid and False otherwise
'''
try:
ipaddress.ip_address(ip)
result = True
except ValueError:
result = False
return result
# IP encondings
def hex_to_ip(self, hex_string):
'''
Decode a 4 or 16 octect hex string to an IPv4 or IPv6 string
Parameters:
hex_string (str): Hex representation of an IPv4 or IPv6 address
Returns:
IP Address as a string
'''
ip = ''
hex_string = hex_string.replace(':','')
no_of_octects = self.hex_length(hex_string)
# Check number of octets
if no_of_octects == '04' or no_of_octects == '10':
# Assume IPv4 or IPv6
int_ip = int(hex_string, 16)
if self.validate_ip(int_ip):
ip = ipaddress.ip_address(int_ip).exploded
else:
logging.error(f'{hex_string} not a valid IP Address')
ip = ''
else:
ip = ''
return ip
def hex_to_array_of_ip(self, hex_string):
'''
Decode array of IPv4 or IPv6 addresses to CSV string
Parameters:
hex_string (str): Hex representation of an array of IPv4 or IPv6
Returns:
IP Addresses in a CSV string
'''
array_of_ip = ''
hex_length = int(self.hex_length(hex_string),16)
if hex_length in [ 8, 12, 16, 20, 24 ]:
# Assume IPv4
for ip in [hex_string[n:n+8] for n in range(0, len(hex_string), 8)]:
dip = self.hex_to_ip(ip)
array_of_ip += dip + ','
elif hex_length in [ 32, 48, 64 ]:
# Assume IPv6
for ip in [hex_string[n:n+32] for n in range(0, len(hex_string), 32)]:
dip = self.hex_to_ip(ip)
array_of_ip += dip + ','
else:
array_of_ip = 'array_of_ip_failed.'
array_of_ip = array_of_ip[:-1]
return array_of_ip
# Methods for IPv4 and IPv6
def hex_to_ipv4_address(self, hex_string):
'''
Decode a hex string to an IPv4 Address as a string
Parameters:
hex_string (str): Hex representation of an IPv4 address
Returns:
IPv4 Address as a string
'''
hex_string = hex_string.replace(':','')
return self.hex_to_ip(hex_string)
def hex_to_ipv6_address(self, hex_string):
'''
Decode a hex string to an IPv6 address as a string
Parameters:
hex_string (str): Hex representation of an IPv6 address
Returns:
IPv6 Address as a string
'''
hex_string = hex_string.replace(':','')
return self.hex_to_ip(hex_string)
# String/text encoding
def hex_to_string(self, hex_string):
'''
Decode a string of hex values to a text string
Parameters:
hex_string (str): Hex representation of a string
Returns:
text string (str)
'''
hex_string = hex_string.replace(':','')
s = binascii.unhexlify(hex_string).decode()
return s
# Boolean encoding
def hex_to_boolean(self, hex_string):
'''
Decode Hex value as a string to 'true' or 'false'
Parameters:
hex_string (str): Hex value as a str
Returns:
string representation of a boolean
'''
hex_string = hex_string.replace(':','')
# Assume true if not zero i.e. check all bits for non-zero
if int(hex_string, 16) == 0:
text_bool = 'False'
else:
text_bool = 'True'
return text_bool
# integer encodings
def hex_to_int(self, hex_string, size=8):
'''
Decode hex to signed integer of defined size
Parameters:
hex_string (str): hex value as string
size (int): size in bits [8, 16, 32]
Returns:
integer
'''
value = 0
i_sizes = [ 8, 16, 32 ]
hex_string = hex_string.replace(':','')
i = int(hex_string, 16)
if size in i_sizes:
max_bits = size - 1
if i < (2**size):
if (i > (2**max_bits)):
value = -abs(i - (2**max_bits))
else:
value = i
else:
raise ValueError(f'{i} is out of range for int{size} type')
else:
raise ValueError(f'Size must be 8, 16, or 32')
return value
def hex_to_uint(self, hex_string, size=8):
'''
        Decode hex to unsigned integer of defined size
        Parameters:
            hex_string (str): hex value as string
            size (int): size in bits [8, 16, 32]
        Returns:
            integer
'''
i_sizes = [ 8, 16, 32 ]
hex_string = hex_string.replace(':','')
i = int(hex_string, 16)
if size in i_sizes:
max_size = 2**size
if i < max_size:
value = i
else:
raise ValueError(f'{i} is out of range for uint{size} type')
else:
raise ValueError(f'Size must be 8, 16, or 32')
return value
# Methods for intX and uintX
def hex_to_int8(self, value):
return self.hex_to_int(value, size=8)
def hex_to_uint8(self, value):
return self.hex_to_uint(value, size=8)
def hex_to_int16(self, value):
return self.hex_to_int(value, size=16)
def hex_to_uint16(self, value):
return self.hex_to_uint(value, size=16)
def hex_to_int32(self, value):
return self.hex_to_int(value, size=32)
def hex_to_uint32(self, value):
return self.hex_to_uint(value, size=32)
# FQDN Decoding
def hex_to_fqdn(self, hex_string):
'''
Decode RFC 1035 Section 3.1 formatted hex to an FQDN
Parameters:
hex_string (str): hex encoded fqdn
Returns:
fqdn as string
'''
hex_list = []
index = 0
fqdn = ''
label_len = 0
label = ''
# Turn hex_string into a list
hex_list = self.hex_string_to_list(hex_string)
label_len = int(hex_list[index], 16)
while label_len != 0:
# Build label
for i in range(index + 1, (index + label_len + 1)):
label += hex_list[i]
# Build fqdn and reset label
fqdn += self.hex_to_string(label) + '.'
label = ''
# Reset index and check
index = index + label_len + 1
if index < len(hex_list):
label_len = int(hex_list[index], 16)
else:
logging.warning('Reached end before null terminator')
label_len = 0
return fqdn
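# Worked example (illustrative): RFC 1035 encodes each label as a length
# octet followed by the label bytes, terminated by a zero octet, so
# 'www.example.com' is 03 'www' 07 'example' 03 'com' 00:
#   decoder.hex_to_fqdn('03777777076578616d706c6503636f6d00')
#   # -> 'www.example.com.' (note the trailing dot)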
# Binary Encoding
def hex_to_binary(self, data):
'''
Normalise binary ('0b...') or hex encoded data to a plain hex string
Parameters:
data (str): data to format
Returns:
hex encoding as string (no '0x' prefix)
'''
hex_value = ''
base = 16
# Check for binary
if data[:2] == '0b':
base = 2
else:
data = data.replace(':','')
# Force hex encoding without 0x using base
hex_value = '{:02x}'.format(int(data, base))
return hex_value
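# Examples (illustrative): binary input is converted via base 2, anything
# else is treated as hex with optional colon separators:
#   decoder.hex_to_binary('0b1010')        # -> '0a'
#   decoder.hex_to_binary('de:ad:be:ef')   # -> 'deadbeef'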
# Empty Encoding
def hex_to_empty(self, data):
'''
Return empty hex string ''
Parameters:
data (str): Data to discard (should be empty)
Returns:
Empty string ''
'''
if data:
data = ''
return data
# Code and Length encoding
def hex_to_optcode(self, hex_string):
'''
Decode Option Code from hex (1-octet)
Parameters:
hex_string (str): hex encoded option code
Returns:
option code as int
'''
opt_code = self.hex_to_int8(hex_string)
return opt_code
def hex_length(self, hex_string):
'''
Calculate Option Length in octets from a hex string (1-octet result)
Parameters:
hex_string (str): Octet Encoded Hex String
Returns:
Number of hex octets as hex encoded string
'''
hex_string = hex_string.replace(':','')
hex_len = '{:02x}'.format(int(len(hex_string) / 2))
return hex_len
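# Example (illustrative): four octets of data give a one-octet length of 4:
#   decoder.hex_length('0a:0a:0a:0a')   # -> '04'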
def check_data_type(self, optcode, sub_defs=[]):
'''
Get data_type for optcode from sub option definitions
Parameters:
optcode (int): Option code to check
sub_defs (list of dict): sub option definitions to cross
reference
Returns:
data_type as str
'''
data_type = ''
if sub_defs:
for d in sub_defs:
if int(optcode) == int(d['code']):
data_type = d['type']
# Check for array_of_ip
if 'array' in d.keys():
if ('ip' in data_type) and d['array']:
data_type = 'array_of_ip'
break
return data_type
def get_name(self, optcode, sub_defs=[]):
'''
Get name for optcode from sub option definitions
Parameters:
optcode (int): Option code to check
sub_defs (list of dict): sub option definitions to cross
reference
Returns:
name as str
'''
name = ''
if sub_defs:
for d in sub_defs:
if optcode == d['code']:
name = d['name']
break
return name
def guess_data_type(self, subopt, padding=False):
'''
Guess the data type of a sub-option from its data length and content
Returns:
guessed data_type as str
'''
data_type = ''
data_types = []
dl = subopt['data_length']
data = subopt['data'].replace(':','')
# Check for 1 byte first
if dl == 1:
# int8 or bool (so treat as int8)
# data_types.append('int8')
data_type = 'int8'
else:
# We know it has more than one byte
if data[-2:] == '00' and not padding:
# Possible FQDN
logging.debug('Checking fqdn guess')
fqdn = self.hex_to_fqdn(subopt['data'])
# Validate FQDN
if bloxone.utils.validate_fqdn(fqdn, self.fqdn_re):
logging.debug('FQDN verified')
data_types.append('fqdn')
data_type = 'fqdn'
if dl in [4, 16]:
logging.debug('Checking for type ip')
if self.hex_to_ip(data):
data_types.append('ip')
data_type = 'ip'
if dl in [8, 32]:
logging.debug('Checking for array of ip')
if self.hex_to_ip(data[:dl]):
data_types.append('ip')
data_type = 'array_of_ip'
if data_type == '':
logging.debug('Default guess of string')
data_type = 'string'
return data_type
def decode_data(self, data, data_type='string',
padding=False,
pad_bytes=1,
array=False):
'''
Decode hex data using the decoder registered for data_type
Returns:
decoded value
'''
decoded = ''
if data_type in self.opt_types:
if 'ip' in data_type and array:
data_type = 'array_of_ip'
hex_to_type = eval('self.' + 'hex_to_' + data_type)
else:
logging.error(f'Unsupported Option Type {data_type}')
logging.info('Unsupported option type, ' +
'attempting to process as string')
hex_to_type = eval('self.hex_to_string')
decoded = hex_to_type(data)
return decoded
def decode_dhcp_option(self,
hex_string,
sub_opt_defs=[],
padding=False,
pad_bytes=1,
encapsulated=False,
id=None,
prefix=''):
'''
Attempt to decode DHCP options from hex representation
Parameters:
hex_string (str): hex encoded (sub)options to decode
sub_opt_defs (list): List of Sub Option definition dictionaries
padding (bool): Whether extra 'null' termination bytes are req.
pad_bytes (int): Number of null bytes expected
encapsulated (bool): Hex includes parent option code and total length
id (int): option code of the parent option
prefix (str): String value prepended to the encoded options
Returns:
List of decoded sub-option dictionaries
'''
value = ''
str_value = ''
suboptions = []
de_sub_opt = {}
decoded_opts = []
parent = {}
guessed = False
hex_string = hex_string.replace(':','')
if (len(hex_string) % 2) == 0:
if encapsulated:
parent_opt = self.hex_to_optcode(hex_string[:2])
total_len = self.hex_to_int8(hex_string[2:4])
hex_string = hex_string[4:]
parent = {'parent': parent_opt, 'total_len': total_len }
decoded_opts.append(parent)
# Break out sub-options
suboptions = self.hex_to_suboptions(hex_string)
# Attempt to decode sub_options
for opt in suboptions:
if sub_opt_defs:
data_type = self.check_data_type(opt['code'],
sub_defs=sub_opt_defs)
name = self.get_name(opt['code'], sub_defs=sub_opt_defs)
else:
logging.debug(f'Attempting to guess option type for {opt}')
data_type = self.guess_data_type(opt)
guessed = True
name = 'Undefined'
if data_type:
value = self.decode_data(opt['data'], data_type=data_type)
str_value = self.decode_data(opt['data'], data_type='string')
de_sub_opt = { 'name': name,
'code': opt['code'],
'type': data_type,
'data_length': opt['data_length'],
'data': value,
'data_str': str_value,
'guess': guessed }
decoded_opts.append(de_sub_opt)
else:
logging.error('Hex string contains incomplete octets')
return decoded_opts
def output_decoded_options(self, decoded_opts=[], output='pprint'):
'''
Simple output for decode_dhcp_options() data
Parameters:
decoded_opts (list): List of dict
output (str): specify format [pprint, csv, yaml]
'''
formats = [ 'csv', 'pprint', 'yaml']
header = ''
head_printed = False
line = ''
if len(decoded_opts):
if output in formats:
# Output simply with pprint
if output == 'pprint':
pprint(decoded_opts)
# Output to CSV
if output == 'csv':
for item in decoded_opts:
if 'parent' in item.keys():
header = 'Parent, Total Length'
pprint(header)
pprint(f'{item["parent"]}, ' +
f'{item["total_len"]}')
elif not head_printed:
header = ''
for key in item.keys():
header += key + ','
header = header[:-1]
pprint(header)
head_printed = True
else:
for key in item.keys():
line += repr(item[key]) + ','
line = line[:-1]
pprint(line)
line = ''
# Output to normalised YAML
if output == 'yaml':
try:
y = yaml.safe_dump(decoded_opts)
print(y)
except:
print('Could not convert to yaml')
else:
print(f'{output} not supported for output')
print(f'Supported formats include: {formats}')
print(decoded_opts)
else:
print('No option data')
return
def tests(self):
'''
Run through encoding and decoding methods and output example results
'''
encode = bloxone.dhcp_encode()
test_data = [ { 'code': '1', 'type': 'string',
'data': 'AABBDDCCEEDD-aabbccddeeff' },
{ 'code': '2', 'type': 'ipv4_address',
'data': '10.10.10.10' },
{ 'code': '3', 'type': 'ipv4_address',
'data': '10.10.10.10,11.11.11.11', 'array': True },
{ 'code': '4', 'type': 'boolean', 'data': True },
{ 'code': '5', 'type': 'int8', 'data': '22' },
{ 'code': '5', 'type': 'int8', 'data': '-22' },
{ 'code': '6', 'type': 'uint8', 'data': '22' },
{ 'code': '7', 'type': 'int16', 'data': '33'},
{ 'code': '8', 'type': 'int16', 'data': '33'},
{ 'code': '9', 'type': 'uint16', 'data': '33'},
{ 'code': '10', 'type': 'int32', 'data': '44'},
{ 'code': '11', 'type': 'uint32', 'data': '-44'},
{ 'code': '12', 'type': 'uint32', 'data': '44'},
{ 'code': '13', 'type': 'fqdn',
'data': 'www.infoblox.com' },
{ 'code': '14', 'type': 'binary', 'data': 'DEADBEEF' },
{ 'code': '15', 'type': 'empty', 'data': ''},
{ 'code': '16', 'type': 'ipv6_address',
'data': '2001:DB8::1' },
{ 'code': '17', 'type': 'ipv6_address',
'data': '2001:DB8::1,2001:DB8::2', 'array': True } ]
print(f'Decoding types supported: {self.opt_types}')
print()
print('Non-array tests:')
for data_test in test_data:
enc_str = encode.encode_data(data_test)
if 'array' in data_test.keys():
array = data_test['array']
else:
array=False
dec_str = self.decode_data(enc_str,
data_type=data_test['type'],
array=array)
print(f'Type: {data_test["type"]}, Hex: {enc_str}, ' +
f'Decoded: {dec_str}, Original: {data_test["data"]}')
print()
# Padding Test
# test_data = { 'code': '99', 'type': 'string', 'data': 'AABBCCDD' }
# result = encode.encode_data(test_data, padding=True)
# print(f'Padding test (1 byte), type string: {test_data["data"]}' +
# f' {result}')
# Full encode test
test_data = [ { 'code': '1', 'type': 'string',
'data': 'AABBDDCCEEDD-aabbccddeeff' },
{ 'code': '2', 'type': 'ipv4_address',
'data': '10.10.10.10' },
{ 'code': '3', 'type': 'ipv4_address',
'data': '10.10.10.10,11.11.11.11', 'array': True },
{ 'code': '4', 'type': 'boolean', 'data': True },
{ 'code': '5', 'type': 'int8', 'data': '22' } ]
result = encode.encode_dhcp_option(test_data)
print(f'Full encoding of sample Hex: {result}')
decode = self.decode_dhcp_option(result, sub_opt_defs=test_data)
print(f'Decoding result:')
self.output_decoded_options(decode)
return
``` |
{
"source": "johnneijzen/John-Random-Programs-And-Ideas-2018",
"score": 4
} |
#### File: other/python/word_count_and_character_count.py
```python
def main():
userinput = input("Enter a sentence:")
wordcount = len(userinput.split())
charactercount = len(userinput)
charactercountnospacechars = len(userinput.replace(' ', ''))
print("The total word count is: " + str(wordcount))
print("The total character count: " + str(charactercount))
print("The total character count without space : " +
str(charactercountnospacechars))
main()
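# Example run (illustrative): entering "Hello brave new world" prints a word
# count of 4, a character count of 21 and a count without spaces of 18.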
``` |
{
"source": "johnneijzen/Words-Of-Inches",
"score": 3
} |
#### File: johnneijzen/Words-Of-Inches/main.py
```python
import discord
from discord.ext import tasks, commands
import random
bot = commands.Bot(command_prefix='$kj ')
@bot.command()
async def test(ctx, arg):
await ctx.send(arg)
@bot.event
async def on_ready():
channel = bot.get_channel(712996600573722657)
await channel.send('Words Of Inches Has Awoken')
@bot.event
async def on_member_join(member):
channel = bot.get_channel(712996600573722657)
fmt = 'Welcome {0.mention} to The Holy Church of the Inch God!'
await channel.send(fmt.format(member))
@bot.event
async def on_voice_state_update(member, before, after):
if before.channel is None and after.channel is not None:
if after.channel.id == 712996600573722658:
if member.id == 358424524968165407:
channel = bot.get_channel(712996600573722657)
await channel.send('@everyone God Has Awake')
if before.channel is not None and after.channel is None:
if before.channel.id == 712996600573722658:
if member.id == 358424524968165407:
channel = bot.get_channel(712996600573722657)
await channel.send('@everyone God Has Left Us')
@tasks.loop(minutes=30)
async def message_of_the_day():
channel = bot.get_channel(712996600573722657)
msg = ['Pollute not thy mind with the heresy of false gods', 'Thou shall respect all living things that have sprung up from the seed of the great Heyrosa (except Kpop stans they are an abomination unto the great Lord)', 'Thou shall refer to every living being as a “he”','Thou shall spread the good word like thine ass cheeks.', 'Thou shall not insult the name of our savior and creator','Thou shall pollute the body, not the mind.','Thou must chill when in the presence of fine women.']
await channel.send(msg[random.randint(0,6)])
@message_of_the_day.before_loop
async def before():
await bot.wait_until_ready()
print("Finished waiting")
@bot.command(pass_context = True)
async def vcmembers(ctx):
voice_channel = bot.get_channel(712996600573722658)
members = voice_channel.members
print(members)
message_of_the_day.start()
bot.run('token')
``` |
{
"source": "johnne/its_workflow",
"score": 3
} |
#### File: workflow/scripts/revcomp.py
```python
from argparse import ArgumentParser
from Bio.Seq import reverse_complement
def revcomp(seq):
return reverse_complement(seq)
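# Example (illustrative): the reverse complement of 'ATGC' is 'GCAT', so
# running `python revcomp.py ATGC` prints 'GCAT' (no trailing newline).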
def main(args):
r = revcomp(args.seq)
print(r, end="")
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("seq", type=str,
help="Input fasta string")
args = parser.parse_args()
main(args)
``` |
{
"source": "johnne/map_metaT",
"score": 3
} |
#### File: workflow/scripts/common.py
```python
import pandas as pd
from collections import defaultdict
def parse_samples(f):
df = pd.read_csv(f, sep="\t", index_col=0)
samples = defaultdict(lambda: defaultdict(dict))
for sample, d in df.iterrows():
sample_id = f"{sample}_{d['unit']}"
samples[sample_id]["fq1"] = d["fq1"]
samples[sample_id]["fq2"] = d["fq2"]
return samples
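# Illustrative input (assumed layout, inferred from the column names used
# above): a tab-separated samples file with the sample id as index column
# plus 'unit', 'fq1' and 'fq2' columns, e.g.
#   sample  unit  fq1             fq2
#   s1      1     s1_R1.fastq.gz  s1_R2.fastq.gz
# would yield {'s1_1': {'fq1': 's1_R1.fastq.gz', 'fq2': 's1_R2.fastq.gz'}}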
def parse_assemblies(f, datadir="data/"):
df = pd.read_csv(f, sep="\t", index_col=0)
assemblies = df.to_dict(orient="index")
return assemblies
def clean_featurecount(sm):
import os
dataf = pd.DataFrame()
for f in sm.input.tsv:
sample = os.path.basename(f).replace(".fc.tsv", "")
df = pd.read_csv(f, sep="\t", comment="#", usecols=[0, 1, 5, 6])
df.columns = ["Geneid", "Chr", "Length", sample]
df.index = df.Chr.map(str) + ["_" + x.split("_")[-1] for x in df.Geneid]
df.drop(["Geneid","Chr"], axis=1, inplace=True)
dataf = pd.merge(dataf, df, left_index=True, right_index=True, how="outer")
try:
dataf.drop("Length_y", axis=1, inplace=True)
except KeyError:
continue
dataf.rename(columns={"Length_x": "Length"}, inplace=True)
dataf.to_csv(sm.output.tsv, sep="\t")
def process_and_sum(q_df, annot_df):
# Merge annotations and abundance
# keep ORFs without annotation as "Unclassified"
annot_q_df = pd.merge(annot_df, q_df, left_index=True, right_index=True,
how="right")
annot_q_df.fillna("Unclassified", inplace=True)
feature_cols = annot_df.columns
annot_q_sum = annot_q_df.groupby(list(feature_cols)).sum().reset_index()
annot_q_sum.set_index(feature_cols[0], inplace=True)
return annot_q_sum
def sum_to_features(abundance, parsed):
parsed_df = pd.read_csv(parsed, index_col=0, sep="\t")
abundance_df = pd.read_csv(abundance, index_col=0, sep="\t")
abundance_df.drop("Length", axis=1, inplace=True, errors="ignore")
feature_sum = process_and_sum(abundance_df, parsed_df)
return feature_sum
def count_features(sm):
"""
Counts reads mapped to features such as KOs, PFAMs etc.
:param sm:
:return:
"""
feature_sum = sum_to_features(sm.input.abund, sm.input.annot[0])
feature_sum.to_csv(sm.output[0], sep="\t")
def main(sm):
toolbox = {"clean_featurecount": clean_featurecount,
"count_features": count_features}
toolbox[sm.rule](sm)
if __name__ == "__main__":
main(snakemake)
``` |
{
"source": "johnne/nbis-meta",
"score": 3
} |
#### File: workflow/scripts/taxonomy_utils.py
```python
import sys
import pandas as pd
def add_lower(df, ranks):
"""
Propagates assignments from higher to lower taxonomic ranks,
and adds a 'Unclassified.' prefix.
:param df: pandas DataFrame
:param ranks: ranks for which to propagate
:return:
"""
for i in df.index:
last_known = df.loc[i, ranks[0]]
for rank in ranks[1:]:
if df.loc[i, rank] != "Unclassified":
last_known = df.loc[i, rank]
else:
if last_known == "Unclassified":
df.loc[i, rank] = last_known
else:
df.loc[i, rank] = "Unclassified.{}".format(last_known)
return df
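# Worked example (illustrative): for ranks ['phylum', 'class'], a row with
# phylum='Proteobacteria' and class='Unclassified' becomes
# class='Unclassified.Proteobacteria', while a row that is 'Unclassified'
# at every rank is left unchanged.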
def contigtax_mash(sm):
# Keep stats on assignments
# resolved = cases where sourmash helped resolve assignments
# transferred = cases where blast-based assignments were overwritten
# added = cases where assignments from sourmash were added
# total = total number of contigs
stats = {'resolved': 0, 'transferred': 0, 'added': 0, 'total': 0}
df1 = pd.read_csv(sm.input.smash, sep=",", header=0, index_col=0)
stats['total'] = df1.shape[0]
df2 = pd.read_csv(sm.input.contigtax[0], sep="\t", header=0, index_col=0)
ranks = list(df2.columns)
ranks.reverse()
# Only use subset of contigs with matches
df1 = df1.loc[df1["status"] == "found", df2.columns]
df1.fillna("Unclassified", inplace=True)
# Get common set of contigs
common = set(df1.index).intersection(set(df2.index))
for contig in common:
s = df1.loc[contig]
b = df2.loc[contig]
for rank in ranks:
# If sourmash has an assignment at this rank
if s[rank] != "Unclassified":
# If blast-based contains 'Unclassified',
# mark contig as resolved
if "Unclassified" in b[rank]:
stats['resolved'] += 1
# Otherwise, mark contig as transferred
else:
stats['transferred'] += 1
# As soon as a contig has been transferred or resolved
# we can stop the merge
df2.loc[contig] = df1.loc[contig]
break
# If sourmash does not have an assignment at this rank
else:
# but blast-based does have an assignment,
# then the blast-based is more resolved and we can stop
# trying to merge
if "Unclassified" not in b[rank]:
break
# Get contigs in sourmash missing from blast
missing1 = set(df1.index).difference(set(df2.index))
if len(missing1) > 0:
stats['added'] += len(missing1)
df2 = pd.concat([df2, df1.loc[missing1]])
df2 = add_lower(df2, df2.columns)
df2.to_csv(sm.output[0], sep="\t")
# Write to log
with open(sm.log[0], 'w') as fhout:
fhout.write("Total: {}\n".format(stats['total']))
fhout.write("Resolved: {}\n".format(stats['resolved']))
fhout.write("Transferred: {}\n".format(stats["transferred"]))
fhout.write("Added: {}\n".format(stats['added']))
def contigtax_assign_orfs(sm):
"""
Transfers taxonomic assignments from contigs down to ORFs called on contigs
:param sm: snakemake object
:return:
"""
gff_df=pd.read_csv(sm.input.gff, header=None, sep="\t", comment="#",
usecols=[0, 8], names=["contig", "id"])
# Extract ids
ids=["{}_{}".format(gff_df.loc[i, "contig"],
gff_df.loc[i, "id"].split(";")[0].split("_")[-1]) for i in gff_df.index]
gff_df.loc[:, "id"]=ids
# Read taxonomy for contigs
tax_df=pd.read_csv(sm.input.tax, header=0, sep="\t", index_col=0)
# Merge dataframes
orf_tax_df=pd.merge(gff_df, tax_df, left_on="contig",
right_index=True, how="outer")
# When using 'outer' merging there may be contigs with no called ORF
# but with a tax assignment. Drop these contigs.
orf_tax_df=orf_tax_df.loc[orf_tax_df["id"]==orf_tax_df["id"]]
# Set Unclassified for NA values
orf_tax_df.fillna("Unclassified", inplace=True)
# Set index to ORF ids
orf_tax_df.set_index("id", inplace=True)
orf_tax_df.drop("contig", axis=1, inplace=True)
orf_tax_df.to_csv(sm.output.tax[0], sep="\t", index=True, header=True)
def main(sm):
toolbox = {"merge_contigtax_sourmash": contigtax_mash,
"contigtax_assign_orfs": contigtax_assign_orfs}
toolbox[sm.rule](sm)
if __name__ == "__main__":
main(snakemake)
``` |
{
"source": "JohnNesbit/ML-for-photgrammetry-in-mesh-space",
"score": 3
} |
#### File: JohnNesbit/ML-for-photgrammetry-in-mesh-space/pic-model.py
```python
import tensorflow as tf
from open3d import *
import os
import PIL.Image as Image
import numpy as np  # numpy is used throughout (np.array, np.append, ...)
bat_path = "E:/PycharmProjects/3DMesh_Development/data_stuff/run_seg2102" # where bat file is
epoch_save_path = "E:/PycharmProjects/3DMesh_Development/data_stuff/idk_epoch.off" # where to save off file to
lsave_path = "E:/PycharmProjects/3DMesh_Development/PHOTOS_GAN_SAVES/" # dir for epoch benchmarks
xtrain = np.array([])
picpath = "E:/PycharmProjects/3DMesh_Development/pictures/" # dir with pictures
npicpath = "E:/PycharmProjects/3DMesh_Development/data_stuff/tmp/idk_epoch" # dir for renderings
for x in os.listdir(picpath):
ia = Image.open(picpath + x)
ia = np.array(ia.convert('L'))
print(ia.shape)
xtrain = np.append(xtrain, ia)
print(xtrain.shape)
vertex_amt = [1000]
sess = tf.Session()
dw_1 = tf.get_variable(name="dw_1", shape=[2, 2, 1, 1], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
db_1 = tf.get_variable(name="db_1", shape=[816, 612], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
dw_2 = tf.get_variable(name="dw_2", shape=[2, 2, 1, 1], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
db_2 = tf.get_variable(name="db_2", shape=[102, 153], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
dw_3 = tf.get_variable(name="dw_3", shape=[2, 2, 1, 1], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
db_3 = tf.get_variable(name="db_3", shape=[51, 51], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
dw_4 = tf.get_variable(name="dw_4", shape=[2601, 1], initializer=tf.initializers.random_normal(stddev=0.02),
# start shape changing via matmul
trainable=True, dtype=tf.float32)
db_4 = tf.get_variable(name="db_4", shape=[1], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
# start of 3D GEN variables
gw_1 = tf.get_variable(name="gw_1", shape=[2, 2, 1, 1], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
gb_1 = tf.get_variable(name="gb_1", shape=[1, 3264, 1224, 1], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
gw_2 = tf.get_variable(name="gw_2", shape=[2, 2, 1, 1], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
gb_2 = tf.get_variable(name="gb_2", shape=[1, 816, 306, 1], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
gw_3 = tf.get_variable(name="gw_3", shape=[2, 2, 1, 1], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
gb_3 = tf.get_variable(name="gb_3", shape=[1, 816, 102, 1], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32) # know shape
gw_4 = tf.get_variable(name="gw_4", shape=[1, 816, 102, 3], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
gb_4 = tf.get_variable(name="gb_4", shape=[1, 816, 3, 1], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32) # know
gw_5 = tf.get_variable(name="gw_5", shape=[1, 3, 816, 1000], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
gb_5 = tf.get_variable(name="gb_5", shape=[3, 1000, 1, 1], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
gw_6 = tf.get_variable(name="gw_6", shape=[1, 3, 1000, 1333], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
gb_6 = tf.get_variable(name="gb_6", shape=[1333, 3], initializer=tf.initializers.random_normal(stddev=0.02),
trainable=True, dtype=tf.float32)
sess.run(tf.global_variables_initializer())
def generator(image):
# do calculations
# problem Incompatible shapes between op input and calculated input gradient not with biases
print("gen prints start:")
g = tf.nn.conv2d(image, gw_1, strides=[1, 2, 4, 1], padding="SAME")
g = g + gb_1
g = tf.nn.leaky_relu(g)
g = tf.reshape(g, [1, 3264, 1224, 1])
g = tf.nn.conv2d(g, gw_2, strides=[1, 4, 4, 1], padding="SAME")
print(g.shape)
g = g + gb_2
g = tf.nn.leaky_relu(g)
g = tf.nn.conv2d(g, gw_3, strides=[1, 1, 3, 1], padding="SAME")
g = g + gb_3
g = tf.nn.relu(g)
print(g.shape)
g = tf.reshape(g, shape=[1, 816, 1, 102]) # ---debugged up to here---
g = tf.matmul(g, gw_4)
print("m4:")
print(g.shape)
g = tf.reshape(g, shape=[1, 816, 3, 1])
g = g + gb_4
g = tf.nn.relu(g)
print(g.shape)
g = tf.reshape(g, shape=[1, 3, 1, 816])
g = tf.reshape(tf.matmul(g, gw_5), shape=[3, 1000, 1, 1]) + gb_5
g = tf.nn.softmax(g)
print(g.shape)
g = tf.reshape(g, shape=[1, 3, 1, 1000])
g = tf.matmul(g, gw_6)
g = tf.reshape(g, shape=[1333, 3])
g = g + gb_6
g = tf.nn.softmax(g)
print("g end")
print(g.shape)
return g
def discriminator(data):
# do calculations
print("descriminator stars here:")
d = tf.nn.conv2d(data, filter=dw_1, strides=[1, 8, 8, 1], padding="SAME")
print(d.shape)
d = tf.reshape(d, shape=[816, 612])
d = d + db_1
d = tf.nn.relu(d)
d = tf.reshape(d, shape=[1, 816, 612, 1])
print(d.shape)
d = tf.nn.conv2d(d, filter=dw_2, strides=[1, 8, 4, 1], padding="SAME")
d = tf.reshape(d, shape=[102, 153])
d = d + db_2
d = tf.nn.relu(d)
d = tf.reshape(d, shape=[1, 102, 153, 1])
print(d.shape)
d = tf.nn.conv2d(d, filter=dw_3, strides=[1, 2, 3, 1], padding="SAME")
d = tf.reshape(d, shape=[51, 51])
d = d + db_3
d = tf.nn.relu(d)
print(d.shape)
d = tf.reshape(d, shape=[1, 2601])
d = tf.matmul(d, dw_4)
d = tf.add(d, db_4)
d = tf.nn.relu(d)
d = tf.reshape(d, shape=[1])
print(d.shape)
print("d end")
return d
def save_func(xdata, path):
nls = ""
predarr = ""
print("seg starts")
print(xdata.shape)
filx = open(path, "w+", encoding="ASCII")
for lll in range(1001):
item = xdata[lll]
str_arr = np.array_str(item)
nls = str_arr.replace("[", "").replace("]", "")
predarr = predarr + nls
predarr = predarr + "\n"
for iii in range(334):
item = xdata[iii]
item = np.multiply(np.around(item, decimals=2), 100)
str_arr = np.array_str(item)
for ii in range(str_arr.count(".") + 1):
str_arr = str_arr.replace(".", " ")
predarr = predarr + "3 " + str_arr.replace("[", "").replace("]", "")
predarr = predarr + "\n"
prepd = "OFF\n" + "1000 333 0\n" + predarr
for iiii in range(prepd.count(" ") + 1):
prepd.replace(" ", " ")
for iiiii in range(prepd.count(" ") + 1):
prepd.replace(" ", " ")
filx.write(prepd)
filx.close()
filxr = open(path, "r+")
for xix in filxr.readlines():
if "OFF" or "1000 333" in xix:
continue
lsit_nxix = list(xix)
lsit_nxix.remove(" ")
str_nxix = str(lsit_nxix)
nls = nls + str_nxix + "\n"
filxr.write(nls)
filxr.close()
def segment(xdata):
nxtrain = np.array([])
nls = ""
predarr = ""
print("seg starts")
print(xdata.shape)
filx = open(epoch_save_path, "w", encoding="ASCII")
for lll in range(1001):
item = xdata[lll]
str_arr = np.array_str(item)
nls = str_arr.replace("[", "").replace("]", "")
predarr = predarr + nls
predarr = predarr + "\n"
for iii in range(334):
item = xdata[iii]
item = np.multiply(np.around(item, decimals=2), 100)
str_arr = np.array_str(item)
for ii in range(str_arr.count(".") + 1):
str_arr = str_arr.replace(".", " ")
predarr = predarr + "3 " + str_arr.replace("[", "").replace("]", "")
predarr = predarr + "\n"
prepd = "OFF\n" + "1000 333 0\n" + predarr
for iiii in range(prepd.count(" ") + 1):
prepd.replace(" ", " ")
for iiiii in range(prepd.count(" ") + 1):
prepd.replace(" ", " ")
filx.write(prepd)
filx.close()
filxr = open(epoch_save_path, "r+")
for xix in filxr.readlines():
if "OFF" or "1000 333" in xix:
continue
lsit_nxix = list(xix)
lsit_nxix.remove(" ")
str_nxix = str(lsit_nxix)
nls = nls + str_nxix + "\n"
filxr.write(nls)
filxr.close()
os.system(bat_path)
for xx in os.listdir(npicpath):
nia = Image.open(npicpath + "/" + xx)
#nia = crop(nia, []) # neeeded dims=(2448, 3264)
nia = np.array(nia.convert('L'))
print(nia.shape)
nxtrain = np.append(nxtrain, nia)
nxtrain.reshape([1, 6528, 4896, 1])
return tf.reshape(tf.convert_to_tensor(nxtrain, dtype=tf.float32), shape=[1, 6528, 4896, 1])
# temporary data values that we can swap easily.
x_placeholder = tf.placeholder(tf.float32, shape=[1, 6528, 4896, 1], name='x_placeholder') # 2d image tf placeholder
xtrain = xtrain.reshape([2, 1, 6528, 4896, 1]) # 2, 13056, 9792
print(xtrain.shape)
def pro_pcd_arr(g):
lg = list(sess.run(g, {x_placeholder: xtrain[1]}).tolist())
for k in range(lg.count([0, 0, 0])):
lg.remove([0, 0, 0])
np.array(lg)
return lg
# discriminator should be 2D convs and Gz outputs segmented
Dx = discriminator(x_placeholder) # 2d input image against seg model
# 😃
Gz = pro_pcd_arr(generator(x_placeholder)) # reg gan makes 3d model from 2d pic
print("line 167")
Dg = discriminator(segment(np.array(Gz))) # seg model vs 2d real image
print("worked")
# defines loss functions for models
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.constant([1], shape=[1], dtype=tf.float32),
logits=Dg))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.constant([1], shape=[1],
dtype=tf.float32), logits=Dg))
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.constant([0], shape=[1],
dtype=tf.float32), logits=Dx))
d_loss = d_loss_real + d_loss_fake
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'd' and "_" in var.name]
g_vars = [var for var in t_vars if 'g' and "_" in var.name]
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE) as scope:
d_trainer_fake = tf.train.AdamOptimizer(0.0001).minimize(d_loss_fake, var_list=d_vars)
d_trainer_real = tf.train.AdamOptimizer(0.0001).minimize(d_loss_real, var_list=d_vars)
g_trainer = tf.train.AdamOptimizer(0.0001).minimize(g_loss, var_list=g_vars, colocate_gradients_with_ops=True)
d_real_count_ph = tf.placeholder(tf.float32)
d_fake_count_ph = tf.placeholder(tf.float32)
g_count_ph = tf.placeholder(tf.float32)
d_on_generated = tf.reduce_mean(discriminator(segment(np.array(pro_pcd_arr(generator(x_placeholder))))))
d_on_real = tf.reduce_mean(discriminator(x_placeholder))
# initializes all variables with tensorflow
merged = tf.summary.merge_all()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
d_fake_count = 0
g_count = 0
d_real_count = 0
# define loss vars
gLoss = 0
dLossFake, dLossReal = 1, 1
# training loop
for i in range(2):
real_image_batch = xtrain[i]
# Train discriminator on generated images
_, dLossReal, dLossFake, gLoss = sess.run([d_trainer_fake, d_loss_real, d_loss_fake, g_loss],
{x_placeholder: real_image_batch})
d_fake_count += 1
# Train the generator
sess.run([g_trainer, d_loss_real, d_loss_fake, g_loss],
{x_placeholder: real_image_batch})
g_count += 1
# train d on real images
sess.run([d_trainer_real, d_loss_real, d_loss_fake, g_loss],
{x_placeholder: real_image_batch})
d_real_count += 1
d_real_count, d_fake_count, g_count = 0, 0, 0
if i % 5 == 0:
print("TRAINING STEP", i)
print("Descriminator_loss:" + str(dLossReal))
#mdsaver
sess.run(save_func(np.array(pro_pcd_arr(generator(x_placeholder))),
path= lsave_path + "save" + str(i) + ".off"))
if i % 20 == 0:
save_path = saver.save(sess, "models/pretrained_3ddcgan.ckpt", global_step=i)
print("saved to %s" % save_path)
``` |
{
"source": "johnne/tango-rat",
"score": 3
} |
#### File: tango-rat/contigtax/shred_fasta.py
```python
import random
from Bio import SeqIO
from argparse import ArgumentParser
import sys
def read_seqs(f):
return SeqIO.to_dict(SeqIO.parse(f, "fasta"))
def shred(d, prefix=None, existing=False, contigs=10000, minsize=500,
maxsize=10000):
"""
Generate random shreds of input fasta file
:param d: Dictionary of sequences
:param prefix: Prefix string to append to random contigs
:param existing: Use existing prefix string ('|' splits prefix)
:param contigs: Number of contigs to generate
:param minsize: Minimum size of contigs
:param maxsize: Maximum size of contigs
:return: Dictionary of randomly shredded contigs
"""
random.seed(42)
shreds = {}
keys = list(d.keys())
for i in range(0, contigs):
# pick a random contig
key = random.choice(keys)
if existing:
prefix = key.split("|")[0]
if prefix is not None:
contig_id = ">{}|contig{}".format(prefix, i)
else:
contig_id = ">contig{}".format(i)
keylen = len(d[key])-1
# pick a random length
rand_len = random.randrange(minsize, maxsize)
# if random length is bigger than contig, choose entire contig
if rand_len >= keylen:
shreds[contig_id] = str(d[key].seq)
continue
# choose whether to start from beginning or end
if random.choice(["start", "end"]) == "start":
# if choosing from beginning, pick a random position between
# the first nucleotide and contig_length - rand_length
rand_start = random.randrange(0, keylen-rand_len)
rand_end = rand_start+rand_len
else:
rand_end = random.randrange(rand_len, keylen)
rand_start = rand_end-rand_len
rand_seq = d[key][rand_start:rand_end]
shreds[contig_id] = rand_seq.seq
return shreds
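# Usage sketch (illustrative): shred a genome fasta into ~1000 random
# contigs of 500-10000 bp, tagging each with an assumed taxid prefix:
#   seqs = read_seqs('genome.fna')
#   contigs = shred(seqs, prefix='taxid123', contigs=1000)
#   write_shreds(contigs)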
def write_shreds(shreds):
l = []
for contig_id in sorted(shreds.keys()):
seq = shreds[contig_id]
l.append(len(seq))
sys.stdout.write("{}\n{}\n".format(contig_id, str(seq)))
import numpy as np
sys.stderr.write(
"""
min: {min}
max: {max}
median: {median}
mean: {mean}
""".format(min=np.min(l), max=np.max(l), median=np.median(l),
mean=np.mean(l)))
def main(args):
seqs = read_seqs(args.infile)
shreds = shred(seqs, args.prefix, args.use_prefix, args.contigs,
args.minsize, args.maxsize)
write_shreds(shreds)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("infile", type=str, help="Input fasta file")
parser.add_argument("--prefix", type=str, help="prefix to add to ids")
parser.add_argument("--use-prefix", action="store_true",
help="Use already existing prefix for sequences")
parser.add_argument("--minsize", type=int, default=500,
help="Minimum contig size")
parser.add_argument("--maxsize", type=int, default=10000,
help="Maximum contig size")
parser.add_argument("--contigs", type=int, default=10000,
help="Contigs to generate")
args = parser.parse_args()
main(args)
``` |
{
"source": "Johnnetto/FHIRSnake",
"score": 3
} |
#### File: fhir/client/humanname.py
```python
import fhir.client.primitive
import fhir.client.complex
class HumanName:
"""
A human name with the ability to identify parts and usage.
Names may be changed, or repudiated, or people may have different names in different contexts.
Names may be divided into parts of different type that have variable significance depending on
context, though the division into parts does not always matter. With personal names, the different
parts may or may not be imbued with some implicit meaning; various cultures associate different
importance with the name parts and the degree to which systems must care about name parts around
the world varies widely.
"""
def __init__(self):
# Identifies the purpose for this name.
self.__use = fhir.client.primitive.Code('usual')
# A full text representation of the name.
self.__text = ' '
# The part of a name that links to the genealogy. In some cultures (e.g. Eritrea) the family name of
# a son is the first name of his father.
self.__family = []
# Given name.
self.__given = []
# Part of the name that is acquired as a title due to academic, legal, employment or nobility
# status, etc. and that appears at the start of the name.
self.__prefix = []
# Part of the name that is acquired as a title due to academic, legal, employment or nobility
# status, etc. and that appears at the end of the name.
self.__suffix = []
# Indicates the period of time when this name was valid for the named person.
self.__period = fhir.client.complex.Period('', '')
@property
def use(self):
return self.__use
@use.setter
def use(self, use):
if not isinstance(use, fhir.client.primitive.Code) and not isinstance(use, str):
raise TypeError('A code value must be of type Code or str.')
string_use = use
if isinstance(use, fhir.client.primitive.Code):
string_use = use.__str__()
if string_use != 'usual' and string_use != 'official' and string_use != 'temp' and string_use != 'anonymous' \
and string_use != 'old' and string_use != 'maiden':
raise ValueError('A Code value must be usual, official, temp, anonymous, old or maiden.')
self.__use = fhir.client.primitive.Code(string_use)
@property
def text(self):
return self.__text
@text.setter
def text(self, text):
if not isinstance(text, str):
raise TypeError('A text value must be of type str.')
self.__text = text
@property
def period(self):
return self.__period
@period.setter
def period(self, period):
if not isinstance(period, fhir.client.complex.Period):
raise TypeError('A period value must be of type Period.')
def str(self):
return 'text representation'
``` |
{
"source": "johnnewman/PiSecurityCam",
"score": 3
} |
#### File: watchtower/camera/__init__.py
```python
import json
import logging
import os
import picamera
import time
from threading import Lock
class SafeCamera (picamera.PiCamera):
"""
A camera class that provides a safe mechanism for multiple threads to
capture an image using ``jpeg_data`` or get/set the monitoring status.
"""
def __init__(self, name, resolution, framerate, config_path):
super(SafeCamera, self).__init__(resolution=resolution, framerate=framerate)
self.__should_monitor = True
self.__should_record = False
self.__motion_detected = False
self.__lock = Lock()
self.__jpeg_lock = Lock()
self.__jpeg_data = b''
self.__name = name
self.__config_path = config_path
self.load_config()
@property
def name(self):
return self.__name
@property
def motion_detected(self):
self.__lock.acquire()
motion_detected = self.__motion_detected
self.__lock.release()
return motion_detected
@motion_detected.setter
def motion_detected(self, value):
self.__lock.acquire()
self.__motion_detected = value
self.__lock.release()
@property
def should_record(self):
self.__lock.acquire()
should_record = self.__should_record
self.__lock.release()
return should_record
@should_record.setter
def should_record(self, value):
self.__lock.acquire()
self.__should_record = value
self.__lock.release()
@property
def should_monitor(self):
self.__lock.acquire()
should_monitor = self.__should_monitor
self.__lock.release()
return should_monitor
@should_monitor.setter
def should_monitor(self, value):
self.__lock.acquire()
self.__should_monitor = value
self.__lock.release()
@property
def jpeg_data(self):
data = None
self.__jpeg_lock.acquire()
data = self.__jpeg_data
self.__jpeg_lock.release()
return data
@jpeg_data.setter
def jpeg_data(self, value):
self.__jpeg_lock.acquire()
self.__jpeg_data = value
self.__jpeg_lock.release()
def load_config(self):
if os.path.exists(self.__config_path):
try:
with open(self.__config_path, 'r') as f:
config = json.load(f)
self.update_config_params(config)
except Exception as e:
logging.getLogger(__name__).exception('Exception reading %s file. Purging the file! Exception: %s' % (self.__config_path, e))
try:
os.remove(self.__config_path)
except Exception as e2:
pass
else:
logging.getLogger(__name__).info('\"%s\" file does not exist.' % self.__config_path)
def save_config(self):
params = self.config_params()
try:
with open(self.__config_path, 'w') as f:
f.write(json.dumps(params, indent=2, sort_keys=True))
except Exception as e:
logging.getLogger(__name__).exception('Exception saving %s file: %s' % (self.__config_path, e))
def update_config_params(self, params):
self.should_monitor = False
if 'awb_mode' in params:
awb_mode = params['awb_mode']
if awb_mode in picamera.PiCamera.AWB_MODES:
self.awb_mode = awb_mode
if 'brightness' in params:
self.brightness = int(params['brightness'])
if 'contrast' in params:
self.contrast = int(params['contrast'])
if 'exposure_compensation' in params:
self.exposure_compensation = int(params['exposure_compensation'])
if 'exposure_mode' in params:
exposure_mode = params['exposure_mode']
if exposure_mode in picamera.PiCamera.EXPOSURE_MODES:
self.exposure_mode = exposure_mode
if 'image_effect' in params:
image_effect = params['image_effect']
if image_effect in picamera.PiCamera.IMAGE_EFFECTS:
self.image_effect = image_effect
if 'iso' in params:
self.iso = int(params['iso'])
if 'meter_mode' in params:
meter_mode = params['meter_mode']
if meter_mode in picamera.PiCamera.METER_MODES:
self.meter_mode = meter_mode
if 'rotation' in params:
self.rotation = int(params['rotation'])
if 'saturation' in params:
self.saturation = int(params['saturation'])
if 'sharpness' in params:
self.sharpness = int(params['sharpness'])
if 'video_denoise' in params:
self.video_denoise = bool(params['video_denoise'])
self.save_config()
self.should_monitor = True
return self.config_params()
def config_params(self):
return dict(
awb_mode=self.awb_mode,
brightness=self.brightness,
contrast=self.contrast,
exposure_compensation=self.exposure_compensation,
exposure_mode=self.exposure_mode,
image_effect=self.image_effect,
iso=self.iso,
meter_mode=self.meter_mode,
rotation=self.rotation,
saturation=self.saturation,
sharpness=self.sharpness,
video_denoise=self.video_denoise
)
```
#### File: watchtower/streamer/mjpeg_streamer.py
```python
import io
import sys
from .stream_saver import StreamSaver
from ..remote.servo import Servo
class MJPEGStreamer(StreamSaver):
"""
A streamer that captures individual JPEG frames from the camera.
"""
def __init__(self, camera, byte_writers, name, servo=None, rate=1):
super(MJPEGStreamer, self).__init__(stream=io.BytesIO(),
byte_writers=byte_writers,
name=name,
stop_when_empty=False)
self.__camera = camera
self.__servo = servo
self.read_wait_time = 1/rate
def read(self, position, length=None):
"""
Overridden to capture a new JPEG into the stream each call.
:param position: Not used. Position will always be set to 0.
:param length: Not used. The superclass is expected to read all the
JPEG data.
:return: The superclass data from ``read``.
"""
self.stream.seek(0) # Always reset to 0
self.stream.truncate(0) # Dump the old data
self.stream.write(self.__camera.jpeg_data)
return super(MJPEGStreamer, self).read(0, length=sys.maxsize)
def ended(self):
"""
Overridden to flip the servo back off if the camera is not running.
"""
super(MJPEGStreamer, self).ended()
if not self.__camera.should_monitor and \
self.__servo is not None:
self.__servo.disable()
```
#### File: watchtower/tests/conftest.py
```python
import os
import pytest
import sys
test_data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
watchtower_path = os.path.dirname(os.path.realpath(__file__))
for i in range(2):
watchtower_path = os.path.split(watchtower_path)[0]
if watchtower_path not in sys.path:
print('Inserted \"%s\" into system paths.' % watchtower_path)
sys.path.insert(0, watchtower_path)
@pytest.fixture(scope="function")
def random_data():
return os.urandom(1024*1024*10) # 10 megabytes
@pytest.fixture(scope="session")
def test_data_path():
return test_data_dir
@pytest.fixture(scope="session")
def installation_path():
return watchtower_path
```
#### File: streamer/writer/test_dropbox_writer.py
```python
import math
import os
import pytest
import subprocess
import time
from watchtower.streamer.writer import dropbox_writer
from watchtower.streamer.writer.disk_writer import DiskWriter
def test_dropbox_writer_integration(writer, random_data, tmp_path):
"""
Integration test to feed a DropboxWriter chunks of data and verify that the
decrypted data is identical to the input data. A MockDropboxUploader is
used to output to the tmp_path instead of Dropbox.
"""
# Append chunks of bytes to the DropboxWriter instance. This simulates a
# live feed.
append_count = 20
amount_to_read = len(random_data)//append_count
for i in range(append_count):
data = random_data[i*amount_to_read:(i+1) * amount_to_read]
writer.append_bytes(data, close=(i == append_count-1)) # Close on the last chunk.
# Wait for writers to stop.
while not writer.is_finished_writing():
time.sleep(0.05)
# Read in all of the data that the DropboxWriter output to disk.
files = os.listdir(tmp_path)
files.sort(key=lambda name: int(name.strip('test_file').strip('.bin'))) # Sort them into [test_file0.bin, test_file1.bin, ...]
written_data = ''.encode()
for file_name in files:
with open(os.path.join(tmp_path, file_name), 'rb') as f:
written_data += f.read()
# Assert that multiple files were written to disk.
assert(len(files) > 0)
assert(len(files) == math.ceil(len(random_data)/dropbox_writer.DEFAULT_FILE_CHUNK_SIZE))
# Assert the writer's input data is identical to the data output to disk.
assert(written_data == random_data)
def test_dropbox_writer_encrypted_integration(encrypted_writer, random_data, tmp_path, installation_path):
"""
Integration test to feed a DropboxWriter chunks of data, decrypt the
output, and verify that the decrypted data is identical to the input data.
A MockDropboxUploader is used to output to the tmp_path instead of Dropbox.
This also serves as a good test for decrypt.py, by decrypting each file
output by the DropboxWriter and verifying that the bytes are identical to
the original.
"""
# Append chunks of bytes to the DropboxWriter instance. This simulates a
# live feed.
append_count = 20
amount_to_read = len(random_data)//append_count
for i in range(append_count):
data = random_data[i*amount_to_read:(i+1) * amount_to_read]
encrypted_writer.append_bytes(data, close=(i == append_count-1)) # Close on the last chunk.
# Wait for writers to stop.
while not encrypted_writer.is_finished_writing():
time.sleep(0.05)
# The installation path is one directory up from the package path.
private_key_path = os.path.join(tmp_path, 'private.pem')
decrypt_script_path = os.path.join(installation_path, 'ancillary', 'decryption', 'decrypt.py')
# Read in all of the data that the DropboxWriter output to disk. Ignore the .pem files.
files = list(filter(lambda name: name.endswith('.bin'), os.listdir(tmp_path)))
files.sort(key=lambda name: int(name.strip('test_file').strip('.bin'))) # Sort them into [test_file0.bin, test_file1.bin, ...]
written_data = ''.encode()
for file_name in files:
in_path = os.path.join(tmp_path, file_name)
out_path = os.path.join(tmp_path, file_name + '.dec')
# Decrypt each file using the decrypt.py program.
subprocess.call(['python', decrypt_script_path,
'-k', private_key_path,
'-i', in_path,
'-o', out_path])
# Append the decrypted data.
with open(out_path, 'rb') as f:
written_data += f.read()
# Assert that multiple files were written to disk.
assert(len(files) > 1)
assert(len(files) == math.ceil(len(random_data)/dropbox_writer.DEFAULT_FILE_CHUNK_SIZE))
# Assert the writer's input data is identical to the data output to disk.
assert(written_data == random_data)
# ---- Fixtures
@pytest.fixture
def writer(tmp_path):
return dropbox_writer.DropboxWriter(os.path.join(tmp_path, 'test_file.bin'),
dropbox_token="",
test_dropbox_uploader=MockDropboxUploader())
@pytest.fixture
def encrypted_writer(tmp_path):
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import rsa, padding
# Generate a private and public key and save these in the tmp_path.
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
private_pem = private_key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption())
with open(os.path.join(tmp_path, 'private.pem'), 'wb') as private_out:
private_out.write(private_pem)
public_key = private_key.public_key()
public_pem = public_key.public_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
with open(os.path.join(tmp_path, 'public.pem'), 'wb') as public_out:
public_out.write(public_pem)
return dropbox_writer.DropboxWriter(os.path.join(tmp_path, 'test_file.bin'),
dropbox_token="",
public_pem_path=os.path.join(tmp_path, 'public.pem'),
test_dropbox_uploader=MockDropboxUploader())
# ---- Mock objects
class MockDropboxUploader():
"""
Mock object to be used in place of a dropbox object. Each call to
files_upload will create a new file on disk.
"""
def files_upload(self, bts, path):
writer = DiskWriter(path)
writer.append_bytes(bts, close=True)
```
#### File: watchtower/util/file_system.py
```python
import os
from datetime import datetime
import shutil
def __dirnames_matching_format(dirnames, format):
"""
Iterates through dirnames and returns a sorted array of directory names
that match the provided format.
"""
matching_dates = []
for dirname in dirnames:
try:
dt = datetime.strptime(dirname, format)
if dt is not None:
matching_dates.append(dt)
except ValueError:
pass
matching_dates.sort(reverse=True)
return [datetime.strftime(dt, format) for dt in matching_dates]
def all_recordings(path, day_format, time_format):
"""
Iterates through the provided directory path and returns an array of
dictionaries where each dictionary represents one day.
"""
recordings = []
days = all_recording_days(path, day_format)
for day in days:
recordings.append({
'day': day,
'times': all_recording_times_for_day(path, day, time_format)
})
return recordings
def all_recording_days(path, day_format):
"""
Iterates through the provided directory path and returns an array of all
day directories that match the provided format.
"""
dirpath, dirnames, filenames = next(os.walk(path))
return __dirnames_matching_format(dirnames, day_format)
def all_recording_times_for_day(path, day_dirname, time_format):
"""
Iterates through the provided day directory path and returns an array of
all time directories that match the provided format.
"""
path = os.path.join(path, day_dirname)
dirpath, dirnames, filenames = next(os.walk(path))
return __dirnames_matching_format(dirnames, time_format)
def delete_recording(path, day_dirname, time_dirname=None):
"""
If a time_dirname is supplied, this will delete the time directory within
the provided day directory. Otherwise if just a day_dirname is supplied,
the day's whole directory tree will be deleted.
"""
path = os.path.join(path, day_dirname)
if time_dirname is not None:
path = os.path.join(path, time_dirname)
if os.path.exists(os.path.dirname(path)):
try:
shutil.rmtree(path)
return True
except Exception as ex:
print(ex)
return False
``` |
{
"source": "johnnewto/cellsegment",
"score": 2
} |
#### File: cellsegment/cellsegment/dataprep_utils.py
```python
__all__ = ['CLASS_LABELS', 'FILENAME_TRIM', 'crop2well', 'well_circle_mask', 'check_well_crop', 'resize_crop2well_one',
'shuffle_csv', 'split_filenames', 'get_image_size', 'make_label_img_from_json', 'create_one_label',
'resize_dir', '__crop_image', 'crop_img_dir', 'move_files_to_dir', 'old_move_files_to_dir',
'create_labels_dir']
#Cell
import random
from cellsegment.core import *
from .json_utils import *
import json
import shutil
import pandas
from fastai.vision import *
from skimage.filters import threshold_otsu
from skimage.color import rgb2gray
from skimage.measure import label, regionprops
from skimage.morphology import closing, square
from skimage.color import label2rgb
from skimage.exposure import histogram
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
import os
#Cell
CLASS_LABELS = {
'Background': {'Code': 0, 'Fill': (0, 0, 0, 127)},
'Liver Fluke': {'Code': 11, 'Fill': (255, 0, 0, 127)},
'Rumen Fluke': {'Code': 40, 'Fill': (0, 255, 0, 127)},
'Other': {'Code': 255, 'Fill': (0, 0, 255, 127)}
}
FILENAME_TRIM = False
#Cell
def crop2well(img, thres_adjust=1., bg_color=[0,0,0], op='crop'):
'''Returns a square colour and gray image centered on the well with width same as the height.
thres_adjust is optional otsu threshold scaling multiplier
Also returns the offset to the original image and the well region properties'''
assert op=='mask' or op=='crop', 'op must be either "mask" or "crop"), the value was {}'.format(op)
width = img.shape[0]
img_gray = rgb2gray(img)
thresh = threshold_otsu(img_gray)
img_thresholded = closing(img_gray > thresh * thres_adjust, square(3))
label_image = label(img_thresholded)
well_region = None
for region in regionprops(label_image):
if region.area >= 100000:
well_region = region
# if region_center == 0: raise ValueError('No region_center found')
r0, c0, r1, c1 = well_region.bbox
if op=='mask':
well_circle_mask = pad(well_region.convex_image, (img.shape[:2]), well_region.bbox[:2])
img_crop = img[r0:r1, c0:c1]
img_rgb = pad(img_crop, img.shape, well_region.bbox[:2], bg_color=bg_color)
return img_rgb, img_thresholded, well_circle_mask, well_region
elif op=='crop':
well_center = (c0 + c1)//2
offset = well_center - width//2
img_rgb = img[:, offset:well_center + width//2, :]
return img_rgb, offset
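# Usage sketch (illustrative): given an RGB numpy array `img`, crop a well
# image to a centred square, or get the well mask instead:
#   img_rgb, offset = crop2well(img, op='crop')
#   img_rgb, img_thr, mask, region = crop2well(img, op='mask')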
#Cell
def well_circle_mask(img, well_regionprops):
return pad(well_regionprops.convex_image, (img.shape[:2]), well_regionprops.bbox[:2])
#Cell
def check_well_crop(img, well_regionprops):
assert img.shape[0] == img.shape[1], "Expecting the cropped well image to be square"
bbox = well_regionprops.bbox
bbox_area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
bbox_squareness = (bbox[2] - bbox[0]) / (bbox[3] - bbox[1])
bbox_percent = bbox_area / (img.shape[0] * img.shape[0])
max_well_area = 3.14156 * img.shape[0] * img.shape[0] / 4
illum_well_percent = well_regionprops.area / max_well_area
print('bbox_percent', bbox_percent)
print('bbox_squareness', bbox_squareness)
print('illum_well_percent', illum_well_percent)
print('illum_well_saturation', illum_well_percent)
#Cell
def resize_crop2well_one(fn, i, src_path, dest_path, height):
img = PIL.Image.open(src_path / fn.name)
img_w, img_h = img.size
scale = float(height / img_h)
shape = (int(img_w * scale), height)
img_cropped, offset = crop2well(np.asarray(img.resize(shape)))
img_cropped = PIL.Image.fromarray(img_cropped.astype('uint8'), 'RGB')
img_cropped.save(dest_path / f'{fn.stem}|{scale:.6f}|{offset:3}|.jpg', quality=90)
# def resize_crop2center_one(fn, i, src_path, dest_path, height):
# img = PIL.Image.open(src_path / fn.name)
# img_w, img_h = img.size
# scale = float(height / img_h)
# shape = (int(img_w * scale), height)
# img_cropped, offset = crop2well(np.asarray(img.resize(shape)))
# img_cropped = PIL.Image.fromarray(img_cropped.astype('uint8'), 'RGB')
# img_cropped.save(dest_path / f'{fn.stem}|{scale:.6f}|{offset:3}|.jpg', quality=90)
#Cell
def shuffle_csv(file_csv,random_state=None):
df = pandas.read_csv(file_csv)
df = df.sample(frac=1, random_state=random_state).reset_index(drop=True)
df.to_csv(file_csv, index=False)
#Cell
def split_filenames(file_csv, num_train=0.70, num_val=0.15):
"""
shuffle file names, split into train valid and test and update file_data.csv with labels
:param file_csv: csv file with file names and data
:param num_train:
:param num_val:
:param seed:
:return:
"""
df = pandas.read_csv(file_csv)
__number_files = df.shape[0]
# add Op column
operation = ['Train'] * int(__number_files * num_train)
operation = operation + ['Valid'] * int(__number_files * num_val)
operation = operation + ['Test'] * (__number_files - len(operation))
df['Op'] = operation
df.to_csv(file_csv, index=False)
return None
#Cell
def get_image_size(fn):
return PIL.Image.open(fn).size
#Cell
def make_label_img_from_json(jdata, img, radius=20):
"""
returns a label img with filled circles at places of interest
json data contains info to markup an image with circles
"""
if isinstance(img, np.ndarray): img = PIL.Image.fromarray(img)
lab_img = PIL.Image.new('P', img.size)
DEFAULT_LABEL_FILL = (255, 255, 255, 64)
DEFAULT_LABEL_CODE = (255)
num_points = 0
label_cnt_dict = { cls[0]: {'Code': cls[1]['Code'], 'Count': 0} for cls in CLASS_LABELS.items()}
# for s, sh in enumerate(jdata['shapes']):
for sh in jdata['shapes']:
fill = DEFAULT_LABEL_FILL
try:
it = CLASS_LABELS[sh['label']]
fill = it['Fill']
code = it['Code']
label_cnt_dict[sh['label']]['Count'] += 1
num_points += 1
except:
print("Error in parsing CLASS_LABELS", sh['label'])
# draw a circle at the circle
if sh["shape_type"] == "circle":
xy = np.asarray(sh["points"])
# ave = np.mean(xy, axis=0)
# cx, cy = ave[0], ave[1]
cx, cy = xy[0][0], xy[0][1]
draw = PIL.ImageDraw.Draw(img)
draw.ellipse([cx - radius, cy - radius, cx + radius, cy + radius], outline=0, fill=fill)
draw = PIL.ImageDraw.Draw(lab_img)
draw.ellipse([cx - radius, cy - radius, cx + radius, cy + radius], outline=0, fill=code)
num_points += 1
return np.asarray(img), np.asarray(lab_img), num_points, label_cnt_dict
#Cell
def create_one_label(fn, json_path, lab_path, colormap):
if isinstance(fn, str):
fn = Path(fn)
data = json.load(open(f'{json_path}/{fn.stem}.json'))
# img = PIL.Image.open(f'{src_path}/{fn.stem}.jpg')
img = PIL.Image.new('RGB', (data["imageWidth"], data["imageHeight"]), color=(0, 0, 0))
# h = data["imageHeight"]
# w = data["imageWidth"]
img, lab_img, num_pnts, label_cnt_list = make_label_img_from_json(data, img, radius=10)
# ToDo: fix this code as it is not extendable
lbl_pil_1 = (img[:, :, 0] >= 200) & (img[:, :, 1] <= 10) & (img[:, :, 2] <= 10)
lbl_pil_2 = (img[:, :, 1] >= 200) & (img[:, :, 0] <= 10) & (img[:, :, 2] <= 10)
lbl_pil = lbl_pil_1 + 2 * lbl_pil_2
sav_pil = PIL.Image.fromarray(lbl_pil.astype(np.uint8), mode='P')
sav_pil.putpalette(colormap)
sav_pil.save(f'{lab_path}/{fn.stem}.png')
#Cell
def resize_dir(file_data, src_path, dest_path, number_files='all', height=1200):
"""
Resize an entire directory. Store in the dest directory
:param src_path:
:param dest_path:
:param number_files:
:param height:
:return:
"""
df = pandas.read_csv(file_data)
__number_files = df.shape[0]
print(f'Number of image files: {__number_files}, Number to resize: {number_files}')
Path(dest_path).mkdir(parents=True, exist_ok=True)
if isinstance(number_files, int):
__number_files = number_files
for i in range(__number_files):
f_stem = df.loc[i,'Name'].split('.')[0]
scale = float(height) / df.loc[i,'Height']
img = resize_file(f'{src_path}/{f_stem}.jpg', scale=scale)
img.save(f'{dest_path}/{f_stem}.jpg', quality=90)
progress_bar(i + 1, 50)
return
#Cell
def __crop_image(jsonfn, imgfn, dest_path, size=200, op:str='', debug=False):
"""
Crop an image into multiple sub-tiles centered on each egg
:param jsonfn: JSON file with egg centers
:param imgfn: image to be cropped
:param dest_path: directory to put cropped tiles into
:param size: pixel size of crop
    :param op: operation tag ('Train', 'Valid' or 'Test') recorded against each crop
    :param debug: if True, report the crops without writing any files
    Note: when the module-level FILENAME_TRIM flag is set, the scale and offset portions of the
          filename are removed and extra spaces stripped, 226260 - 1-0.436047-221-.jpg -> 226260-1.jpg
    :return: list of dicts, one per cropped tile, with keys 'Name', 'Label' and 'Op'
"""
img = np.asarray(PIL.Image.open(imgfn))
data = json.load(open(jsonfn))
suffix = Path(imgfn).suffix
dest_path = Path(dest_path)
imgfn = Path(imgfn).stem
assert suffix == '.jpg' or suffix == '.png', "image file type must be jpg or png"
imgShape = img.shape
colormap = colormap_segmentation_labels()
n = 0
croplist = []
for n, sh in enumerate(data['shapes']):
if sh["shape_type"] == "circle":
xy = np.asarray(sh["points"])
ave = np.mean(xy, axis=0)
cx = min(int(round(ave[1])), imgShape[0] - size // 2)
cx = max(cx, size // 2)
cy = min(int(round(ave[0])), imgShape[1] - size // 2)
cy = max(cy, size // 2)
minr = cx - size // 2; maxr = cx + size // 2
minc = cy - size // 2; maxc = cy + size // 2
crop = img[minr:maxr, minc:maxc]
# trim offset and scale from filename
if FILENAME_TRIM:
a, scale, offset, b = imgfn.split('|')
savefn = f'{a}'.replace(" ", "")
else:
savefn = imgfn
savefn = f'{savefn}-{n}{suffix}'
# savefn = f'{dest_path}/{savefn}-{n}{suffix}'
# print('saving', savefn)
if not debug:
if suffix == '.jpg':
PIL.Image.fromarray(crop.astype(np.uint8)).save(dest_path/savefn, quality=90)
elif suffix == '.png':
lbl_pil = PIL.Image.fromarray(crop.astype(np.uint8), mode='P')
lbl_pil.putpalette(colormap)
lbl_pil.save(dest_path/savefn)
else:
print('x', end='')
item = {'Name': savefn, 'Label': sh["label"], 'Op': op}
croplist.append(item)
# print(f'Saved {n} files in {dest_path}')
return croplist
#Cell
def crop_img_dir(file_data:str, json_path:str, src_path:str, dest_path:str, number_files='all', DEBUG=False):
"""
    Crop a directory of image files based on json centres and store the tiles in the dest directory
    - `file_data:` csv file (e.g. file_data.csv) listing the images and their 'Op' split
    - `json_path:` path containing the json annotation files
    - `src_path:` path containing the image and label files to be cropped
    - `dest_path:` destination path for the cropped tiles (Train/Test/Label/Error sub-directories are created)
    - `number_files:` number of files to process, or 'all' for every file listed
    - `return:` (misslist, croplist) - file stems that were skipped and the crop records that were written
"""
if DEBUG: print (f'Debug = {DEBUG} so not saving files')
# assert subdir in ['Train', 'Val', 'Test', None], "subdir must one of 'Train', 'Val', 'Test' or None"
src_df = pandas.read_csv(file_data)
if number_files == 'all':
__number_files = src_df.shape[0]
elif isinstance(number_files, int) :
__number_files = number_files
else:
__number_files = 0
json_path = Path(json_path)
src_path = Path(src_path)
dest_path = Path(dest_path)
(dest_path/'Train').mkdir(parents=True, exist_ok=True)
(dest_path/'Test').mkdir(parents=True, exist_ok=True)
(dest_path/'Label').mkdir(parents=True, exist_ok=True)
(dest_path/'Error').mkdir(parents=True, exist_ok=True)
print(f'{__number_files} files to process in {src_path}' )
misslist = []
croplist = []
vallist = []
for i in range(__number_files):
imgfn = src_df.loc[i,'Name']
f_stem = imgfn.split('.')[0]
labfn = f'{f_stem}.png'
jsonfn = f'{f_stem}.json'
if (src_path/imgfn).exists():
fn = imgfn
file_type = "IMAGE"
elif (src_path/labfn).exists():
fn = labfn
file_type = "LABEL"
else:
fn = None
if (json_path/jsonfn).exists() and fn is not None:
op = src_df.loc[i, 'Op']
if file_type == "LABEL":
cropdir = dest_path / 'Label'
            elif op in ('Valid', 'Train'):
cropdir = dest_path / 'Train'
elif op == 'Test':
cropdir = dest_path / 'Test'
else:
print(f'File: {imgfn} has no Op label or is not a label file')
cropdir = dest_path / 'Error'
# if op == 'Valid' and file_type == "IMAGE":
# vallist += lst
lst = __crop_image(json_path/jsonfn, src_path/fn, cropdir, op=op, size=200, debug=DEBUG)
croplist += lst
progress_bar(i + 1, 50)
else:
misslist.append(f_stem)
print('')
print(f'Missed {len(misslist)} and Cropped {len(croplist)} files in {dest_path}')
# savefn = dest_path / 'valid.txt'
# print(f"Saving {len(vallist)} valid file names in {savefn}")
# if len(vallist) > 0:
# with open(savefn, "w") as a:
# for item in vallist:
# fn = item['Name']
# if Path(fn).suffix == '.jpg' :
# a.write(fn + os.linesep)
return misslist, croplist
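# Usage sketch (hypothetical paths): crop every annotated image into 200 px tiles and
# record the crop metadata in a csv for later use.
# missed, crops = crop_img_dir('data/file_data.csv', 'data/json', 'data/resized', 'data/crops')
# pandas.DataFrame(crops).to_csv('data/crops/crops.csv', index=False)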
#Cell
def move_files_to_dir( movefiles, srcpath, destpath, operation='move', extns=['.jpg','.json','.png']):
"""
    Move or copy the listed files from the source directory to the destination directory
    :param movefiles: list of file stems (without extension) to move
    :param srcpath: source directory containing the files
    :param destpath: destination directory
    :param operation: 'move' or 'copy'
    :param extns: list of extensions to try for each stem
    :return: cnt of files_moved, files_missed (one count per extension)
"""
files_moved = [0]*3
files_missed = [0]*3
Path(destpath).mkdir(parents=True, exist_ok=True)
for fn in movefiles:
fn = Path(f'{srcpath}/{fn}')
for i, ext in enumerate(extns):
if fn.with_suffix(ext).exists():
if operation == 'copy':
shutil.copy(str(fn.with_suffix(ext)), str(destpath))
elif operation == 'move':
shutil.move(str(fn.with_suffix(ext)), str(destpath))
files_moved[i] += 1
else:
files_missed[i] += 1
print( "Files Moved", files_moved)
print( "Files Missed", files_missed)
# print(f'Moved {len(jpg_cnt)} jpg files, {json_cnt} json files and {png_cnt} png files to {movefiles}')
return files_moved, files_missed
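# Usage sketch (hypothetical stems): copy two files (all matching extensions) to a holdout directory.
# moved, missed = move_files_to_dir(['img001', 'img002'], 'data/all', 'data/holdout', operation='copy')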
#Cell
def old_move_files_to_dir( movefiles, srcpath, destpath, operation='move', extns=['.jpg','.json','.png']):
"""
    Move or copy the listed files from the source directory to the destination directory (legacy version)
:param movefiles: list of files to move
:param srcpath: path where to src train files
:param destpath: path where to put test files
:param extns: list of extensions to try
:return: cnt of files_moved, files_missed
"""
files_moved = [0]*3
files_missed = [0]*3
Path(destpath).mkdir(parents=True, exist_ok=True)
for fn in movefiles:
fn = Path(f'{srcpath}/{fn}')
for i, ext in enumerate(extns):
if fn.with_suffix(ext).exists():
if operation == 'copy':
shutil.copy(str(fn.with_suffix(ext)), str(destpath))
elif operation == 'move':
shutil.move(str(fn.with_suffix(ext)), str(destpath))
files_moved[i] += 1
else:
files_missed[i] += 1
print( "Files Moved", files_moved)
print( "Files Missed", files_missed)
# print(f'Moved {len(jpg_cnt)} jpg files, {json_cnt} json files and {png_cnt} png files to {movefiles}')
return files_moved, files_missed
#Cell
def create_labels_dir(json_path, dest_path, number_files='all'):
"""
    Create label png images from the json annotation files and store them in the dest directory
:param json_path: Source path where json files are
:param dest_path: Destination path to store created png label files
:param number_files: Number of json files to process, leave empty for all files in directory
:return: nil
"""
lab_path = Path(dest_path)
lab_path.mkdir(parents=True, exist_ok=True)
fnames_json = sorted(get_files(json_path, extensions=['.json']))
if isinstance(number_files, int):
fnames_json = fnames_json[:number_files]
colormap = colormap_segmentation_labels()
print("Number of json files to process", len(fnames_json))
    if 1:  # set to 0 to use the parallel path in the else branch
for i,fn in enumerate(fnames_json):
create_one_label(fn, json_path, lab_path, colormap)
progress_bar(i + 1, 50)
else:
parallel(partial(create_one_label, json_path=json_path, lab_path=lab_path, colormap=colormap), fnames_json, leave=True)
print("Number of labels files created", len(fnames_json))
``` |
{
"source": "johnnewto/flask-video-streaming",
"score": 3
} |
#### File: johnnewto/flask-video-streaming/camera_FLIR_2.py
```python
__all__ = ['Camera_FLIR', ]
import os
import cv2
from time import sleep
# from base_camera import BaseCamera
import threading
from FLIRCam.USB_camera import Camera as FLIRCamera
import PySpin
class Camera_FLIR():
# video_source = 0
def __init__(self, cam: FLIRCamera ):
self.cam = cam
self.frame = None
# BaseCamera.__init__(self)
# super(Camera_FLIR, self).__init__()
self.stopped = False
self.thread = threading.Thread(target=self.frames, args=())
# self.thread.daemon = True
self.thread.start()
def get_frame(self):
"""Return the current camera frame."""
# BaseCamera.last_access = time.time()
#
# # wait for a signal from the camera thread
# BaseCamera.event.wait()
# BaseCamera.event.clear()
# Todo 'wait for event'
return self.frame
# @staticmethod
def frames(self):
# self.cam = FLIRCamera(model='ptgrey', identity=self.identity, name=self.name)
# Start acquisition
self.cam.start()
# Wait for a while
sleep(1)
if not self.cam.is_running:
raise RuntimeError('Could not start camera.')
        while not self.stopped:  # exit cleanly once stop() is called
# read current frame
# img = self.cam.get_next_image()
frame = self.cam.GetNextImage()
image_converted = frame.Convert(PySpin.PixelFormat_RGB8)
image_converted = image_converted.GetNDArray()
# encode as a jpeg image and return it
# yield cv2.imencode('.jpg', image_converted)[1].tobytes()
self.frame = cv2.imencode('.jpg', image_converted)[1].tobytes()
def stop(self):
"""indicate that the thread should be stopped"""
self.stopped = True
# wait until stream resources are released (producer thread might be still grabbing frame)
self.thread.join()
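# Usage sketch (assumes `flir_cam` is an already-configured FLIRCamera instance):
# cam = Camera_FLIR(flir_cam)
# jpeg_bytes = cam.get_frame()   # may be None until the first frame has been captured
# cam.stop()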
``` |
{
"source": "johnnewto/FLIR-pubsub",
"score": 2
} |
#### File: FLIR-pubsub/FLIR_pubsub/FLIR_camera_calibration_utils.py
```python
__all__ = ['Capture']
# Cell
import imutils
import cv2
from imutils.video import FPS
import zmq
import numpy as np
import time
class Capture:
def __init__(self, name='FrontLeft', url='localhost'):
self.PORT = 5555
self.width = 1000
self.height = 750
self.name = name
self.url = url
self.socket_sub = None
self.context = None
def _recv_array(self, socket:zmq.Context.socket, flags=0, copy=True, track=False):
"""recv a numpy array"""
md = socket.recv_json(flags=flags)
msg = socket.recv(flags=flags, copy=copy, track=track)
buf = memoryview(msg)
A = np.frombuffer(buf, dtype=md['dtype'])
# return (A.reshape(md['shape']), md)
return (A, md)
def _recv_frame(self, socket):
""" Receive and process an image from camera"""
try:
# Get the reply.
topic = socket.recv_string()
rec_frame, md = self._recv_array(socket)
rec_frame = cv2.imdecode(rec_frame, cv2.IMREAD_GRAYSCALE)
rec_frame = cv2.cvtColor(rec_frame, cv2.COLOR_BAYER_BG2BGR)
rec_frame = rec_frame.reshape((3000, 4000, 3))
# rec_frame = imutils.resize(rec_frame, width=width, height=height)
# cv2.putText(rec_frame, f'Received frame {md}',
# (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
except Exception as e:
            rec_frame = np.ones((self.height, self.width))  # placeholder frame (rows = height, cols = width)
topic = 'cam1'
md = None
# cv2.putText(rec_frame, f'error: {e}',
# (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)
print (f"error: message timeout {e}")
time.sleep(1)
return topic, rec_frame, md
def _poll_server(self):
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect(f"tcp://{self.url}:{self.PORT + 1}")
socket.setsockopt(zmq.LINGER, 0)
poller = zmq.Poller()
poller.register(socket, flags=zmq.POLLIN)
socket.send_string(f"keep_alive {self.name}")
        result = dict(poller.poll(1000))  # wait up to 1 s for the keep-alive reply
        poller.unregister(socket)
        socket.close()
    def open_client(self, name=None, url=None):
        """ Receive frames from a single camera. Must have the server running"""
        name = name if name is not None else self.name
        url = url if url is not None else self.url
self.context = zmq.Context()
# subscribe socket
print( "Connecting to server...")
self.socket_sub = self.context.socket(zmq.SUB)
self.socket_sub.connect( f"tcp://{url}:{self.PORT}")
self.socket_sub.setsockopt_string(zmq.SUBSCRIBE, name)
def close_client(self):
self.socket_sub.close()
self.context.term()
cv2.destroyAllWindows()
print('Finished')
# def get_image(self):
# self._poll_server()
# try:
# topic, rec_frame, md = self._recv_frame(self.socket_sub)
# # rec_frame = imutils.resize(rec_frame, width=2400, height=1800)
# except KeyboardInterrupt:
# pass
# return topic, rec_frame, md
def fetch_image(self):
self.open_client()
self._poll_server()
topic, rec_frame, md = self._recv_frame(self.socket_sub)
self.close_client()
return topic, rec_frame, md
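# Usage sketch (assumes a FLIR publisher is running on localhost under the topic 'FrontLeft'):
# cap = Capture(name='FrontLeft', url='localhost')
# topic, frame, md = cap.fetch_image()
# cv2.imwrite(f'{topic}.jpg', frame)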
```
#### File: FLIR-pubsub/FLIR_pubsub/mraa.py
```python
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError('Python 2.7 or later required')
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_mraa')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_mraa')
_mraa = swig_import_helper()
del swig_import_helper
del _swig_python_version_info
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if name == "thisown":
return self.this.own(value)
if name == "this":
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if not static:
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if name == "thisown":
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
class uint8Array(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, uint8Array, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, uint8Array, name)
__repr__ = _swig_repr
def __init__(self, nelements):
this = _mraa.new_uint8Array(nelements)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _mraa.delete_uint8Array
def __del__(self):
return None
def __getitem__(self, index):
return _mraa.uint8Array___getitem__(self, index)
def __setitem__(self, index, value):
return _mraa.uint8Array___setitem__(self, index, value)
def cast(self):
return _mraa.uint8Array_cast(self)
if _newclass:
frompointer = staticmethod(_mraa.uint8Array_frompointer)
else:
frompointer = _mraa.uint8Array_frompointer
# Register uint8Array in _mraa:
_mraa.uint8Array_swigregister(uint8Array)
def uint8Array_frompointer(t):
return _mraa.uint8Array_frompointer(t)
uint8Array_frompointer = _mraa.uint8Array_frompointer
INTEL_GALILEO_GEN1 = _mraa.INTEL_GALILEO_GEN1
INTEL_GALILEO_GEN2 = _mraa.INTEL_GALILEO_GEN2
INTEL_EDISON_FAB_C = _mraa.INTEL_EDISON_FAB_C
INTEL_DE3815 = _mraa.INTEL_DE3815
INTEL_MINNOWBOARD_MAX = _mraa.INTEL_MINNOWBOARD_MAX
RASPBERRY_PI = _mraa.RASPBERRY_PI
BEAGLEBONE = _mraa.BEAGLEBONE
BANANA = _mraa.BANANA
INTEL_NUC5 = _mraa.INTEL_NUC5
A96BOARDS = _mraa.A96BOARDS
INTEL_SOFIA_3GR = _mraa.INTEL_SOFIA_3GR
INTEL_CHERRYHILLS = _mraa.INTEL_CHERRYHILLS
INTEL_UP = _mraa.INTEL_UP
INTEL_JOULE_EXPANSION = _mraa.INTEL_JOULE_EXPANSION
PHYBOARD_WEGA = _mraa.PHYBOARD_WEGA
DE_NANO_SOC = _mraa.DE_NANO_SOC
INTEL_UP2 = _mraa.INTEL_UP2
MTK_LINKIT = _mraa.MTK_LINKIT
MTK_OMEGA2 = _mraa.MTK_OMEGA2
IEI_TANK = _mraa.IEI_TANK
FTDI_FT4222 = _mraa.FTDI_FT4222
GROVEPI = _mraa.GROVEPI
GENERIC_FIRMATA = _mraa.GENERIC_FIRMATA
ANDROID_PERIPHERALMANAGER = _mraa.ANDROID_PERIPHERALMANAGER
MOCK_PLATFORM = _mraa.MOCK_PLATFORM
NULL_PLATFORM = _mraa.NULL_PLATFORM
UNKNOWN_PLATFORM = _mraa.UNKNOWN_PLATFORM
INTEL_EDISON_MINIBOARD_J17_1 = _mraa.INTEL_EDISON_MINIBOARD_J17_1
INTEL_EDISON_MINIBOARD_J17_5 = _mraa.INTEL_EDISON_MINIBOARD_J17_5
INTEL_EDISON_MINIBOARD_J17_7 = _mraa.INTEL_EDISON_MINIBOARD_J17_7
INTEL_EDISON_MINIBOARD_J17_8 = _mraa.INTEL_EDISON_MINIBOARD_J17_8
INTEL_EDISON_MINIBOARD_J17_9 = _mraa.INTEL_EDISON_MINIBOARD_J17_9
INTEL_EDISON_MINIBOARD_J17_10 = _mraa.INTEL_EDISON_MINIBOARD_J17_10
INTEL_EDISON_MINIBOARD_J17_11 = _mraa.INTEL_EDISON_MINIBOARD_J17_11
INTEL_EDISON_MINIBOARD_J17_12 = _mraa.INTEL_EDISON_MINIBOARD_J17_12
INTEL_EDISON_MINIBOARD_J17_14 = _mraa.INTEL_EDISON_MINIBOARD_J17_14
INTEL_EDISON_MINIBOARD_J18_1 = _mraa.INTEL_EDISON_MINIBOARD_J18_1
INTEL_EDISON_MINIBOARD_J18_2 = _mraa.INTEL_EDISON_MINIBOARD_J18_2
INTEL_EDISON_MINIBOARD_J18_6 = _mraa.INTEL_EDISON_MINIBOARD_J18_6
INTEL_EDISON_MINIBOARD_J18_7 = _mraa.INTEL_EDISON_MINIBOARD_J18_7
INTEL_EDISON_MINIBOARD_J18_8 = _mraa.INTEL_EDISON_MINIBOARD_J18_8
INTEL_EDISON_MINIBOARD_J18_10 = _mraa.INTEL_EDISON_MINIBOARD_J18_10
INTEL_EDISON_MINIBOARD_J18_11 = _mraa.INTEL_EDISON_MINIBOARD_J18_11
INTEL_EDISON_MINIBOARD_J18_12 = _mraa.INTEL_EDISON_MINIBOARD_J18_12
INTEL_EDISON_MINIBOARD_J18_13 = _mraa.INTEL_EDISON_MINIBOARD_J18_13
INTEL_EDISON_MINIBOARD_J19_4 = _mraa.INTEL_EDISON_MINIBOARD_J19_4
INTEL_EDISON_MINIBOARD_J19_5 = _mraa.INTEL_EDISON_MINIBOARD_J19_5
INTEL_EDISON_MINIBOARD_J19_6 = _mraa.INTEL_EDISON_MINIBOARD_J19_6
INTEL_EDISON_MINIBOARD_J19_8 = _mraa.INTEL_EDISON_MINIBOARD_J19_8
INTEL_EDISON_MINIBOARD_J19_9 = _mraa.INTEL_EDISON_MINIBOARD_J19_9
INTEL_EDISON_MINIBOARD_J19_10 = _mraa.INTEL_EDISON_MINIBOARD_J19_10
INTEL_EDISON_MINIBOARD_J19_11 = _mraa.INTEL_EDISON_MINIBOARD_J19_11
INTEL_EDISON_MINIBOARD_J19_12 = _mraa.INTEL_EDISON_MINIBOARD_J19_12
INTEL_EDISON_MINIBOARD_J19_13 = _mraa.INTEL_EDISON_MINIBOARD_J19_13
INTEL_EDISON_MINIBOARD_J19_14 = _mraa.INTEL_EDISON_MINIBOARD_J19_14
INTEL_EDISON_MINIBOARD_J20_3 = _mraa.INTEL_EDISON_MINIBOARD_J20_3
INTEL_EDISON_MINIBOARD_J20_4 = _mraa.INTEL_EDISON_MINIBOARD_J20_4
INTEL_EDISON_MINIBOARD_J20_5 = _mraa.INTEL_EDISON_MINIBOARD_J20_5
INTEL_EDISON_MINIBOARD_J20_6 = _mraa.INTEL_EDISON_MINIBOARD_J20_6
INTEL_EDISON_MINIBOARD_J20_7 = _mraa.INTEL_EDISON_MINIBOARD_J20_7
INTEL_EDISON_MINIBOARD_J20_8 = _mraa.INTEL_EDISON_MINIBOARD_J20_8
INTEL_EDISON_MINIBOARD_J20_9 = _mraa.INTEL_EDISON_MINIBOARD_J20_9
INTEL_EDISON_MINIBOARD_J20_10 = _mraa.INTEL_EDISON_MINIBOARD_J20_10
INTEL_EDISON_MINIBOARD_J20_11 = _mraa.INTEL_EDISON_MINIBOARD_J20_11
INTEL_EDISON_MINIBOARD_J20_12 = _mraa.INTEL_EDISON_MINIBOARD_J20_12
INTEL_EDISON_MINIBOARD_J20_13 = _mraa.INTEL_EDISON_MINIBOARD_J20_13
INTEL_EDISON_MINIBOARD_J20_14 = _mraa.INTEL_EDISON_MINIBOARD_J20_14
INTEL_EDISON_GP182 = _mraa.INTEL_EDISON_GP182
INTEL_EDISON_GP135 = _mraa.INTEL_EDISON_GP135
INTEL_EDISON_GP27 = _mraa.INTEL_EDISON_GP27
INTEL_EDISON_GP20 = _mraa.INTEL_EDISON_GP20
INTEL_EDISON_GP28 = _mraa.INTEL_EDISON_GP28
INTEL_EDISON_GP111 = _mraa.INTEL_EDISON_GP111
INTEL_EDISON_GP109 = _mraa.INTEL_EDISON_GP109
INTEL_EDISON_GP115 = _mraa.INTEL_EDISON_GP115
INTEL_EDISON_GP128 = _mraa.INTEL_EDISON_GP128
INTEL_EDISON_GP13 = _mraa.INTEL_EDISON_GP13
INTEL_EDISON_GP165 = _mraa.INTEL_EDISON_GP165
INTEL_EDISON_GP19 = _mraa.INTEL_EDISON_GP19
INTEL_EDISON_GP12 = _mraa.INTEL_EDISON_GP12
INTEL_EDISON_GP183 = _mraa.INTEL_EDISON_GP183
INTEL_EDISON_GP110 = _mraa.INTEL_EDISON_GP110
INTEL_EDISON_GP114 = _mraa.INTEL_EDISON_GP114
INTEL_EDISON_GP129 = _mraa.INTEL_EDISON_GP129
INTEL_EDISON_GP130 = _mraa.INTEL_EDISON_GP130
INTEL_EDISON_GP44 = _mraa.INTEL_EDISON_GP44
INTEL_EDISON_GP46 = _mraa.INTEL_EDISON_GP46
INTEL_EDISON_GP48 = _mraa.INTEL_EDISON_GP48
INTEL_EDISON_GP131 = _mraa.INTEL_EDISON_GP131
INTEL_EDISON_GP14 = _mraa.INTEL_EDISON_GP14
INTEL_EDISON_GP40 = _mraa.INTEL_EDISON_GP40
INTEL_EDISON_GP43 = _mraa.INTEL_EDISON_GP43
INTEL_EDISON_GP77 = _mraa.INTEL_EDISON_GP77
INTEL_EDISON_GP82 = _mraa.INTEL_EDISON_GP82
INTEL_EDISON_GP83 = _mraa.INTEL_EDISON_GP83
INTEL_EDISON_GP134 = _mraa.INTEL_EDISON_GP134
INTEL_EDISON_GP45 = _mraa.INTEL_EDISON_GP45
INTEL_EDISON_GP47 = _mraa.INTEL_EDISON_GP47
INTEL_EDISON_GP49 = _mraa.INTEL_EDISON_GP49
INTEL_EDISON_GP15 = _mraa.INTEL_EDISON_GP15
INTEL_EDISON_GP84 = _mraa.INTEL_EDISON_GP84
INTEL_EDISON_GP42 = _mraa.INTEL_EDISON_GP42
INTEL_EDISON_GP41 = _mraa.INTEL_EDISON_GP41
INTEL_EDISON_GP78 = _mraa.INTEL_EDISON_GP78
INTEL_EDISON_GP79 = _mraa.INTEL_EDISON_GP79
INTEL_EDISON_GP80 = _mraa.INTEL_EDISON_GP80
INTEL_EDISON_GP81 = _mraa.INTEL_EDISON_GP81
RASPBERRY_WIRING_PIN8 = _mraa.RASPBERRY_WIRING_PIN8
RASPBERRY_WIRING_PIN9 = _mraa.RASPBERRY_WIRING_PIN9
RASPBERRY_WIRING_PIN7 = _mraa.RASPBERRY_WIRING_PIN7
RASPBERRY_WIRING_PIN15 = _mraa.RASPBERRY_WIRING_PIN15
RASPBERRY_WIRING_PIN16 = _mraa.RASPBERRY_WIRING_PIN16
RASPBERRY_WIRING_PIN0 = _mraa.RASPBERRY_WIRING_PIN0
RASPBERRY_WIRING_PIN1 = _mraa.RASPBERRY_WIRING_PIN1
RASPBERRY_WIRING_PIN2 = _mraa.RASPBERRY_WIRING_PIN2
RASPBERRY_WIRING_PIN3 = _mraa.RASPBERRY_WIRING_PIN3
RASPBERRY_WIRING_PIN4 = _mraa.RASPBERRY_WIRING_PIN4
RASPBERRY_WIRING_PIN5 = _mraa.RASPBERRY_WIRING_PIN5
RASPBERRY_WIRING_PIN12 = _mraa.RASPBERRY_WIRING_PIN12
RASPBERRY_WIRING_PIN13 = _mraa.RASPBERRY_WIRING_PIN13
RASPBERRY_WIRING_PIN6 = _mraa.RASPBERRY_WIRING_PIN6
RASPBERRY_WIRING_PIN14 = _mraa.RASPBERRY_WIRING_PIN14
RASPBERRY_WIRING_PIN10 = _mraa.RASPBERRY_WIRING_PIN10
RASPBERRY_WIRING_PIN11 = _mraa.RASPBERRY_WIRING_PIN11
RASPBERRY_WIRING_PIN17 = _mraa.RASPBERRY_WIRING_PIN17
RASPBERRY_WIRING_PIN21 = _mraa.RASPBERRY_WIRING_PIN21
RASPBERRY_WIRING_PIN18 = _mraa.RASPBERRY_WIRING_PIN18
RASPBERRY_WIRING_PIN19 = _mraa.RASPBERRY_WIRING_PIN19
RASPBERRY_WIRING_PIN22 = _mraa.RASPBERRY_WIRING_PIN22
RASPBERRY_WIRING_PIN20 = _mraa.RASPBERRY_WIRING_PIN20
RASPBERRY_WIRING_PIN26 = _mraa.RASPBERRY_WIRING_PIN26
RASPBERRY_WIRING_PIN23 = _mraa.RASPBERRY_WIRING_PIN23
RASPBERRY_WIRING_PIN24 = _mraa.RASPBERRY_WIRING_PIN24
RASPBERRY_WIRING_PIN27 = _mraa.RASPBERRY_WIRING_PIN27
RASPBERRY_WIRING_PIN25 = _mraa.RASPBERRY_WIRING_PIN25
RASPBERRY_WIRING_PIN28 = _mraa.RASPBERRY_WIRING_PIN28
RASPBERRY_WIRING_PIN29 = _mraa.RASPBERRY_WIRING_PIN29
SUCCESS = _mraa.SUCCESS
ERROR_FEATURE_NOT_IMPLEMENTED = _mraa.ERROR_FEATURE_NOT_IMPLEMENTED
ERROR_FEATURE_NOT_SUPPORTED = _mraa.ERROR_FEATURE_NOT_SUPPORTED
ERROR_INVALID_VERBOSITY_LEVEL = _mraa.ERROR_INVALID_VERBOSITY_LEVEL
ERROR_INVALID_PARAMETER = _mraa.ERROR_INVALID_PARAMETER
ERROR_INVALID_HANDLE = _mraa.ERROR_INVALID_HANDLE
ERROR_NO_RESOURCES = _mraa.ERROR_NO_RESOURCES
ERROR_INVALID_RESOURCE = _mraa.ERROR_INVALID_RESOURCE
ERROR_INVALID_QUEUE_TYPE = _mraa.ERROR_INVALID_QUEUE_TYPE
ERROR_NO_DATA_AVAILABLE = _mraa.ERROR_NO_DATA_AVAILABLE
ERROR_INVALID_PLATFORM = _mraa.ERROR_INVALID_PLATFORM
ERROR_PLATFORM_NOT_INITIALISED = _mraa.ERROR_PLATFORM_NOT_INITIALISED
ERROR_UART_OW_SHORTED = _mraa.ERROR_UART_OW_SHORTED
ERROR_UART_OW_NO_DEVICES = _mraa.ERROR_UART_OW_NO_DEVICES
ERROR_UART_OW_DATA_ERROR = _mraa.ERROR_UART_OW_DATA_ERROR
ERROR_UNSPECIFIED = _mraa.ERROR_UNSPECIFIED
PIN_VALID = _mraa.PIN_VALID
PIN_GPIO = _mraa.PIN_GPIO
PIN_PWM = _mraa.PIN_PWM
PIN_FAST_GPIO = _mraa.PIN_FAST_GPIO
PIN_SPI = _mraa.PIN_SPI
PIN_I2C = _mraa.PIN_I2C
PIN_AIO = _mraa.PIN_AIO
PIN_UART = _mraa.PIN_UART
I2C_STD = _mraa.I2C_STD
I2C_FAST = _mraa.I2C_FAST
I2C_HIGH = _mraa.I2C_HIGH
UART_PARITY_NONE = _mraa.UART_PARITY_NONE
UART_PARITY_EVEN = _mraa.UART_PARITY_EVEN
UART_PARITY_ODD = _mraa.UART_PARITY_ODD
UART_PARITY_MARK = _mraa.UART_PARITY_MARK
UART_PARITY_SPACE = _mraa.UART_PARITY_SPACE
def init():
return _mraa.init()
init = _mraa.init
def getVersion():
return _mraa.getVersion()
getVersion = _mraa.getVersion
def setPriority(priority):
return _mraa.setPriority(priority)
setPriority = _mraa.setPriority
def getPlatformType():
return _mraa.getPlatformType()
getPlatformType = _mraa.getPlatformType
def printError(result):
return _mraa.printError(result)
printError = _mraa.printError
def pinModeTest(pin, mode):
return _mraa.pinModeTest(pin, mode)
pinModeTest = _mraa.pinModeTest
def adcRawBits():
return _mraa.adcRawBits()
adcRawBits = _mraa.adcRawBits
def adcSupportedBits():
return _mraa.adcSupportedBits()
adcSupportedBits = _mraa.adcSupportedBits
def getPlatformName():
return _mraa.getPlatformName()
getPlatformName = _mraa.getPlatformName
def getPlatformVersion(*args):
return _mraa.getPlatformVersion(*args)
getPlatformVersion = _mraa.getPlatformVersion
def getPinCount():
return _mraa.getPinCount()
getPinCount = _mraa.getPinCount
def getUartCount():
return _mraa.getUartCount()
getUartCount = _mraa.getUartCount
def getI2cBusCount():
return _mraa.getI2cBusCount()
getI2cBusCount = _mraa.getI2cBusCount
def getI2cBusId(i2c_bus):
return _mraa.getI2cBusId(i2c_bus)
getI2cBusId = _mraa.getI2cBusId
def getPinName(pin):
return _mraa.getPinName(pin)
getPinName = _mraa.getPinName
def getGpioLookup(pin_name):
return _mraa.getGpioLookup(pin_name)
getGpioLookup = _mraa.getGpioLookup
def getI2cLookup(i2c_name):
return _mraa.getI2cLookup(i2c_name)
getI2cLookup = _mraa.getI2cLookup
def getSpiLookup(spi_name):
return _mraa.getSpiLookup(spi_name)
getSpiLookup = _mraa.getSpiLookup
def getPwmLookup(pwm_name):
return _mraa.getPwmLookup(pwm_name)
getPwmLookup = _mraa.getPwmLookup
def getUartLookup(uart_name):
return _mraa.getUartLookup(uart_name)
getUartLookup = _mraa.getUartLookup
def setLogLevel(level):
return _mraa.setLogLevel(level)
setLogLevel = _mraa.setLogLevel
def hasSubPlatform():
return _mraa.hasSubPlatform()
hasSubPlatform = _mraa.hasSubPlatform
def isSubPlatformId(pin_or_bus_id):
return _mraa.isSubPlatformId(pin_or_bus_id)
isSubPlatformId = _mraa.isSubPlatformId
def getSubPlatformId(pin_or_bus_index):
return _mraa.getSubPlatformId(pin_or_bus_index)
getSubPlatformId = _mraa.getSubPlatformId
def getSubPlatformIndex(pin_or_bus_id):
return _mraa.getSubPlatformIndex(pin_or_bus_id)
getSubPlatformIndex = _mraa.getSubPlatformIndex
def getDefaultI2cBus(*args):
return _mraa.getDefaultI2cBus(*args)
getDefaultI2cBus = _mraa.getDefaultI2cBus
def addSubplatform(subplatformtype, dev):
return _mraa.addSubplatform(subplatformtype, dev)
addSubplatform = _mraa.addSubplatform
def removeSubplatform(subplatformtype):
return _mraa.removeSubplatform(subplatformtype)
removeSubplatform = _mraa.removeSubplatform
def initJsonPlatform(path):
return _mraa.initJsonPlatform(path)
initJsonPlatform = _mraa.initJsonPlatform
def gpioFromDesc(desc):
return _mraa.gpioFromDesc(desc)
gpioFromDesc = _mraa.gpioFromDesc
def aioFromDesc(desc):
return _mraa.aioFromDesc(desc)
aioFromDesc = _mraa.aioFromDesc
def uartFromDesc(desc):
return _mraa.uartFromDesc(desc)
uartFromDesc = _mraa.uartFromDesc
def spiFromDesc(desc):
return _mraa.spiFromDesc(desc)
spiFromDesc = _mraa.spiFromDesc
def i2cFromDesc(desc):
return _mraa.i2cFromDesc(desc)
i2cFromDesc = _mraa.i2cFromDesc
def pwmFromDesc(desc):
return _mraa.pwmFromDesc(desc)
pwmFromDesc = _mraa.pwmFromDesc
def ledFromDesc(desc):
return _mraa.ledFromDesc(desc)
ledFromDesc = _mraa.ledFromDesc
MODE_STRONG = _mraa.MODE_STRONG
MODE_PULLUP = _mraa.MODE_PULLUP
MODE_PULLDOWN = _mraa.MODE_PULLDOWN
MODE_HIZ = _mraa.MODE_HIZ
DIR_OUT = _mraa.DIR_OUT
DIR_IN = _mraa.DIR_IN
DIR_OUT_HIGH = _mraa.DIR_OUT_HIGH
DIR_OUT_LOW = _mraa.DIR_OUT_LOW
EDGE_NONE = _mraa.EDGE_NONE
EDGE_BOTH = _mraa.EDGE_BOTH
EDGE_RISING = _mraa.EDGE_RISING
EDGE_FALLING = _mraa.EDGE_FALLING
MODE_IN_ACTIVE_HIGH = _mraa.MODE_IN_ACTIVE_HIGH
MODE_IN_ACTIVE_LOW = _mraa.MODE_IN_ACTIVE_LOW
MODE_OUT_OPEN_DRAIN = _mraa.MODE_OUT_OPEN_DRAIN
MODE_OUT_PUSH_PULL = _mraa.MODE_OUT_PUSH_PULL
class Gpio(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Gpio, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Gpio, name)
__repr__ = _swig_repr
def __init__(self, pin, owner=True, raw=False):
this = _mraa.new_Gpio(pin, owner, raw)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _mraa.delete_Gpio
def __del__(self):
return None
def edge(self, mode):
return _mraa.Gpio_edge(self, mode)
def isr(self, mode, pyfunc, args):
return _mraa.Gpio_isr(self, mode, pyfunc, args)
def isrExit(self):
return _mraa.Gpio_isrExit(self)
def mode(self, mode):
return _mraa.Gpio_mode(self, mode)
def dir(self, dir):
return _mraa.Gpio_dir(self, dir)
def readDir(self):
return _mraa.Gpio_readDir(self)
def read(self):
return _mraa.Gpio_read(self)
def write(self, value):
return _mraa.Gpio_write(self, value)
def useMmap(self, enable):
return _mraa.Gpio_useMmap(self, enable)
def getPin(self, raw=False):
return _mraa.Gpio_getPin(self, raw)
def inputMode(self, mode):
return _mraa.Gpio_inputMode(self, mode)
def outputMode(self, mode):
return _mraa.Gpio_outputMode(self, mode)
# Register Gpio in _mraa:
_mraa.Gpio_swigregister(Gpio)
class I2c(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, I2c, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, I2c, name)
__repr__ = _swig_repr
def __init__(self, bus, raw=False):
this = _mraa.new_I2c(bus, raw)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _mraa.delete_I2c
def __del__(self):
return None
def frequency(self, mode):
return _mraa.I2c_frequency(self, mode)
def address(self, address):
return _mraa.I2c_address(self, address)
def readByte(self):
return _mraa.I2c_readByte(self)
def read(self, data):
return _mraa.I2c_read(self, data)
def readReg(self, reg):
return _mraa.I2c_readReg(self, reg)
def readWordReg(self, reg):
return _mraa.I2c_readWordReg(self, reg)
def readBytesReg(self, reg, data):
return _mraa.I2c_readBytesReg(self, reg, data)
def writeByte(self, data):
return _mraa.I2c_writeByte(self, data)
def write(self, data):
return _mraa.I2c_write(self, data)
def writeReg(self, reg, data):
return _mraa.I2c_writeReg(self, reg, data)
def writeWordReg(self, reg, data):
return _mraa.I2c_writeWordReg(self, reg, data)
# Register I2c in _mraa:
_mraa.I2c_swigregister(I2c)
class Pwm(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Pwm, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Pwm, name)
__repr__ = _swig_repr
def __init__(self, pin, owner=True, chipid=-1):
this = _mraa.new_Pwm(pin, owner, chipid)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _mraa.delete_Pwm
def __del__(self):
return None
def write(self, percentage):
return _mraa.Pwm_write(self, percentage)
def read(self):
return _mraa.Pwm_read(self)
def period(self, period):
return _mraa.Pwm_period(self, period)
def period_ms(self, ms):
return _mraa.Pwm_period_ms(self, ms)
def period_us(self, us):
return _mraa.Pwm_period_us(self, us)
def pulsewidth(self, seconds):
return _mraa.Pwm_pulsewidth(self, seconds)
def pulsewidth_ms(self, ms):
return _mraa.Pwm_pulsewidth_ms(self, ms)
def pulsewidth_us(self, us):
return _mraa.Pwm_pulsewidth_us(self, us)
def enable(self, enable):
return _mraa.Pwm_enable(self, enable)
def max_period(self):
return _mraa.Pwm_max_period(self)
def min_period(self):
return _mraa.Pwm_min_period(self)
# Register Pwm in _mraa:
_mraa.Pwm_swigregister(Pwm)
SPI_MODE0 = _mraa.SPI_MODE0
SPI_MODE1 = _mraa.SPI_MODE1
SPI_MODE2 = _mraa.SPI_MODE2
SPI_MODE3 = _mraa.SPI_MODE3
class Spi(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Spi, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Spi, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _mraa.new_Spi(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _mraa.delete_Spi
def __del__(self):
return None
def mode(self, mode):
return _mraa.Spi_mode(self, mode)
def frequency(self, hz):
return _mraa.Spi_frequency(self, hz)
def writeByte(self, data):
return _mraa.Spi_writeByte(self, data)
def writeWord(self, data):
return _mraa.Spi_writeWord(self, data)
def write(self, txBuf):
return _mraa.Spi_write(self, txBuf)
def lsbmode(self, lsb):
return _mraa.Spi_lsbmode(self, lsb)
def bitPerWord(self, bits):
return _mraa.Spi_bitPerWord(self, bits)
# Register Spi in _mraa:
_mraa.Spi_swigregister(Spi)
class Aio(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Aio, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Aio, name)
__repr__ = _swig_repr
def __init__(self, pin):
this = _mraa.new_Aio(pin)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _mraa.delete_Aio
def __del__(self):
return None
def read(self):
return _mraa.Aio_read(self)
def readFloat(self):
return _mraa.Aio_readFloat(self)
def setBit(self, bits):
return _mraa.Aio_setBit(self, bits)
def getBit(self):
return _mraa.Aio_getBit(self)
# Register Aio in _mraa:
_mraa.Aio_swigregister(Aio)
class Uart(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Uart, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Uart, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _mraa.new_Uart(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _mraa.delete_Uart
def __del__(self):
return None
def getDevicePath(self):
return _mraa.Uart_getDevicePath(self)
def read(self, data):
return _mraa.Uart_read(self, data)
def write(self, data):
return _mraa.Uart_write(self, data)
def readStr(self, length):
return _mraa.Uart_readStr(self, length)
def writeStr(self, data):
return _mraa.Uart_writeStr(self, data)
def dataAvailable(self, millis=0):
return _mraa.Uart_dataAvailable(self, millis)
def flush(self):
return _mraa.Uart_flush(self)
def sendBreak(self, duration):
return _mraa.Uart_sendBreak(self, duration)
def setBaudRate(self, baud):
return _mraa.Uart_setBaudRate(self, baud)
def setMode(self, bytesize, parity, stopbits):
return _mraa.Uart_setMode(self, bytesize, parity, stopbits)
def setFlowcontrol(self, xonxoff, rtscts):
return _mraa.Uart_setFlowcontrol(self, xonxoff, rtscts)
def setTimeout(self, read, write, interchar):
return _mraa.Uart_setTimeout(self, read, write, interchar)
def setNonBlocking(self, nonblock):
return _mraa.Uart_setNonBlocking(self, nonblock)
# Register Uart in _mraa:
_mraa.Uart_swigregister(Uart)
class Led(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Led, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Led, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _mraa.new_Led(*args)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
__swig_destroy__ = _mraa.delete_Led
def __del__(self):
return None
def setBrightness(self, value):
return _mraa.Led_setBrightness(self, value)
def readBrightness(self):
return _mraa.Led_readBrightness(self)
def readMaxBrightness(self):
return _mraa.Led_readMaxBrightness(self)
def trigger(self, trigger):
return _mraa.Led_trigger(self, trigger)
def clearTrigger(self):
return _mraa.Led_clearTrigger(self)
# Register Led in _mraa:
_mraa.Led_swigregister(Led)
# This file is compatible with both classic and new-style classes.
```
#### File: FLIR-pubsub/scratch/camera_puzzle.py
```python
import cv2
from scratch.rtsp.cv_videostream import CV_VideoStream
from kivy.app import App
from kivy.uix.camera import Camera
from kivy.uix.widget import Widget
from kivy.uix.slider import Slider
from kivy.uix.scatter import Scatter
from kivy.uix.image import Image
from kivy.animation import Animation
from kivy.graphics import Color, Rectangle
from kivy.clock import Clock
from kivy.properties import NumericProperty
from random import randint, random
from functools import partial
from kivy.graphics.texture import Texture
from kivy.uix.boxlayout import BoxLayout
import numpy as np
class Puzzle(Image):
blocksize = NumericProperty(250)
def on_texture_size(self, instance, value):
self.build()
def on_blocksize(self, instance, value):
self.build()
def build(self):
self.clear_widgets()
self.allow_stretch = True
texture = self.texture
if not texture:
return
bs = self.blocksize
tw, th = self.texture_size
for x in range(int(tw / bs)):
for y in range(int(th / bs)):
bx = x * bs
by = y * bs
subtexture = texture.get_region(bx, by, bs, bs)
# node = PuzzleNode(texture=subtexture,
# size=(bs, bs), pos=(bx, by))
node = Scatter(pos=(bx, by), size=(bs, bs))
with node.canvas:
Color(1, 1, 1)
Rectangle(size=node.size, texture=subtexture)
self.add_widget(node)
self.shuffle()
# self.update_nodes(self.texture)
def update_nodes(self, dt):
# texture = self.texture
tw, th = self.texture_size
# buf = np.array([int(random.random() * x * 255 / size) for x in range(size)])
        buf = np.ones(1280*720*3, dtype=np.uint8)  # unsigned bytes to match bufferfmt='ubyte'
texture1 = Texture.create(size=(1280, 720), colorfmt='bgr')
texture1.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
# buf = np.ones([int(random.random() * x * 255 / size) for x in range(size)])
print('update', max(buf), min(buf), np.mean(buf))
# then blit the buffer
# texture.blit_buffer(buf.tostring(), colorfmt='bgr', bufferfmt='ubyte')
bs = self.blocksize
count = int(tw / bs) * int(th / bs)
childindex = 0
for x in range(int(tw / bs)):
for y in range(int(th / bs)):
bx = x * bs
by = y * bs
subtexture = texture1.get_region(bx, by, bs, bs)
child = self.children[childindex]
child.texture = subtexture
childindex += 1
# self.texture = texture1
def shuffle(self):
texture = self.texture
bs = self.blocksize
tw, th = self.texture_size
count = int(tw / bs) * int(th / bs)
indices = list(range(count))
childindex = 0
while indices:
index = indices.pop(randint(0, len(indices) - 1))
x = bs * (index % int(tw / bs))
y = bs * int(index / int(tw / bs))
child = self.children[childindex]
a = Animation(d=random() / 4.) + Animation(pos=(x, y),
t='out_quad', d=.4)
a.start(child)
childindex += 1
def on_touch_down(self, touch):
# self.update_nodes()
print("touchdown")
# return True
super(Puzzle, self).on_touch_down(touch)
# def on_touch_down(self, touch):
# if touch.is_double_tap:
# self.shuffle()
# return True
# super(Puzzle, self).on_touch_down(touch)
class PuzzleApp(App):
def build(self):
self.vs = CV_VideoStream(src="rtsp://192.168.183.242:554", verbose=True).start()
root = Widget()
self.puzzle = Puzzle(source='images/aero1.jpg')
self.texturebuffer = Texture.create(size=(1280, 720), colorfmt='bgr')
slider = Slider(min=200, max=400, step=10, size=(800, 50),size_hint=(1,0.1) )
slider.bind(value=partial(self.on_value, self.puzzle))
layout = BoxLayout(orientation='vertical')
layout.add_widget(self.puzzle)
layout.add_widget(slider)
# layout.add_widget(button)
Clock.schedule_interval(self.update_kivy, 1.0 / 30.0)
# Clock.schedule_interval(self.puzzle.update_nodes, 1.0 / 5.0)
return layout
def on_value(self, puzzle, instance, value):
value = int((value + 5) / 10) * 10
puzzle.blocksize = value
instance.value = value
def update_kivy(self, dt):
'''display image from cam in kivy window'''
frame = self.vs.read()
if hasattr(frame, 'size') and frame.size > 100000:
# convert it to texture
buf = cv2.flip(frame, 0)
            buf = buf.tobytes()
# self.texturebuffer.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
# # display image from the texture
# self.puzzle.texture = self.texturebuffer
texture1 = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
texture1.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
# display image from the texture
self.puzzle.texture = texture1
# self.puzzle.update_nodes(texture1)
if __name__ == '__main__':
PuzzleApp().run()
```
#### File: scratch/flask_VS_like/camera_test.py
```python
import time
import cv2
import numpy as np
from scratch.flask_VS_like.base_camera import *
class CameraTest(BaseCamera):
"""An emulated camera implementation that streams a repeated sequence of
files 1.jpg, 2.jpg and 3.jpg at a rate of one frame per second."""
imgs = [cv2.imread('../images/' + f + '.jpg') for f in ['1', '2', '3']]
a = 'do nothing'
@staticmethod
def frames():
shape = (1000, 1500, 3)
i = 0
while True:
time.sleep(0.1)
img = np.zeros(shape, dtype=np.uint8)
cv2.putText(img, f"frame: {i}",
(shape[1] // 4, shape[0] // 2), cv2.FONT_HERSHEY_SIMPLEX, 5, (255, 255, 255), 5)
i += 1
# if not close
# yield Camera.imgs[int(time.time()) % 3]
yield img
def gen(camera):
"""Video streaming generator function."""
while True:
frame = camera.get_frame()
yield (frame)
if __name__ == '__main__':
frames1 = gen(CameraTest())
cv2.namedWindow('VIDEO1')
frames2 = gen(CameraTest())
cv2.namedWindow('VIDEO2')
count = 0
while True:
count += 1
if count < 10:
frame = next(frames1, None)
if frame is not None:
cv2.imshow('VIDEO1', frame)
frame = next(frames2, None)
if frame is not None:
cv2.imshow('VIDEO2', frame)
else:
time.sleep(0.1)
k = cv2.waitKey(10)
txt = None
if k == 27 or k == 3:
break # esc to quit
elif k == ord('s'):
count = 0
# print(count)
cv2.destroyAllWindows()
```
#### File: scratch/kivy_multi/flir_cam_controller.py
```python
__all__ = ['NP_SharedMemory', 'SpinProcess', 'fake_camera_process', 'flir_camera_process', 'VideoCapture']
import logging
import multiprocessing
from multiprocessing import Process, freeze_support, Lock, current_process, Value, Array
import numpy as np
import time
import cv2, imutils, os
from FLIR_pubsub import FLIR_client_utils as flir
width = 1000
height = 750
class NP_SharedMemory:
"""
Controls access to shared memory for the main and camera processes
https://research.wmz.ninja/articles/2018/03/on-sharing-large-arrays-when-using-pythons-multiprocessing.html
https://docs.python.org/3/library/multiprocessing.html
https://dzone.com/articles/python-201-a-multiprocessing-tutorial
https://docs.python.org/3.7/library/multiprocessing.html#synchronization-primitives
https://docs.python.org/3.7/library/array.html#module-array
Note for Windows:
https://docs.python.org/2/library/multiprocessing.html#windows
"""
def __init__(self, shape, name='demo'):
# self.shape = shape
self.name = name
self._paint = Value('B', False) # signal ready to _paint , 'B' unsigned char
self._stop = Value('B', False) # signal _stop process
self._kick = Value('B', 0) # _kick to keep awake
# self._lock = Lock()
self._width = Value('I', 0) # 'I' unsigned int
self._height = Value('I', 0) # 'I' unsigned int
# create shared buffer buf_mp
self._base_arr:Array = Array('B', 4000*3000*3) # largest size
# self._base_arr:Array = Array('B', shape[0] * shape[1] * shape[2])
self.arr = None # need to call set_local_np_view in local process
def set_local_np_view(self):
""" run this within the local process to create a local NP view of the shared memory """
# Wrap buf_mp as an numpy array so we can easily manipulates its data.
# self.arr = np.frombuffer(self._arr_mp.get_obj(), dtype=np.uint8).reshape(self.shape)
self.arr = np.frombuffer(self._base_arr.get_obj(), dtype=np.uint8) # No reshape i.e flat
        print(f'{current_process().name} PID:{os.getpid()} - {self.name}: Setting up local numpy array view: {self.arr.shape} of {self.arr.dtype}')
def close(self):
''' not sure if we need this'''
del self._base_arr
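# Usage sketch: one shared buffer per camera; the parent and the child process each call
# set_local_np_view() to wrap the same memory in their own numpy view.
# sm = NP_SharedMemory((750, 1000, 3), name='cam0')
# sm.set_local_np_view()
# p = Process(target=fake_camera_process, args=(sm,)); p.start()   # child calls set_local_np_view() itself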
def FLIR_camera_process(sm:NP_SharedMemory, name='', url='localhost'):
""" Received frames from a single camera. Must have the server running"""
chan = flir.FLIR_Client(name=name, url=url)
i = 0
while not sm._stop.value:
try:
frame, topic, md = chan.read_image()
except KeyboardInterrupt:
break
k = cv2.waitKey(10)
if k == 27 or k == 3:
break # esc to quit
if frame is not None:
frame = imutils.resize(frame, width=width, height=height)
cv2.putText(frame, f"{md['framedata']['frameid']}",
(10, 100), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 5)
cv2.imshow(topic, frame)
txt = None
if k == ord('c'):
i = 0
txt = f'Reset name to {topic}-{i}.jpg'
elif k >= ord('s'):
txt = f'Saving {topic}-{i}.jpg'
cv2.imwrite(f'{topic}-{i}.jpg', frame)
i += 1
if txt is not None:
print(txt)
cv2.putText(frame, txt, (100, 500), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 5)
cv2.imshow(topic, frame)
cv2.waitKey(1000)
chan.close()
cv2.destroyAllWindows()
print('Finished')
print("[INFO] approx. FPS: {:.2f}".format(chan.fps.fps()))
class SpinProcess():
'''
Spin a multiprocess with controlls to read a shared numpy array
process_name = multiprocessing.current_process().name
thread_name = threading.current_thread().name
'''
def __init__(self, target=None, sharemem=None):
self.sm = sharemem
self.proc = Process(target=target, args=(sharemem,))
self.proc.start()
def read(self, kick=True):
''' read the process across shared memory '''
if kick: self.sm._kick.value = True # kick signal keep process from idle
# todo JN could restart process if stopped
        shape = (self.sm._height.value, self.sm._width.value, 3)
        if self.sm._paint.value and shape[0] * shape[1] > 10000:
            # return a reshaped numpy view of the shared buffer
            ret = self.sm.arr[:shape[0] * shape[1] * shape[2]].reshape(shape)
            self.sm._paint.value = False
            return ret
else:
return None
def kick(self):
''' kick the process to keep alive '''
# todo JN could restart process if stopped
self.sm._kick.value = True
def shutdown(self):
''' shutdown process'''
self.sm._stop.value = True
def set_stop(self):
''' shutdown process'''
self.sm._stop.value = True
    def get_paint(self):
        return self.sm._paint.value
    def set_paint(self, val):
        self.sm._paint.value = val
def join(self):
self.sm._stop.value = True
self.proc.join()
def release(self):
self.sm._stop.value = True
self.proc.join()
class VideoCapture():
""" Return a videocapture view of the spin process read """
def __init__(self, src=None, sharemem=None):
self.src = src
self._proc = SpinProcess(target=src, sharemem=sharemem)
self._open = True
    def getBackendName(self):
        return "flir_cam_controller"
def read(self, kick=True):
# todo JN could restart process if stopped
return self._proc.read()
def isOpened(self):
return self._open
def kick(self):
        self._proc.kick()
def release(self):
self._open = False
self._proc.shutdown()
import FLIR_pubsub.FLIR_client_utils as fcu
def fake_camera_process(sm:NP_SharedMemory,):
""" Run a fake or test separate process to process and decode camera frames """
sm.set_local_np_view()
i = 0
last_access = time.time()
cam_idle = False
IDLE_TIMEOUT = 5
while not sm._stop.value:
# sm._lock.acquire()
try:
if sm._kick.value:
last_access = time.time()
# print(f'Cam {sm.name}: Stream Start: i = {i}')
sm._kick.value = False
cam_idle = False
if time.time() - last_access > IDLE_TIMEOUT:
if not cam_idle:
print(f'Cam {sm.name}: Stream Idle : i = {i}')
cam_idle = True
if not cam_idle:
# do work
time.sleep(0.01)
# do some hard work for 10 msec
start = time.monotonic()
while time.monotonic() - start < 0.01:
pass
# fill buffer with fake data
arr = np.ones((2000//2,3000//2,3), dtype=np.uint8)*33
# arr = np.ones((2000, 3000, 3), dtype=np.uint8) * 33
arr += 10 # here just inc by 1
cv2.putText(arr, f"{i} : FLIR ",
(arr.shape[1]//20, arr.shape[0]//2), cv2.FONT_HERSHEY_SIMPLEX, 10, (255, 255, 255), 10)
sm._width.value = arr.shape[1]
sm._height.value = arr.shape[0]
# arr = np.flipud(arr).ravel() # kivy requires flipping
sm.arr[:arr.size] = arr.ravel()
# np.copyto(sm.arr[:arr.size], arr)
sm._paint.value = True
i = i + 1
else:
time.sleep(0.1)
except KeyboardInterrupt:
print("Cam: KeyboardInterrupt")
pass
# sm._lock.release()
print('Cam: _stop')
def flir_camera_process(sm:NP_SharedMemory,):
""" start a client socket process to process and decode remote FLIR camera frames """
name = 'FrontLeft'
url = 'localhost'
client = fcu.FLIR_Client(name=name, url=url)
sm.set_local_np_view()
i = 0
last_access = time.time()
cam_idle = False
IDLE_TIMEOUT = 5
while not sm._stop.value:
# sm._lock.acquire()
try:
if sm._kick.value:
last_access = time.time()
# print(f'Cam {sm.name}: Stream Start: i = {i}')
sm._kick.value = False
cam_idle = False
if time.time() - last_access > IDLE_TIMEOUT:
if not cam_idle:
print(f'Cam {sm.name}: Stream Idle : i = {i}')
cam_idle = True
if not cam_idle:
time.sleep(0.01)
# # do some hard work for 10 msec
# start = time.monotonic()
# while time.monotonic() - start < 0.01:
# pass
# fill buffer
arr, name, md = client.read_image()
if arr is not None:
arr = imutils.resize(arr, width=1200)
cv2.putText(arr, f"{i}",
(arr.shape[1]//20, arr.shape[0]//2), cv2.FONT_HERSHEY_SIMPLEX, 10, (255, 255, 255), 10)
sm._width.value = arr.shape[1]
sm._height.value = arr.shape[0]
# arr = np.flipud(arr).ravel() # kivy requires flipping
sm.arr[:arr.size] = arr.ravel()
# np.copyto(sm.arr[:arr.size], arr)
# sm.arr += 1 # here just inc by 1
sm._paint.value = True
i = i + 1
else:
time.sleep(0.01)
else:
time.sleep(0.1)
except KeyboardInterrupt:
print("Cam: KeyboardInterrupt")
pass
# sm._lock.release()
print('Cam: _stop')
if __name__ == '__main__':
''' Test the above of the using fake data and cv2.namedWindow() view '''
freeze_support() # The freeze_support() line can be omitted if the program will be run normally instead of frozen.
multiprocessing.log_to_stderr()
logger = multiprocessing.get_logger()
logger.setLevel(logging.INFO)
# # shared memory
sm1 = NP_SharedMemory((2000, 3000, 3), name='camera_1')
sm1.set_local_np_view()
sm2 = NP_SharedMemory((2000, 3000, 3), name='camera_2')
sm2.set_local_np_view()
cv2.namedWindow(sm1.name, cv2.WINDOW_NORMAL )
cv2.namedWindow(sm2.name, cv2.WINDOW_NORMAL )
p1 = SpinProcess(target=fake_camera_process, sharemem=sm1)
vc2 = VideoCapture(src=fake_camera_process, sharemem=sm2)
while True:
try:
time.sleep(0.1)
arr = p1.read(kick=False) # dont kick process to test idle state
if arr is not None:
cv2.imshow(sm1.name, arr)
arr = vc2.read()
if arr is not None:
cv2.imshow(sm2.name, arr)
k = cv2.waitKey(1)
txt = None
if k == 27 or k == 3:
break # esc to quit
elif k == ord('s'):
p1.shutdown()
vc2.release()
elif k == ord('g'):
p1.kick()
vc2.kick()
except KeyboardInterrupt:
print("Main: KeyboardInterrupt")
break
print('Main: End of program')
p1.release()
vc2.release()
cv2.destroyAllWindows()
```
#### File: scratch/mult-process/mp1b.py
```python
import logging
import multiprocessing
from multiprocessing import Process, freeze_support, Lock, current_process, Value, Array, RawArray, RawValue
import numpy as np
import time
import cv2, imutils
class NP_SharedMemory:
"""
Controls access to shared memory for the main and camera processes
https://research.wmz.ninja/articles/2018/03/on-sharing-large-arrays-when-using-pythons-multiprocessing.html
https://docs.python.org/3/library/multiprocessing.html
https://dzone.com/articles/python-201-a-multiprocessing-tutorial
https://docs.python.org/3.7/library/multiprocessing.html#synchronization-primitives
"""
def __init__(self, shape, name='demo'):
self.shape = shape
self.name = name
self.paint = Value('B', False)
self.stop = Value('B', False)
self.start = Value('B', 0)
self.lock = Lock()
# create shared buffer buf_mp
self._arr_mp = Array('B', shape[0] * shape[1] * shape[2])
self.arr = None # need to call set_local_np_view in local process
def set_local_np_view(self):
""" run this within the local process to create a local NP view of the shared memory """
# Wrap buf_mp as an numpy array so we can easily manipulates its data.
self.arr = np.frombuffer(self._arr_mp.get_obj(), dtype=np.uint8).reshape(self.shape)
        print(f'{current_process().name} - {self.name}: Setting up Local NP Buffer: {self.arr.shape}')
def demo_camera_process(sm:NP_SharedMemory,):
""" Run a separate process to process and decode camera frames """
sm.set_local_np_view()
i = 0
last_access = time.time()
cam_running = True
while not sm.stop.value:
time.sleep(0.1)
# sm.lock.acquire()
try:
if sm.start.value:
last_access = time.time()
print(f'Cam {sm.name}: Stream Start: i = {i}')
sm.start.value = False
cam_running = True
i = 0
if time.time() - last_access > 2:
if cam_running:
print(f'Cam {sm.name}: Stream Idle : i = {i}')
cam_running = False
if cam_running:
# fill buffer
sm.arr += 1
cv2.putText(sm.arr, f"frame: {i}",
(sm.shape[1]//4, sm.shape[0]//2), cv2.FONT_HERSHEY_SIMPLEX, 10, (255, 255, 255), 10)
sm.paint.value = True
i = i + 1
except KeyboardInterrupt:
print("Cam: KeyboardInterrupt")
pass
# sm.lock.release()
print('Cam: Stop')
if __name__ == '__main__':
# freeze_support() # for MS Windows
multiprocessing.log_to_stderr()
logger = multiprocessing.get_logger()
logger.setLevel(logging.INFO)
# # shared memory
sm1 = NP_SharedMemory((2000, 3000, 3), name='camera_1')
sm1.set_local_np_view()
sm2 = NP_SharedMemory((2000, 3000, 3), name='camera_2')
sm2.set_local_np_view()
cv2.namedWindow(sm1.name, cv2.WINDOW_NORMAL )
cv2.namedWindow(sm2.name, cv2.WINDOW_NORMAL )
    p1 = Process(target=demo_camera_process, args=(sm1,))
    p1.start()
    p2 = Process(target=demo_camera_process, args=(sm2,))
    p2.start()
i = 10
sm1.start.value = True
while True:
time.sleep(0.1)
# sm1.lock.acquire()
try:
if sm1.paint.value:
cv2.imshow(sm1.name, sm1.arr)
sm1.paint.value = False
if sm2.paint.value:
cv2.imshow(sm2.name, sm2.arr)
sm2.paint.value = False
k = cv2.waitKey(1)
txt = None
if k == 27 or k == 3:
# sm1.stop.value = True
break # esc to quit
elif k == ord('s'):
sm1.start.value = False
sm2.start.value = False
# i = 0
elif k == ord('g'):
sm1.start.value = True
sm2.start.value = True
# i = 10
except KeyboardInterrupt:
print("Main: KeyboardInterrupt")
break
# sm1.lock.release()
print('Main: End of program')
sm1.stop.value = True
sm2.stop.value = True
# sm1.lock.release()
# time.sleep(2)
print('Main: join')
p1.join()
p2.join()
print('Main: bye')
cv2.destroyAllWindows()
```
#### File: scratch/rtsp/flir_videostream.py
```python
from threading import Thread
import cv2, time
from FLIR_pubsub.FLIR_client_utils import *
class Flir_VideoStream:
""" Maintain live FLIRCam feed without buffering. """
def __init__(self, src=0, name="FlirVideoStream", verbose = False):
"""
src: the path to an RTSP server. should start with "rtsp://"
name: give it a name
verbose: print log or not
"""
self.name = name # initialize the thread name
self.fps = 0.0 # measured fps
self._src = src
self._verbose = verbose
self._stream = None
self._frame = None # returned images from stream
# initialize the variable used to indicate if the thread should be stopped
self._stopped = False
self._fps = FPS()
def start(self):
"""start the thread to read frames from the video stream"""
if self._verbose:
print(f"[INFO] connecting to Cam: {self._src}")
self._stopped = False
self._thread = Thread(target=self.update, name=self.name, args=())
self._thread.daemon = True
self._thread.start()
self._fps.start()
return self
def connect(self):
if self.isOpened():
self._stream.release()
self._stream = cv2.VideoCapture(self._src)
if self._verbose:
if self._stream.isOpened():
print(f"[INFO] connected to Cam: {self._src}")
else:
print(f"[INFO] Failed to connect Cam: {self._src}")
time.sleep(1)
def update(self):
"""keep looping infinitely until the thread is stopped"""
while not self._stopped:
if self._stream is not None and self._stream.isOpened():
(self.grabbed, self._frame) = self._stream.read()
if self.grabbed:
self._fps.update()
self.last = datetime.datetime.now()
time.sleep(0.01)
else:
self.connect()
time.sleep(0.01)
# time.sleep(1)
if self._fps.elapsed() > 5:
self._fps.stop()
self.fps = self._fps.fps
print(self.fps)
if self._fps.numFrames == 0:
self.connect()
self._fps.start()
# Thread has stopped
if self._verbose:
print(f"[INFO] Connection closed Cam: {self._src}")
def read(self):
# return the frame most recently read
return self._frame
def stop(self):
# indicate that the thread should be stopped or closed
self._close()
def close(self):
# indicate that the thread should be stopped or closed
self._close()
def _close(self):
if self.isOpened():
self._stream.release()
self._stopped = True
# wait until stream resources are released (producer thread might be still grabbing frame)
# Todo this code does not always work, Thread is a daemon so closes anyhow
# if not self._thread._is_stopped:
# self._thread.join()
# else:
# pass
def isOpened(self):
try:
return self._stream is not None and self._stream.isOpened()
except:
return False
import datetime
class FPS:
'''Calculate the frames per second'''
def __init__(self):
# store the start time, end time, and total number of frames
# that were examined between the start and end intervals
self._start = None
self._end = None
self.numFrames = 0
self.fps = 0.0
def start(self):
# start the timer
self._start = datetime.datetime.now()
self._end = None
self.numFrames = 0
return self
def stop(self):
# stop the timer
self._end = datetime.datetime.now()
self.fps = self.numFrames / self.elapsed()
def update(self):
# increment the total number of frames examined during the
# start and end intervals
self.numFrames += 1
# return self._numFrames
def elapsed(self):
# return the total number of seconds between the start and
# end interval
# if self._end is None:
self._end = datetime.datetime.now()
# ret = (self._end - self._start).total_seconds()
# self._end = None
# else:
# ret = (self._end - self._start).total_seconds()
return (self._end - self._start).total_seconds()
def poll_fps(self):
# compute the (approximate) frames per second without stopping
# if self._end is not None:
# return self._numFrames / self.elapsed()
# else:
self.numFrames += 1
self._end = datetime.datetime.now()
self.fps = self.numFrames / self.elapsed()
return self.fps
# self._end = None
# def fps(self):
# # compute the (approximate) frames per second, must be stopped first
# return self._numFrames / self.elapsed()
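# Usage sketch: time a capture loop and report the average frame rate.
# fps = FPS().start()
# for frame in frames:          # hypothetical frame source
#     fps.update()
# fps.stop()
# print(f'{fps.fps:.1f} fps over {fps.numFrames} frames')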
``` |
{
"source": "johnnewto/FocusStackPy",
"score": 2
} |
#### File: johnnewto/FocusStackPy/AverageImages.py
```python
import os
#import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
import time
from skimage.feature import ORB, match_descriptors
from skimage.measure import ransac
from skimage.transform import warp, downscale_local_mean, resize, SimilarityTransform
from skimage.io import imread, imsave
import time
from PIL import Image
########################################################################33
def matchFeatures(keypoints1, descriptors1, keypoints2, descriptors2):
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
# Select keypoints from the source (image to be registered) and target (reference image)
src = keypoints2[matches12[:, 1]][:, ::-1]
dst = keypoints1[matches12[:, 0]][:, ::-1]
model_robust, inliers = ransac((src, dst), SimilarityTransform,
min_samples=4, residual_threshold=1, max_trials=300)
return model_robust, inliers
###################################################################################
if __name__ == "__main__":
# resiz is set so to make feature detect faster
image_files = sorted(os.listdir("c:/temp/average"))
    # filter in one pass; removing items while iterating over the same list skips entries
    image_files = [img for img in image_files
                   if img.split(".")[-1].lower() in ["jpg", "jpeg", "png"]]
# Assuming all images are the same size, get dimensions of first image
w, h = Image.open("c:/temp/average/{}".format(image_files[0])).size
N = len(image_files)
# Create a numpy array of floats to store the average (assume RGB images)
    arr = np.zeros((h, w, 3), np.float64)   # np.float alias has been removed from NumPy
for im in image_files:
imarr = np.array(Image.open("c:/temp/average/{}".format(im)), dtype=np.float32)
arr = arr + imarr / N
# Round values in array and cast as 8-bit integer
arr = np.array(np.round(arr), dtype=np.uint8)
# Generate, save and preview final image
out = Image.fromarray(arr, mode="RGB")
out.save("c:/temp/Average.png")
out.show()
print ("That's All Folks!")
if False:
import os, numpy, PIL
from PIL import Image
# Access all PNG files in directory
allfiles = os.listdir(os.getcwd())
imlist = [filename for filename in allfiles if filename[-4:] in [".png", ".PNG"]]
# Assuming all images are the same size, get dimensions of first image
w, h = Image.open(imlist[0]).size
N = len(imlist)
# Create a numpy array of floats to store the average (assume RGB images)
arr = numpy.zeros((h, w, 3), numpy.float)
# Build up average pixel intensities, casting each image as an array of floats
for im in imlist:
imarr = numpy.array(Image.open("c:/temp/average/{}".format(im)), dtype=numpy.float)
arr = arr + imarr / N
# Round values in array and cast as 8-bit integer
arr = numpy.array(numpy.round(arr), dtype=numpy.uint8)
# Generate, save and preview final image
out = Image.fromarray(arr, mode="RGB")
out.save("Average.png")
out.show()
```
#### File: johnnewto/FocusStackPy/FocusStackImages.py
```python
import os
#import cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from skimage.color import rgb2gray, gray2rgb
import time
from skimage.feature import ORB, match_descriptors
from skimage.measure import ransac
from skimage.transform import resize, rescale
from skimage.io import imread, imsave
from skimage.morphology import reconstruction
import time
########################################################################33
def focusmeasureLAPD(img, filtersiz):
from scipy.ndimage import convolve
from scipy.ndimage import correlate1d
from scipy.ndimage.filters import uniform_filter
# M = [-1 2 - 1];
# Lx = imfilter(Image, M, 'replicate', 'conv');
# Ly = imfilter(Image, M', 'replicate', 'conv');
# FM = abs(Lx) + abs(Ly);
# FM = mean2(FM);
img = rgb2gray(img)
M = np.array([-1, 2, -1])
    img1 = correlate1d(img, M, axis=1, mode='constant', cval=0.0)  # Lx: filter along rows
    img2 = correlate1d(img, M, axis=0, mode='constant', cval=0.0)  # Ly: filter along columns
img = np.abs(img1) + np.abs(img2)
if filtersiz > 0:
img = uniform_filter(img, size=filtersiz, mode='reflect')
return img
def focusmeasureHELM(Image, filtersiz):
from scipy.ndimage import convolve
from scipy.ndimage import correlate1d
from scipy.ndimage.filters import uniform_filter
# case 'HELM' %Helmli's mean method (Helmli2001)
# U = imfilter(Image, MEANF, 'replicate');
# R1 = U./Image;
# R1(Image==0)=1;
# index = (U>Image);
# FM = 1./R1;
# FM(index) = R1(index);
# FM = imfilter(FM, MEANF, 'replicate');
# end
np.seterr(divide='ignore')
Image = rgb2gray(Image)
U = uniform_filter(Image, size=filtersiz, mode='reflect')
with np.errstate(divide='ignore', invalid='ignore'):
R1 = np.divide(U, Image)
R1[R1 == np.inf] = 0
R1 = np.nan_to_num(R1)
R1[Image==0] = 1
index = (U > Image)
with np.errstate(divide='ignore', invalid='ignore'):
FM = np.divide(1., R1)
FM[FM == np.inf] = 0
FM = np.nan_to_num(FM)
FM[index] = R1[index]
FM = uniform_filter(FM, size=filtersiz, mode='reflect')
return FM
def CalcIndex(images):
start = time.time()
shp = images[0].shape
# if shp[0] > 2000:
# fm = np.zeros((int(shp[0]/2), int(shp[1]/2), len(images)))
# else:
fm = np.zeros((int(shp[0]), int(shp[1]), len(images)))
print(" focus measure")
    for n in range(0, len(images)):
        print("    In Image{}".format(n))
        fm[:,:,n] = focusmeasureHELM(images[n], 31)
        print("    fmeasure {}".format(np.mean(fm[:,:,n])))
        print("    Time Elapsed = {:.3f}".format(time.time() - start))
        im = np.uint8(gray2rgb(fm[:,:,n]) * 255.0)  # debug view of the focus measure (not written out)
index = np.argmax(fm, axis=2)
index = fill(index)
heights = np.uint8(index * 255.0 / np.max(index))
return index, heights
def fill(img):
seed = np.ones_like(img)*255
img[ : ,0] = 0
img[ : ,-1] = 0
img[ 0 ,:] = 0
img[ -1 ,:] = 0
seed[ : ,0] = 0
seed[ : ,-1] = 0
seed[ 0 ,:] = 0
seed[ -1 ,:] = 0
return reconstruction(seed, img, method='erosion')
def old_CalcStack(index, images):
print(" Calc Masks and stacking")
shp = images[0].shape
stack = np.uint8(np.zeros((shp[0], shp[1], 3)))
for n in range(0, np.amax(index)+1):
m = np.where([index == n],1,0).reshape(shp[0], shp[1])
a = images[n]
stack[:,:,0] = np.add(stack[:,:,0],np.multiply(m[:,:], a[:,:,0]))
stack[:,:,1] = np.add(stack[:,:,1],np.multiply(m[:,:], a[:,:,1]))
stack[:,:,2] = np.add(stack[:,:,2],np.multiply(m[:,:], a[:,:,2]))
return stack
def CalcStack(index, images):
print(" Calc Masks and stacking")
shp = images[0].shape
mask = np.uint8(np.zeros((shp[0], shp[1], 3, len(images))))
stack = np.uint8(np.zeros((shp[0], shp[1], 3)))
for n in range(0, len(images)):
m = (np.where([index == n],1,0).reshape(shp[0], shp[1]))
mask[:,:,0,n ] = m
mask[:,:,1,n ] = m
mask[:,:,2,n ] = m
focusmask = np.multiply(mask[:,:,:,n ], images[n])
print (" Saving mask {}".format(n))
imsave("stacked/mask{:02d}.jpg".format(n), focusmask)
stack = np.add(stack,focusmask)
return stack
###################################################################################
if __name__ == "__main__":
image_files = sorted(os.listdir("aligned"))
    # filter in one pass; removing items while iterating over the same list skips entries
    image_files = [img for img in image_files
                   if img.split(".")[-1].lower() in ["jpg", "jpeg", "png"]]
    images = []
    for imgN in image_files:
        print ("Reading in file {}".format(imgN))
        img = imread("aligned/{}".format(imgN))
        # if img.shape[0] > 2000:
        #     # img = resize(img, (img.shape[0] / 2, img.shape[1] / 2))
        #     img = rescale(img, 0.5)
        #     images[:,:,:,n] =img
        images.append(img)
if True:
index, heights = CalcIndex(images)
imsave("stacked/HeightMap.jpg", heights)
np.save('stacked/index.npy', index)
index = np.load('stacked/index.npy')
heights = imread("stacked/HeightMap.jpg")
start = time.time()
stack = CalcStack(index, images)
stack = np.uint8(stack)
# stack = rescale(stack, 2)
# stack = np.uint8(stack*255)
imsave("stacked/stack1.jpg", np.uint8(stack))
print(" Time Elapsed = {:.3f}".format(time.time() - start))
fig, (ax0, ax1) = plt.subplots(1, 2,
subplot_kw={'xticks': [], 'yticks': []},
figsize=(12, 8))
cax = ax0.imshow(heights, cmap=cm.hot)
cbar = fig.colorbar(cax, ticks=[-1, 0, 1])
ax1.imshow(stack)
plt.show()
print ("That's All Folks!")
```
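The per-pixel selection done by `CalcStack` (pick, for every pixel, the source image whose focus measure won in `index`) can also be expressed as a single vectorized lookup. A sketch of that idea, assuming `index` and `images` as produced above:
```python
import numpy as np

def fuse_by_index(index, images):
    """Sketch: per-pixel selection of the sharpest source image.

    index  -- (H, W) array of winning image indices, as returned by CalcIndex
    images -- list of (H, W, 3) uint8 arrays, as read in the main block
    """
    stack = np.stack(images, axis=-1)                 # (H, W, 3, N)
    idx = index.astype(np.intp)[:, :, None, None]     # (H, W, 1, 1), broadcast over channels
    fused = np.take_along_axis(stack, idx, axis=-1)   # (H, W, 3, 1)
    return fused[..., 0]
```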
#### File: johnnewto/FocusStackPy/MatchImagesThreaded.py
```python
import os
import numpy as np
from skimage.color import rgb2gray
from skimage.feature import ORB, match_descriptors
from skimage.measure import ransac
from skimage.transform import warp, downscale_local_mean, resize, SimilarityTransform
from skimage.io import imread, imsave
import time
import threading
########################################################################33
def detectFeatures(img, resiz, keypoints, descriptors):
orb = ORB(n_keypoints=500, fast_threshold=0.05)
img = rgb2gray(img)
img = resize(img, (img.shape[0] * resiz, img.shape[1] * resiz), mode = 'reflect')
orb.detect_and_extract(img)
keypoints.append(orb.keypoints)
descriptors.append(orb.descriptors)
# return orb.keypoints, orb.descriptors
def matchFeatures(keypoints1, descriptors1, keypoints2, descriptors2):
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
# Select keypoints from the source (image to be registered) and target (reference image)
src = keypoints2[matches12[:, 1]][:, ::-1]
dst = keypoints1[matches12[:, 0]][:, ::-1]
model_robust, inliers = ransac((src, dst), SimilarityTransform,
min_samples=4, residual_threshold=1, max_trials=300)
return model_robust, inliers
def detectAllFeatures(images, resiz):
print(" detecting features")
keypoints = []
descriptors = []
for n in range (0, len(images) ):
detectFeatures(images[n], resiz, keypoints, descriptors)
print(" Time Elapsed = {:.3f}".format(time.time() - start))
return keypoints, descriptors
def detectAllFeaturesThreaded(images, resiz):
print(" detecting features")
keypoints = []
descriptors = []
threads = []
for n in range (0, len(images) ):
t = threading.Thread(target=detectFeatures, args=(images[n], resiz, keypoints, descriptors,))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
print(" Time Elapsed = {:.3f}".format(time.time() - start))
return keypoints, descriptors
def matchAllFeatures(keypoints, descriptors):
print(" matching features")
tform = []
tform.append(SimilarityTransform(scale=1))
for n in range (1, len(images) ):
tf, inliers = matchFeatures(keypoints[n-1], descriptors[n-1], keypoints[n], descriptors[n])
tf.translation[0] /=resiz
tf.translation[1] /=resiz
tform.append(tf)
print(" Time Elapsed = {:.3f}".format(time.time() - start))
return tform
def warpWrapper(images, n, tf):
images[n] = warp(images[n], tf.inverse)
def warpFeatures(images, tform):
print(" warping features")
tf = tform[0]
for n in range(1, len(images)):
tf = tf + tform[n]
# images[n] = warp(images[n], tf.inverse)
        warpWrapper(images, n, tf)
print(" Time Elapsed = {:.3f}".format(time.time() - start))
def warpFeaturesThreaded(images, tform):
print(" warping features")
threads = []
tf = tform[0]
for n in range(1, len(images)):
tf = tf + tform[n]
t = threading.Thread(target=warpWrapper, args=(images, n, tf,))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
print(" Time Elapsed = {:.3f}".format(time.time() - start))
###################################################################################
if __name__ == "__main__":
# resiz is set so to make feature detect faster
resiz = 0.25 # reduces from 6 seconds down to 2.3 seconds
image_files = sorted(os.listdir("input"))
    # filter in one pass; removing items while iterating over the same list skips entries
    image_files = [img for img in image_files
                   if img.split(".")[-1].lower() in ["jpg", "jpeg", "png"]]
images = []
for imgN in image_files:
print ("Reading in file {}".format(imgN))
img = imread("input/{}".format(imgN))
images.append(img)
start = time.time()
n = 0
print("Image {}".format(n))
imsave("aligned/aligned{:02d}.jpg".format(n), images[n])
keypoints, descriptors = detectAllFeatures(images, resiz)
# keypoints, descriptors = detectAllFeaturesThreaded(images, resiz)
tform = matchAllFeatures(keypoints, descriptors)
warpFeaturesThreaded(images, tform)
# warpFeatures(images, tform)
for n in range(1, len(images)):
print(" Image save {}".format(n))
images[n] = np.uint8(images[n]*255.0)
imsave("aligned/aligned{:02d}.jpg".format(n), images[n])
print ("That's All Folks!")
```
#### File: johnnewto/FocusStackPy/threadTest.py
```python
import threading
def worker(num):
"""thread worker function"""
print ('Worker: %s' % num)
return
threads = []
for i in range(5):
t = threading.Thread(target=worker, args=(i,))
threads.append(t)
t.start()
``` |
{
"source": "johnnewto/RPI-Micro-I",
"score": 2
} |
#### File: RPI-Micro-I/oldAdafruit_MotorHAT/Adafruit_PWM_Servo_Driver.py
```python
import time
import math
from Adafruit_MotorHAT.Adafruit_I2C import Adafruit_I2C
# ============================================================================
# Adafruit PCA9685 16-Channel PWM Servo Driver
# ============================================================================
class PWM :
# Registers/etc.
__MODE1 = 0x00
__MODE2 = 0x01
__SUBADR1 = 0x02
__SUBADR2 = 0x03
__SUBADR3 = 0x04
__PRESCALE = 0xFE
__LED0_ON_L = 0x06
__LED0_ON_H = 0x07
__LED0_OFF_L = 0x08
__LED0_OFF_H = 0x09
__ALL_LED_ON_L = 0xFA
__ALL_LED_ON_H = 0xFB
__ALL_LED_OFF_L = 0xFC
__ALL_LED_OFF_H = 0xFD
# Bits
__RESTART = 0x80
__SLEEP = 0x10
__ALLCALL = 0x01
__INVRT = 0x10
__OUTDRV = 0x04
general_call_i2c = Adafruit_I2C(0x00)
@classmethod
def softwareReset(cls):
"Sends a software reset (SWRST) command to all the servo drivers on the bus"
cls.general_call_i2c.writeRaw8(0x06) # SWRST
def __init__(self, address=0x40, debug=False):
self.i2c = Adafruit_I2C(address)
self.i2c.debug = debug
self.address = address
self.debug = debug
if (self.debug):
print ("Reseting PCA9685 MODE1 (without SLEEP) and MODE2")
self.setAllPWM(0, 0)
self.i2c.write8(self.__MODE2, self.__OUTDRV)
self.i2c.write8(self.__MODE1, self.__ALLCALL)
time.sleep(0.005) # wait for oscillator
mode1 = self.i2c.readU8(self.__MODE1)
mode1 = mode1 & ~self.__SLEEP # wake up (reset sleep)
self.i2c.write8(self.__MODE1, mode1)
time.sleep(0.005) # wait for oscillator
def setPWMFreq(self, freq):
"Sets the PWM frequency"
prescaleval = 25000000.0 # 25MHz
prescaleval /= 4096.0 # 12-bit
prescaleval /= float(freq)
prescaleval -= 1.0
if (self.debug):
print ("Setting PWM frequency to %d Hz") % freq
print ("Estimated pre-scale: %d") % prescaleval
prescale = math.floor(prescaleval + 0.5)
if (self.debug):
print ("Final pre-scale: %d") % prescale
oldmode = self.i2c.readU8(self.__MODE1);
newmode = (oldmode & 0x7F) | 0x10 # sleep
self.i2c.write8(self.__MODE1, newmode) # go to sleep
self.i2c.write8(self.__PRESCALE, int(math.floor(prescale)))
self.i2c.write8(self.__MODE1, oldmode)
time.sleep(0.005)
self.i2c.write8(self.__MODE1, oldmode | 0x80)
def setPWM(self, channel, on, off):
"Sets a single PWM channel"
self.i2c.write8(self.__LED0_ON_L+4*channel, on & 0xFF)
self.i2c.write8(self.__LED0_ON_H+4*channel, on >> 8)
self.i2c.write8(self.__LED0_OFF_L+4*channel, off & 0xFF)
self.i2c.write8(self.__LED0_OFF_H+4*channel, off >> 8)
def setAllPWM(self, on, off):
"Sets a all PWM channels"
self.i2c.write8(self.__ALL_LED_ON_L, on & 0xFF)
self.i2c.write8(self.__ALL_LED_ON_H, on >> 8)
self.i2c.write8(self.__ALL_LED_OFF_L, off & 0xFF)
self.i2c.write8(self.__ALL_LED_OFF_H, off >> 8)
``` |
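`setPWMFreq` derives the PCA9685 prescale register value from the 25 MHz internal oscillator and the 4096-count PWM cycle. A quick check of that arithmetic for a typical 50 Hz hobby-servo signal (the numbers simply evaluate the formula used above):
```python
# prescale = floor(25 MHz / (4096 * freq) - 1 + 0.5), as computed in setPWMFreq
freq = 50.0
prescaleval = 25000000.0 / 4096.0 / freq - 1.0   # 121.0703125
prescale = int(prescaleval + 0.5)                # 121, the value written to the PRESCALE register
print(freq, prescaleval, prescale)
```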
{
"source": "johnnfujita/shop_boilerplate",
"score": 2
} |
#### File: shop_boilerplate/shop/recommender.py
```python
import redis
from django.conf import settings
from .models import Product
r = redis.Redis(
host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
db=settings.REDIS_DB)
class Recommender(object):
def get_product_key(self, id):
return f'product:{id}:purchased_with'
def products_bought(self, products):
product_ids = [p.id for p in products]
for product_id in product_ids:
for with_id in product_ids:
if product_id != with_id:
r.zincrby(self.get_product_key(product_id), 1, with_id)
def suggest_products_for(self, products, max_results=6):
product_ids = [p.id for p in products]
if len(products) ==1:
suggestions = r.zrange(
self.get_product_key(product_ids[0]),
0, -1, desc=True)[:max_results]
else:
flat_ids = ''.join([str(id) for id in product_ids])
tmp_key = f'tmp_{flat_ids}'
keys = [self.get_product_key(id) for id in product_ids]
r.zunionstore(tmp_key, keys)
            r.zrem(tmp_key, *product_ids)
suggestions = r.zrange(tmp_key, 0, -1, desc=True)[:max_results]
r.delete(tmp_key)
suggested_products_ids = [int(id) for id in suggestions]
suggested_products = list(Product.objects.filter(id__in=suggested_products_ids))
suggested_products.sort(key=lambda x: suggested_products_ids.index(x.id))
return suggested_products
def clear_purchases(self):
for id in Product.objects.values_list('id', flat=True):
r.delete(self.get_product_key(id))
``` |
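A usage sketch for the `Recommender` above, for example from an order-completed handler. The import path is inferred from the file layout, and `p1`, `p2`, `p3` stand in for `Product` instances loaded elsewhere; Redis must be reachable with the settings the module reads.
```python
from shop.recommender import Recommender   # import path assumed from the file layout

# p1, p2, p3 are placeholders for Product instances fetched elsewhere
recommender = Recommender()
recommender.products_bought([p1, p2, p3])                        # record pairwise co-purchases
similar = recommender.suggest_products_for([p1], max_results=4)  # Products most often bought with p1
print(similar)
```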
{
"source": "johnnfujita/social_django",
"score": 2
} |
#### File: social_django/images/signals.py
```python
from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from .models import Image
@receiver(m2m_changed, sender=Image.users_like.through)
def user_like_changed(sender, instance, **kwargs):
instance.total_likes = instance.users_like.count()
instance.save()
``` |
{
"source": "johnnfujita/unreal_to_web3_local_server",
"score": 2
} |
#### File: api/app/crud.py
```python
from sqlalchemy.orm import Session
from app import models, schemas
from sqlalchemy import and_
def get_nfts_by_user( db:Session, user_address: str, nft_on_block_count: int, skip: int = 0, limit: int = 100):
db.query(models.NFTEntry).filter(and_(user_address == models.NFTEntry.owner_account, models.NFTEntry.token_id <= nft_on_block_count)).update({models.NFTEntry.read_available: True}, synchronize_session=False)
nfts = db.query(models.NFTEntry).filter(user_address == models.NFTEntry.owner_account).all()
return nfts
def mint_nft(db:Session, token_id: int,sender: str, car: str):
db_nft = models.NFTEntry(token_id=token_id, owner_account=sender, car_model=car)
db.add(db_nft)
db.commit()
db.refresh(db_nft)
return db_nft
```
#### File: api/app/main.py
```python
from fastapi import FastAPI, Depends, BackgroundTasks
from app.config import get_settings, Settings
from app.web3_interfacer import create_a_collectible, get_car, get_user_account, check_minted_nfts
from web3_vanilla.new_eth_address import create_account
from app import models, schemas, crud
from app.database import SessionLocal, engine
from typing import List
from sqlalchemy.orm import Session
models.Base.metadata.create_all(bind=engine)
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
app = FastAPI()
@app.get("/mint_token/{nft_identifier}", response_model=schemas.NFTEntry)
def mint(nft_identifier: int, getsettings: Settings = Depends(get_settings), db: Session = Depends(get_db)):
sender, token_id, car = create_a_collectible(nft_identifier)
sender = sender.address
if not token_id:
raise Exception("no value returned")
nft_token = crud.mint_nft(db, token_id=token_id, sender=sender, car=car)
return nft_token
@app.get("/get_all_my_nft", response_model=List[schemas.NFTEntry])
def get_all_my_nft(settings: Settings = Depends(get_settings), db: Session = Depends(get_db)):
nft_on_block_count, sender = check_minted_nfts()
nfts_from_user = crud.get_nfts_by_user(db, user_address=sender, nft_on_block_count=nft_on_block_count)
return nfts_from_user
@app.get("/generate_ethereum_address")
async def generate_eth_address(settings: Settings = Depends(get_settings)):
pk, address = create_account()
return {"data":
[
{
"pk": pk,
"address": address,
}
]
}
```
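A sketch of exercising these routes from a client once the app is being served. The base URL, the `uvicorn` invocation and the `requests` dependency are assumptions; the paths come from the decorators above.
```python
import requests

# assumes something like: uvicorn app.main:app --port 8000
BASE = "http://127.0.0.1:8000"

print(requests.get(BASE + "/generate_ethereum_address").json())
print(requests.get(BASE + "/mint_token/1").json())    # mint the collectible with identifier 1
print(requests.get(BASE + "/get_all_my_nft").json())  # NFTs owned by the configured account
```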
#### File: api/web3_vanilla/new_eth_address.py
```python
from eth_account import Account
import secrets
def create_account():
priv = secrets.token_hex(32)
private_key = "0x" + priv
print("Game specific private key... this is for an optional wallet for you not to worry on exposing your assets from other wallets\n")
print(f"pk: {private_key}")
acct = Account.from_key(private_key)
print("Address:", acct.address)
return (private_key, acct.address)
if __name__ == "__main__":
    create_account()
```
#### File: nft/scripts/flatten.py
```python
from brownie import FigaNFT, accounts, network, config, interface
import json
def main():
flatten()
def flatten():
file = open("./FigaNFT_flattened.json", "w")
json.dump(FigaNFT.get_verification_info(), file)
file.close()
``` |
{
"source": "johnngnky/luhn",
"score": 3
} |
#### File: johnngnky/luhn/test.py
```python
import luhn
def test_checksum_len1():
assert luhn.checksum('7') == 7
def test_checksum_len2():
assert luhn.checksum('13') == 5
def test_checksum_len3():
assert luhn.checksum('383') == 3
def test_checksum_len4():
assert luhn.checksum('2827') == 3
def test_checksum_len13():
assert luhn.checksum('4346537657597') == 9
def test_checksum_len14():
assert luhn.checksum('27184931073326') == 1
def test_valid():
assert luhn.verify('356938035643809')
def test_invalid():
assert not luhn.verify('4222222222222222')
def test_generate():
assert luhn.generate('7992739871') == 3
def test_append():
assert luhn.append('53461861341123') =='534618613411234'
``` |
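For reference, a minimal sketch of the four functions these tests exercise. This is not the library's actual implementation, just the standard Luhn algorithm written so that the assertions above hold.
```python
def checksum(string):
    """Luhn checksum of a digit string; 0 means the number verifies."""
    digits = list(map(int, string))
    odd = sum(digits[-1::-2])                                    # digits in odd positions from the right
    even = sum(sum(divmod(2 * d, 10)) for d in digits[-2::-2])   # doubled digits, digit-summed
    return (odd + even) % 10

def verify(string):
    return checksum(string) == 0

def generate(string):
    """Check digit that makes string + digit pass verify()."""
    return (10 - checksum(string + '0')) % 10

def append(string):
    return string + str(generate(string))
```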
{
"source": "johnnguytx/PythonMarbleGenerator",
"score": 4
} |
#### File: johnnguytx/PythonMarbleGenerator/py_mod_marbles.py
```python
import random
# --- MODULE VARIABLES
MIN_BAGS = 5
MAX_BAGS = 15
MIN_MARBLES = 180
MAX_MARBLES = 220
FILENAME = "MarbleReport.txt"
# --- FUNCTIONS
def displayMessage(msg):
# PURPOSE: Displays a simple message
# RETURNS: n/a
# IN: msg, to be displayed
# OUT: n/a
print(msg)
def getNumBags():
# PURPOSE: Prompts the user for a number of bags
# RETURNS: bags, number of bags
# IN: n/a
# OUT: n/a
bags = int(input("Enter a number of bags between 5 and 15: "))
while bags < MIN_BAGS or bags > MAX_BAGS:
bags = int(input("Please enter number of bags within the specified range: "))
return bags
def table(bags):
# PURPOSE: Controls generation of the table by calling other functions
# RETURNS: n/a
# IN: bags, number of bags
# OUT: n/a
colors = {
"red": 0,
"orange": 0,
"yellow": 0,
"green": 0,
"blue": 0,
"purple": 0,
"total" : 0,
}
id = 1
seed()
f = open(FILENAME, "w")
header(f)
i = 0
while i < bags:
a = row(f,id)
i += 1
id += 1
for x in a:
num1 = a[x]
num2 = colors[x]
sum1 = num1 + num2
colors[x] = sum1
footer(f,colors)
    f.close()
def seed():
# PURPOSE: Seeds the random number generator
# RETURNS: n/a
# IN: n/a
# OUT: n/a
random.seed()
def header(f):
# PURPOSE: displays the table header and writes it to the file
# RETURNS: n/a
# IN: n/a
# OUT: n/a
line = "{:>3}{:>6}{:>6}{:>6}{:>6}{:>6}{:>6}{:>6}"
print(line.format("BAG","RED","ORG","YEL","GRN","BLU","PPL","TOT"))
print("---------------------------------------------")
f.write(line.format("BAG","RED","ORG","YEL","GRN","BLU","PPL","TOT\n"))
f.write("---------------------------------------------\n")
def row(f,id):
# PURPOSE: displays a table row and writes it to the file
# RETURNS: Returns a dictionary of bag color counts
# IN: f, the output file, id the bag id
# OUT: n/a
colors = {"red": 0,"orange": 0,"yellow": 0,"green": 0,"blue": 0,"purple": 0}
a = (rng(MIN_MARBLES,MAX_MARBLES))
i = 0
while i < a:
num = random.choice(list(colors))
colors[num] += 1
i += 1
total = colors["red"] + colors["orange"] + colors["yellow"] + colors["green"] + colors["blue"] + colors["purple"]
line = "{:>3}{:>6}{:>6}{:>6}{:>6}{:>6}{:>6}{:>6}"
print(line.format(id,colors["red"],colors["orange"],colors["yellow"],colors["green"],colors["blue"],colors["purple"],total))
f.write(line.format(id,colors["red"],colors["orange"],colors["yellow"],colors["green"],colors["blue"],colors["purple"],total))
f.write("\n")
return colors
def rng(lower, upper):
# PURPOSE: generates a random number
# RETURNS: a random number within given bounds
# IN: upper, the upper bound, lower, the lower bound
# OUT: n/a
num = random.randint(lower, upper)
return num
def footer(f,colors):
# PURPOSE: Displays the table footer and writes it to the file
    # RETURNS: n/a
# IN: f, the output file, colors, a dictionary of total color counts, with color names (strings) as the keys and total color counts as the values
# OUT: n/a
tot = colors["red"] + colors["orange"] + colors["yellow"] + colors["green"] + colors["blue"] + colors["purple"]
line = "{:>3}{:>6}{:>6}{:>6}{:>6}{:>6}{:>6}{:>6}"
print("---------------------------------------------")
print(line.format("TOT",colors["red"],colors["orange"],colors["yellow"],colors["green"],colors["blue"],colors["purple"],tot))
f.write("---------------------------------------------\n")
f.write(line.format("TOT",colors["red"],colors["orange"],colors["yellow"],colors["green"],colors["blue"],colors["purple"],tot,"\n"))
``` |
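The module above only defines functions and constants; a separate driver is expected to import and call them. A minimal sketch of such a driver:
```python
# hypothetical driver for the module above
import py_mod_marbles as marbles

marbles.displayMessage("Marble bag report generator")
bags = marbles.getNumBags()     # prompts until a value between 5 and 15 is entered
marbles.table(bags)             # prints the table and writes MarbleReport.txt
marbles.displayMessage("Report written to " + marbles.FILENAME)
```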
{
"source": "JohnnieFucker/WebSpider",
"score": 3
} |
#### File: webspider/spiders/a51CTOBookSpider.py
```python
import scrapy
from webspider.items import A51CTOBookItem
class A51ctobookspiderSpider(scrapy.Spider):
name = '51CTOBookSpider'
allowed_domains = ['51cto.com']
start_urls = []
headers = {
'Accept': 'text / html, application / xhtml + xml, application / xml;q = 0.9, image / webp, image / apng, * / *;q = 0.8',
'Accept-Encoding': 'deflate, br',
'Conection': 'keep-alive'
}
def start_requests(self):
global headers
url_head = 'http://book.51cto.com/art/201106/2690'
for i in range(31):
urlSuffix = i + 36
url = url_head + '%s.htm' % urlSuffix
self.start_urls.append(url)
for url in self.start_urls:
            print(url)
yield scrapy.Request(url, callback=self.parse, headers=self.headers)
def parse(self, response):
global headers
content = response.xpath('//div[@id="content"]').extract_first()
fitem = A51CTOBookItem()
fitem['content'] = content
yield fitem
``` |
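The spider above can be run with the usual `scrapy crawl 51CTOBookSpider` command from the project root, or programmatically; a sketch of the latter, with the import path taken from the file layout:
```python
from scrapy.crawler import CrawlerProcess
from webspider.spiders.a51CTOBookSpider import A51ctobookspiderSpider

# run from the project root so the webspider package (items, settings) is importable
process = CrawlerProcess()
process.crawl(A51ctobookspiderSpider)
process.start()   # blocks until the crawl finishes
```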
{
"source": "JohnniOstergaard/Python",
"score": 3
} |
#### File: JohnniOstergaard/Python/DataBase_and_Ploting.py
```python
import sqlite3 #SQLite database module
import time #Time module
import datetime #Datetime module
import random #Random number generator module
#Third party modules
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import style
style.use('fivethirtyeight')
#Info about modules
#MatPlotLib website: http://matplotlib.org/
#SQLite db definitions =========================================================
conn = sqlite3.connect('Test.db') #Connect to a database and/or created it
c = conn.cursor() #Defining a db cursor
#Create a table with columns ------------------------------------------------
def Tb_create(): #Create a table in Test.db
c.execute('CREATE TABLE IF NOT EXISTS stuffToPlot(\
unix REAL, datestamp TEXT, keyword TEXT, value REAL)')
#Table name: stuffToPlot
#SQLite cmd: 'CREATE TABLE IF NOT EXISTS'
#SQLITE columns types: REAL, TEXT
#SQLite columns names: unix, datestamp, keyword, value
#SQLite cmd is not case sensitive! but it lets the reader know its a db cmd
    #Remember that the data type and number of characters in the table determine its size
#Write values and strings to the table in db --------------------------------
def Tb_entry(): #Write data to table in SQLite db
c.execute("INSERT INTO stuffToPlot VALUES(124, '12-03-2017', 'Python', 51)")
#Insert data to all colums in a new row
conn.commit() #Save the inserted data to the table
#c.close() #Close the cursor in the SQLite db
#conn.close() #Close the connection to the SQLite db
#Write variables to the table in db -----------------------------------------
def dynamic_data_entry(): #Write variables to the table of db
unix = time.time() #Get current time stamp
data = str(datetime.datetime.fromtimestamp(unix).strftime('%Y-%m-%d %H:%M:%S'))
keyword = 'Python'
value = random.randrange(0,10) #Create a random number between 0 and 10
c.execute("INSERT INTO stuffToPlot (unix, datestamp, keyword, value)\
VALUES (?, ?, ?, ?)", (unix, data, keyword, value))
conn.commit()
#The order the columns is insert into the table do not matter
#All columns in the row must be present when insert data, to
#skip a column by leaveing a column blank in insert: (unix, datestamp, , vale)
#Read selected data for the table in db -------------------------------------
def Tb_read():
#Cursor selecting-----------------------------------------------------
#c.execute("SELECT * FROM stuffToPlot") #All data in the table
#c.execute("SELECT * FROM stuffToPlot WHERE value >=3 AND keyword ='Python'")
c.execute("SELECT keyword, value, datestamp FROM stuffToPlot") #select columns and order
#Where = search filters
# (*) stand for (all)
#data = c.fetchall() #Fetch all selected
#data = c.fetchone() #Fetch one selected row
#print(data) #Print all data in table on one line
#Fetch all selected data ---------------------------------------------
for row in c.fetchall(): #fetchall do not hold data after used!
print(row) #Print each row in the table
#print(row[0]) #Print only the first column
#Update data in the table ---------------------------------------------------
def Tb_update(old, new):
#c.execute("UPDATE stuffToPlot SET value=1 WHERE value=7") #No variable input
c.execute("UPDATE stuffToPlot SET value=? WHERE value=?", (old, new)) #Variable input
conn.commit()
    print('Updated table:')
    Tb_read()
#Delete data in the table ---------------------------------------------------
def Tb_delete(number, limit):
#Check number of target rows -----------------------------------------
c.execute("SELECT * FROM stuffToPlot WHERE value =?", (number,)) #Select target rows
if(len(c.fetchall()) <= limit): #Limited deleting mistakes!
#Delete from table----------------------------------------------------
#c.execute('DELETE FROM stuffToPlot WHERE value =99') #No variable input
c.execute("DELETE FROM stuffToPlot WHERE value =?", (number,)) #Variable input
conn.commit()
#Print response ------------------------------------------------------
        print('Deleted successfully')
c.execute("SELECT * FROM stuffToPlot")
for row in c.fetchall():
print(row)
else:
        print('Too many rows are selected')
#MatPlotLib definitions ========================================================
def graph_data(): #Plot a graph from data in table
c.execute("SELECT unix, value FROM stuffToPlot")
dates = []
values = []
for row in c.fetchall():
#print(row[0])
#print(datetime.datetime.fromtimestamp(row[0]))
dates.append(datetime.datetime.fromtimestamp(row[0]))
values.append(row[1])
else:
plt.plot_date(dates, values, '-')
plt.show()
#Function calls ================================================================
Tb_create() #Create a table
#Tb_entry() #Input data to the table in db
Tb_read() #Read values in table from db
#Tb_update(1,7) #(Old value, new value)
#Tb_delete(6,3) #(Value to delet, row limit)
graph_data() #Plot graph from values in table
#Dynamic_data_entry ---------------------------------------------------------
"""for i in range(10): #Create 10 rows in the table with data
dynamic_data_entry()
time.sleep(1) #Sleep for 1 second to see a new time stamp in db!"""
#End program ===================================================================
c.close()
conn.close()
``` |
{
"source": "JohnNKing/snbopen",
"score": 2
} |
#### File: JohnNKing/snbopen/snbopen.py
```python
from zipfile import ZipFile
from xml.dom.minidom import parseString
from zlib import decompress
from PIL import Image
from os import system,sep,remove
from sys import argv
from reportlab.pdfgen import canvas
from io import BytesIO
from reportlab.pdfbase.pdfutils import ImageReader
from re import sub
from tempfile import gettempdir
from reportlab.lib.utils import ImageReader
def showUsage():
print """
Usage: snbopen snbfile [pdffile]
snbopen opens .snb files created by samsung tablets
if pdf file is specified the program converts the snb file to the pdf.
"""
def zipRead(zipFile,filename):
tempFile = zipFile.open(filename)
str = tempFile.read()
tempFile.close()
return str
def addImage(snbFile,canvas,image,rels,element):
imgFileName = "snote/"+rels[image.getAttribute("r:id")]
imgStr = zipRead(snbFile,imgFileName)
print "Adding image " + imgFileName
if imgFileName.endswith(".zdib"):
imgStr = decompress(imgStr)
width = ord(imgStr[5]) * 256 + ord(imgStr[4])
height = ord(imgStr[9]) * 256 + ord(imgStr[8])
try:
img = Image.frombytes("RGBA",(width,height),imgStr[52:])
except:
img = Image.fromstring("RGBA",(width,height),imgStr[52:])
filename = "outTmp.png"
img.save(filename)
img = ImageReader(filename)
canvas.drawImage(img,0,0,595.27,841.89,mask="auto")
remove(filename)
elif imgFileName.endswith(".png"):
style = imagePoss(element.getElementsByTagName("v:shape")[0].getAttribute("style"),"image")
img=Image.open(BytesIO(imgStr))
filename = "outTmp.png"
img.save(filename)
img = ImageReader(filename)
canvas.drawImage(img,style.left,style.bottom,style.width,style.height,mask="auto")
remove(filename)
else:
style = imagePoss(element.getElementsByTagName("v:shape")[0].getAttribute("style"),"bg")
img=Image.open(BytesIO(imgStr))
filename = "outTmp.jpg"
img.save(filename)
img = ImageReader(filename)
canvas.drawImage(img,style.left,style.bottom,style.width,style.height,mask="auto")
remove(filename)
def addBgImage(snbFile,canvas,imgFileName,styleAttribute):
imgStr = zipRead(snbFile,imgFileName)
print "Adding image " + imgFileName
style = imagePoss(styleAttribute,"bg")
img=Image.open(BytesIO(imgStr))
filename = "outTmp.png"
img.save(filename)
img = ImageReader(filename)
canvas.drawImage(img,style.left,style.bottom,style.width,style.height,mask="auto")
remove(filename)
def addText(canvas,element,styles):
for run in element.getElementsByTagName("sn:r"):
if(len(run.getElementsByTagName("sn:t")) > 0):
##TODO: support italic, bold and underlined text
charStyle = styles["Character" + run.getAttributeNode("sn:rStyle").value]
text=run.getElementsByTagName("sn:t")[0].firstChild.nodeValue
canvas.setFont("Helvetica",charStyle.size)
canvas.setFillColor(charStyle.color)
canvas.drawString(40,810-charStyle.size,text)
##TODO: support non-unicode characters
def readRelsFile(snbFile):
relations = parseString(zipRead(snbFile,"snote/_rels/snote.xml.rels"))
rels=dict()
for relation in relations.getElementsByTagName("Relationship"):
rels[relation.getAttribute("Id")] = relation.getAttribute("Target")
return rels
def readRelsMasterFile(snbFile):
relations = parseString(zipRead(snbFile,"snote/_rels/master.xml.rels"))
rels=dict()
for relation in relations.getElementsByTagName("Relationship"):
rels[relation.getAttribute("Id")] = relation.getAttribute("Target")
return rels
def readCharStyles(snbFile):
styles = parseString(zipRead(snbFile,"snote/styles.xml"))
charStyles = dict()
for style in styles.getElementsByTagName("sn:style"):
if style.getAttributeNode("sn:type").value == "character":
if len(style.getElementsByTagName("sn:color"))>0:
color = style.getElementsByTagName("sn:color")[0].getAttribute("sn:val")
else:
color = "000000"
if len(style.getElementsByTagName("sn:sz"))>0:
size = int(style.getElementsByTagName("sn:sz")[0].getAttribute("sn:val"))*.5
else:
size = 16
charStyles[style.getAttribute("sn:styleId")] = Style(len(style.getElementsByTagName("sn:b"))>0,
len(style.getElementsByTagName("sn:i"))>0, len(style.getElementsByTagName("sn:u"))>0,color,size)
return charStyles
class Style:
def __init__(self, bold, italic, underline,color="000000",size=48):
self.bold = bold
self.italic = italic
self.underline = underline
self.color = "0X"+color
self.size=size
class imagePoss:
def __init__(self,style,imageType):
styleDict = dict(item.split(":") for item in style.replace("pt","").split(";"))
if imageType == "bg":
self.left=float(styleDict["left"])
try:
self.bottom = 841.89 -(float(styleDict["margin-top"])+float(styleDict["height"]))
except:
self.bottom = 0
self.width = float(styleDict["width"])
self.height = float(styleDict["height"])
elif imageType == "image":
try:
self.left=float(styleDict["margin-left"])
except:
self.left=0
try:
self.bottom=841.89-(float(styleDict["margin-top"])+float(styleDict["height"]))
except:
self.bottom=0
self.width = float(styleDict["width"])
self.height = float(styleDict["height"])
def alpha_to_color(image, color=(255, 255, 255)):
image.load() # needed for split()
background = Image.new('RGB', image.size, color)
background.paste(image, mask=image.split()[3]) # 3 is the alpha channel
return background
def snbToPdf(snbname,pdfname = None ):
snbFile=ZipFile(snbname,"r")
print "processing " + pdfname
rels = readRelsFile(snbFile)
charStyles = readCharStyles(snbFile)
snote = parseString(zipRead(snbFile,"snote/snote.xml"))
bodyElements=snote.firstChild.firstChild.childNodes
try:
master = parseString(zipRead(snbFile,"snote/master.xml"))
masterBodyElements=master.firstChild.firstChild.childNodes
bgImageId = master.getElementsByTagName("v:imagedata")[0].getAttribute("r:id")
bgImageStyle = master.getElementsByTagName("v:shape")[0].getAttribute("style")
relsMaster = readRelsMasterFile(snbFile)
bgImgFileName = "snote/" + relsMaster[bgImageId]
except:
bgImgFileName = None
for element in bodyElements:
if element.nodeName=="sn:page":
if 'pdfCanvas' in vars():
pdfCanvas.showPage()
else:
pdfCanvas = canvas.Canvas(pdfname if pdfname else snbname.replace(".snb",".pdf"))
if bgImgFileName is not None:
addBgImage(snbFile,pdfCanvas,bgImgFileName,bgImageStyle)
#ussually images
elif element.nodeName == "sn:SNoteObj":
images=element.getElementsByTagName("v:imagedata")
if len(images)!=0:
addImage(snbFile,pdfCanvas,images[0],rels,element)
else:
print "unknown SNoteObj" +"on page "+str(pdfCanvas.getPageNumber())
elif element.nodeName == "sn:l":
addText(pdfCanvas,element,charStyles)
else:
print "unknown element type:"+element.nodeName+" on page "+str(pdfCanvas.getPageNumber())
if(pdfname):
pdfCanvas.save()
snbFile.close()
if len(argv)==2:
#pdfFileName= gettempdir()+sep+sub('.*'+sep,"",argv[1]).replace(".snb",".pdf")
pdfFileName= argv[1].replace(".snb",".pdf")
snbToPdf(argv[1],pdfFileName)
elif len (argv)==3:
snbToPdf(argv[1],argv[2])
else:
showUsage()
``` |
{
"source": "johnno1962/SwiftPython",
"score": 2
} |
#### File: johnno1962/SwiftPython/bridgegen.py
```python
import inspect
import sys
import re
def main():
module = sys.argv[1]
if len(sys.argv) > 2:
sys.path.insert(1, sys.argv[2])
__import__(module)
module_name = module.replace(".", "_")
print("""
// Generated from module %s by bridgegen.py
public let %sModule = PythonModule(loading: "%s")""" % (module, module_name, module))
for name, obj in inspect.getmembers(sys.modules[module]):
if inspect.isfunction(obj):
genfunction(module_name, name, obj)
elif inspect.isclass(obj):
genclass(module_name, name, obj)
def asCall(swiftType):
dict = re.search(r": (\S+)\]", swiftType)
if dict:
return ".asDictionary(of: %s.self)" % dict.group(1)
else:
array = re.search(r"\[(\S+)\]", swiftType)
if array:
return ".asArray(of: %s.self)" % array.group(1)
elif swiftType != "Void":
return ".asAny(of: %s.self)" % swiftType
else:
return ".asVoid"
def asTypes(obj):
if obj.__doc__ == None:
return ("PythonObject", "")
returns = re.search(r"Swift returns (\[[^\]]+\]|\w+(?:<[^>]+>+)?)", obj.__doc__)
if returns == None:
return ("PythonObject", "")
swiftType = returns.group(1)
return (swiftType, asCall(swiftType))
def reserved(arg):
return arg if arg != "func" and arg != "where" else "`"+arg+"`"
def sanitise(args):
return ", ".join(map(lambda arg: reserved(arg), args))
def genargs(args, defaults, prefix=""):
opt = "?"
dflt = " = nil"
defaults = tuple(map(lambda x: None, range(len(args)-len(defaults)))) + defaults if defaults else defaults
return ", ".join(map(lambda i: ("" if prefix == "" else "_ ")+prefix+args[i]+": Any"+opt+ \
(dflt if not (defaults and i < len(defaults) and defaults[i] != None) else
' = "'+defaults[i]+'"' if isinstance(defaults[i], basestring) else \
(" = true" if defaults[i] else " = false") if isinstance(defaults[i], bool) else \
" = "+str(defaults[i]) if isinstance(defaults[i], (int, long, float)) else dflt), range(len(args)))) + \
(", " if len(args) else "")+("_ " if len(args) == 0 else "")+"kw: [String: Any]? = nil"
def genfunction(module, name, func):
args, varargs, keywords, defaults = inspect.getargspec(func)
(swiftType, asCall) = asTypes(func)
print("""
private let %sFunction = %sModule.function(named: "%s")
public func %s(%s) -> %s {
return %sFunction.call(args: [%s], kw: kw)%s
}""" %
(name, module, name,
name, genargs(args, defaults), swiftType,
name, sanitise(args), asCall))
if len(args) > 0:
print("""
public func %s(%s) -> %s {
return %s(%s, kw: kw)
}""" % (name, genargs(args, defaults, "_"), swiftType,
name, ", ".join(map(lambda arg: arg+": "+reserved("_"+arg), args))))
def genclass(module, classname, clazz):
print("""
public let %sClass = PythonClass(from: %sModule, named: "%s", type: %s.self)
public class %s: PythonObject {
public required init(any: Any) {
super.init(any: any)
}""" % (classname, module, classname, classname, classname))
if clazz.__doc__:
for name, swiftType in re.findall(r"Swift var (\w+): (\[[^\]]+\]|\w+(?:<[^>]+>)?)", clazz.__doc__):
if getattr(clazz, name, None):
cvar = "class "
avar = classname+"Class."
else:
cvar = ""
avar = ""
print("""
public %svar %s: %s {
get {
return %sgetAttr(named: "%s")%s
}
set(newValue) {
%ssetAttr(named: "%s", value: newValue)
}
}""" % (cvar, name, swiftType, avar, name, asCall(swiftType), avar, name))
for name, obj in inspect.getmembers(clazz):
if inspect.ismethod(obj):
if name == "__init__":
geninit(classname, name, obj)
else:
genmethod(classname, name, obj)
print("}")
def geninit(classname, name, func):
args, varargs, keywords, defaults = inspect.getargspec(func)
args = args[1:]
print("""
public init(%s) {
super.init(any: %sClass.call(args: [%s], kw: kw))
}""" %
(genargs(args, defaults), classname, sanitise(args)))
if len(args) > 0:
print("""
public convenience init(%s) {
self.init(%s, kw: kw)
}""" %
(genargs(args, defaults, "_"),
", ".join(map(lambda arg: arg+": "+reserved("_"+arg), args))))
def genmethod(classname, name, func):
args, varargs, keywords, defaults = inspect.getargspec(func)
(swiftType, asCall) = asTypes(func)
print("""
private static let %sMethod = %sClass.function(named: "%s")
public func %s(%s) -> %s {
return %s.%sMethod.call(args: [%s], kw: kw)%s
}""" %
(name, classname, name,
name, genargs(args[1:], defaults), swiftType,
classname, name, sanitise(["self"]+args[1:]), asCall))
if len(args) > 1:
print("""
public func %s(%s) -> %s {
return %s(%s, kw: kw)
}""" % (name, genargs(args[1:], defaults, "_"), swiftType,
name, ", ".join(map(lambda arg: arg+": "+reserved("_"+arg), args[1:]))))
main()
``` |
{
"source": "johnnoone/aioconsul",
"score": 2
} |
#### File: aioconsul/client/agent_endpoint.py
```python
from .bases import EndpointBase
class AgentEndpoint(EndpointBase):
"""Interact with the local Consul agent.
"""
async def info(self):
"""Returns the local node configuration
Returns:
Object: local node configuration
Returns the configuration and member information of the local agent
under the Config key, like this::
{
"Config": {
"Bootstrap": True,
"Server": True,
"Datacenter": "dc1",
"DataDir": "/tmp/consul",
"DNSRecursor": "",
"DNSRecursors": [],
"Domain": "consul.",
"LogLevel": "INFO",
"NodeName": "foobar",
"ClientAddr": "127.0.0.1",
"BindAddr": "0.0.0.0",
"AdvertiseAddr": "10.1.10.12",
"Ports": {
"DNS": 8600,
"HTTP": 8500,
"RPC": 8400,
"SerfLan": 8301,
"SerfWan": 8302,
"Server": 8300
},
"LeaveOnTerm": False,
"SkipLeaveOnInt": False,
"StatsiteAddr": "",
"Protocol": 1,
"EnableDebug": False,
"VerifyIncoming": False,
"VerifyOutgoing": False,
"CAFile": "",
"CertFile": "",
"KeyFile": "",
"StartJoin": [],
"UiDir": "",
"PidFile": "",
"EnableSyslog": False,
"RejoinAfterLeave": False
},
"Coord": {
"Adjustment": 0,
"Error": 1.5,
"Vec": [0,0,0,0,0,0,0,0]
},
"Member": {
"Name": "foobar",
"Addr": "10.1.10.12",
"Port": 8301,
"Tags": {
"bootstrap": "1",
"dc": "dc1",
"port": "8300",
"role": "consul",
"vsn": "1",
"vsn_max": "1",
"vsn_min": "1"
},
"Status": 1,
"ProtocolMin": 1,
"ProtocolMax": 2,
"ProtocolCur": 2,
"DelegateMin": 2,
"DelegateMax": 4,
"DelegateCur": 4
}
}
"""
response = await self._api.get("/v1/agent/self")
return response.body
async def disable(self, reason=None):
"""Enters maintenance mode
Parameters:
reason (str): Reason of disabling
Returns:
bool: ``True`` on success
"""
params = {"enable": True, "reason": reason}
response = await self._api.put("/v1/agent/maintenance", params=params)
return response.status == 200
async def enable(self, reason=None):
"""Resumes normal operation
Parameters:
reason (str): Reason of enabling
Returns:
bool: ``True`` on success
"""
params = {"enable": False, "reason": reason}
response = await self._api.put("/v1/agent/maintenance", params=params)
return response.status == 200
```
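A usage sketch for the agent endpoint above, driven through the `Consul` client defined later in this package. The import path and the agent address are assumptions; the method names come from the class above.
```python
import asyncio
from aioconsul import Consul   # import path assumed

async def main():
    client = Consul("http://127.0.0.1:8500")   # local agent, default HTTP port assumed
    info = await client.agent.info()
    print(info["Config"]["NodeName"], info["Member"]["Addr"])
    await client.agent.disable(reason="planned maintenance")   # enter maintenance mode
    await client.agent.enable(reason="maintenance finished")   # resume normal operation

asyncio.get_event_loop().run_until_complete(main())
```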
#### File: aioconsul/client/checks_endpoint.py
```python
from .bases import EndpointBase
from aioconsul.util import extract_attr
class ChecksEndpoint(EndpointBase):
async def items(self):
"""Returns the checks the local agent is managing
Returns:
Mapping: Mapping of checks
These checks were either provided through configuration files
or added dynamically using the HTTP API.
This endpoint returns an object like this::
{
"service:redis": {
"Node": "foobar",
"CheckID": "service:redis",
"Name": "Service 'redis' check",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "redis",
"ServiceName": "redis"
}
}
"""
response = await self._api.get("/v1/agent/checks")
return response.body
async def register(self, check, *, token=None):
"""Registers a new local check
Parameters:
check (Object): Check definition
token (ObjectID): Token ID
Returns:
bool: ``True`` on success
The register endpoint is used to add a new check to the local agent.
Checks may be of script, HTTP, TCP, or TTL type. The agent is
responsible for managing the status of the check and keeping the
Catalog in sync.
The request body must look like::
{
"ID": "mem",
"Name": "Memory utilization",
"Notes": "Ensure we don't oversubscribe memory",
"DeregisterCriticalServiceAfter": "90m",
"Script": "/usr/local/bin/check_mem.py",
"DockerContainerID": "f972c95ebf0e",
"Shell": "/bin/bash",
"HTTP": "http://example.com",
"TCP": "example.com:22",
"Interval": timedelta(seconds=10),
"TTL": timedelta(seconds=15)
}
The **Name** field is mandatory, as is one of **Script**, **HTTP**,
**TCP** or **TTL**. **Script**, **TCP** and **HTTP** also require that
**Interval** be set.
If an **ID** is not provided, it is set to **Name**. You cannot have
duplicate **ID** entries per agent, so it may be necessary to provide
an **ID**.
The **Notes** field is not used internally by Consul and is meant to
be human-readable.
Checks that are associated with a service may also contain an optional
**DeregisterCriticalServiceAfter** field, which is a timeout in the
same duration format as **Interval** and **TTL**. If a check is in the
critical state for more than this configured value, then its
associated service (and all of its associated checks) will
automatically be deregistered. The minimum timeout is 1 minute, and
the process that reaps critical services runs every 30 seconds, so it
may take slightly longer than the configured timeout to trigger the
deregistration. This should generally be configured with a timeout
that's much, much longer than any expected recoverable outage for the
given service.
If a **Script** is provided, the check type is a script, and Consul
will evaluate the script every **Interval** to update the status.
If a **DockerContainerID** is provided, the check is a Docker check,
and Consul will evaluate the script every **Interval** in the given
container using the specified Shell. Note that Shell is currently only
supported for Docker checks.
An **HTTP** check will perform an HTTP GET request against the value of
**HTTP** (expected to be a URL) every **Interval**. If the response is
any 2xx code, the check is passing. If the response is
``429 Too Many Requests``, the check is **warning**.
Otherwise, the check is **critical**.
An **TCP** check will perform an TCP connection attempt against the
value of **TCP** (expected to be an IP/hostname and port combination)
every **Interval**. If the connection attempt is successful, the check
is **passing**. If the connection attempt is unsuccessful, the check
is **critical**. In the case of a hostname that resolves to both IPv4
and IPv6 addresses, an attempt will be made to both addresses, and the
first successful connection attempt will result in a successful check.
If a **TTL** type is used, then the TTL update endpoint must be used
periodically to update the state of the check.
The **ServiceID** field can be provided to associate the registered
check with an existing service provided by the agent.
The **Status** field can be provided to specify the initial state of
the health check.
"""
token_id = extract_attr(token, keys=["ID"])
params = {"token": token_id}
response = await self._api.put("/v1/agent/check/register",
params=params,
data=check)
return response.status == 200
async def deregister(self, check):
"""Deregisters a local check
Parameters:
check (ObjectID): Check ID
Returns:
bool: ``True`` on success
The agent will take care of deregistering the check from the Catalog.
"""
check_id = extract_attr(check, keys=["CheckID", "ID"])
response = await self._api.get("/v1/agent/check/deregister", check_id)
return response.status == 200
async def passing(self, check, *, note=None):
"""Marks a local check as passing
Parameters:
note (str): Associate a human-readable message with the status
of the check
Returns:
bool: ``True`` on success
"""
return await self.mark(check, "passing", note=note)
async def warning(self, check, *, note=None):
"""Marks a local check as warning
Parameters:
note (str): Associate a human-readable message with the status
of the check
Returns:
bool: ``True`` on success
"""
return await self.mark(check, "warning", note=note)
async def critical(self, check, *, note=None):
"""Marks a local check as critical
Parameters:
note (str): Associate a human-readable message with the status
of the check
Returns:
bool: ``True`` on success
"""
return await self.mark(check, "critical", note=note)
async def mark(self, check, status, *, note=None):
"""Marks a local check as passing, warning or critical
"""
check_id = extract_attr(check, keys=["CheckID", "ID"])
data = {
"Status": status,
"Output": note
}
response = await self._api.put("/v1/agent/check/update", check_id,
data=data)
return response.status == 200
```
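A sketch of registering and driving a TTL check with the endpoint above, following the field descriptions in the `register` docstring. `client` is assumed to be a connected `Consul` instance and the check values are illustrative.
```python
from datetime import timedelta

async def heartbeat(client):
    check = {
        "ID": "mem",
        "Name": "Memory utilization",
        "Notes": "updated by an external monitoring job",
        "TTL": timedelta(seconds=15),   # TTL check: a status update must arrive before it expires
    }
    assert await client.checks.register(check)
    # from the monitoring job, periodically:
    await client.checks.passing("mem", note="61% used")
    # or, if things degrade:
    await client.checks.warning("mem", note="87% used")
```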
#### File: aioconsul/client/client.py
```python
from aioconsul.api import API
from aioconsul.common import cached_property
from .acl_endpoint import ACLEndpoint
from .agent_endpoint import AgentEndpoint
from .catalog_endpoint import CatalogEndpoint
from .checks_endpoint import ChecksEndpoint
from .coordinate_endpoint import CoordinateEndpoint
from .event_endpoint import EventEndpoint
from .health_endpoint import HealthEndpoint
from .kv_endpoint import KVEndpoint
from .members_endpoint import MembersEndpoint
from .operator_endpoint import OperatorEndpoint
from .query_endpoint import QueryEndpoint
from .services_endpoint import ServicesEndpoint
from .session_endpoint import SessionEndpoint
from .status_endpoint import StatusEndpoint
__all__ = ["Consul"]
class Consul:
def __init__(self, address, *, token=None, consistency=None, loop=None):
self.api = API(address,
token=token,
consistency=consistency,
loop=loop)
def close(self):
if self.api:
self.api.close()
__del__ = close
@property
def address(self):
return self.api.address
@property
def token(self):
return self.api.token
@token.setter
def token(self, token):
self.api.token = token
@token.deleter
def token(self):
del self.api.token
@property
def consistency(self):
return self.api.consistency
@cached_property
def acl(self):
return ACLEndpoint(self.api)
@cached_property
def agent(self):
return AgentEndpoint(self.api)
@cached_property
def catalog(self):
return CatalogEndpoint(self.api)
@cached_property
def checks(self):
return ChecksEndpoint(self.api)
@cached_property
def coordinate(self):
return CoordinateEndpoint(self.api)
@cached_property
def event(self):
return EventEndpoint(self.api)
@cached_property
def health(self):
return HealthEndpoint(self.api)
@cached_property
def kv(self):
return KVEndpoint(self.api)
@cached_property
def members(self):
return MembersEndpoint(self.api)
@cached_property
def operator(self):
return OperatorEndpoint(self.api)
@cached_property
def query(self):
return QueryEndpoint(self.api)
@cached_property
def services(self):
return ServicesEndpoint(self.api)
@cached_property
def session(self):
return SessionEndpoint(self.api)
@cached_property
def status(self):
return StatusEndpoint(self.api)
def __repr__(self):
return "<%s(%r)>" % (self.__class__.__name__, str(self.address))
```
#### File: aioconsul/client/members_endpoint.py
```python
from .bases import EndpointBase
from aioconsul.util import extract_attr
class MembersEndpoint(EndpointBase):
"""Interact with the local Consul agent.
"""
async def items(self, *, wan=None):
"""Returns the members as seen by the local serf agent
Parameters:
wan (bool): List of WAN members instead of the LAN members
Returns:
Collection: List of cluster's members
This endpoint returns an object like::
[
{
"Name": "foobar",
"Addr": "10.1.10.12",
"Port": 8301,
"Tags": {
"bootstrap": "1",
"dc": "dc1",
"port": "8300",
"role": "consul"
},
"Status": 1,
"ProtocolMin": 1,
"ProtocolMax": 2,
"ProtocolCur": 2,
"DelegateMin": 1,
"DelegateMax": 3,
"DelegateCur": 3
}
]
"""
response = await self._api.get("/v1/agent/members",
params={"wan": wan})
return response.body
async def join(self, address, *, wan=None):
"""Triggers the local agent to join a node
Parameters:
address (str): Address of node
wan (bool): Attempt to join using the WAN pool
Returns:
bool: ``True`` on success
This endpoint is used to instruct the agent to attempt to connect to
a given address. For agents running in server mode, providing ``wan``
parameter causes the agent to attempt to join using the WAN pool.
"""
response = await self._api.get("/v1/agent/join", address,
params={"wan": wan})
return response.status == 200
async def force_leave(self, node):
"""Forces removal of a node
Parameters:
node (ObjectID): Node name
Returns:
bool: ``True`` on success
This endpoint is used to instruct the agent to force a node into the
``left`` state. If a node fails unexpectedly, then it will be in a
``failed`` state. Once in the ``failed`` state, Consul will attempt to
reconnect, and the services and checks belonging to that node will not
be cleaned up. Forcing a node into the ``left`` state allows its old
entries to be removed.
"""
node_id = extract_attr(node, keys=["Node", "ID"])
        response = await self._api.get("/v1/agent/force-leave", node_id)
return response.status == 200
```
#### File: aioconsul/client/query_endpoint.py
```python
from .bases import EndpointBase
from aioconsul.util import extract_attr
class QueryEndpoint(EndpointBase):
"""Create, update, destroy, and execute prepared queries.
Prepared queries allow you to register a complex service query and then
execute it later via its ID or name to get a set of healthy nodes that
provide a given service. This is particularly useful in combination with
Consul's DNS Interface as it allows for much richer queries than would be
possible given the limited entry points exposed by DNS.
"""
async def items(self, *, dc=None, watch=None, consistency=None):
"""Provides a listing of all prepared queries
Parameters:
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
watch (Blocking): Do a blocking query
consistency (Consistency): Force consistency
Returns:
Collection: List of prepared queries
This returns a list of prepared queries, which looks like::
[
{
"ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05",
"Name": "my-query",
"Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
"Token": "<hidden>",
"Service": {
"Service": "redis",
"Failover": {
"NearestN": 3,
"Datacenters": ["dc1", "dc2"]
},
"OnlyPassing": False,
"Tags": ["master", "!experimental"]
},
"DNS": {
"TTL": timedelta(seconds=10)
},
"RaftIndex": {
"CreateIndex": 23,
"ModifyIndex": 42
}
}
]
"""
response = await self._api.get("/v1/query", params={"dc": dc})
return response.body
async def create(self, query, *, dc=None):
"""Creates a new prepared query
Parameters:
Query (Object): Query definition
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
Returns:
Object: New query ID
The create operation expects a body that defines the prepared query,
like this example::
{
"Name": "my-query",
"Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
"Token": "",
"Near": "node1",
"Service": {
"Service": "redis",
"Failover": {
"NearestN": 3,
"Datacenters": ["dc1", "dc2"]
},
"OnlyPassing": False,
"Tags": ["master", "!experimental"]
},
"DNS": {
"TTL": timedelta(seconds=10)
}
}
Only the **Service** field inside the **Service** structure is
mandatory, all other fields will take their default values if they
are not included.
**Name** is an optional friendly name that can be used to execute a
query instead of using its ID.
**Session** provides a way to automatically remove a prepared query
when the given session is invalidated. This is optional, and if not
given the prepared query must be manually removed when no longer
needed.
**Token**, if specified, is a captured ACL Token that is reused as the
ACL Token every time the query is executed. This allows queries to be
executed by clients with lesser or even no ACL Token, so this should
be used with care. The token itself can only be seen by clients with a
management token. If the **Token** field is left blank or omitted, the
client's ACL Token will be used to determine if they have access to the
service being queried. If the client does not supply an ACL Token, the
anonymous token will be used.
**Near** allows specifying a particular node to sort near based on
distance sorting using Network Coordinates. The nearest instance to
the specified node will be returned first, and subsequent nodes in the
response will be sorted in ascending order of estimated round-trip
times. If the node given does not exist, the nodes in the response
will be shuffled. Using the magic **_agent** value is supported, and
will automatically return results nearest the agent servicing the
request. If unspecified, the response will be shuffled by default.
The set of fields inside the **Service** structure define the
query's behavior.
**Service** is the name of the service to query. This is required.
**Failover** contains two fields, both of which are optional, and
determine what happens if no healthy nodes are available in the local
datacenter when the query is executed. It allows the use of nodes in
other datacenters with very little configuration.
If **NearestN** is set to a value greater than zero, then the query
will be forwarded to up to **NearestN** other datacenters based on
their estimated network round trip time using Network Coordinates from
the WAN gossip pool. The median round trip time from the server
handling the query to the servers in the remote datacenter is used to
determine the priority. The default value is zero. All Consul servers
must be running version 0.6.0 or above in order for this feature to
work correctly. If any servers are not running the required version of
Consul they will be considered last since they won't have any
available network coordinate information.
**Datacenters** contains a fixed list of remote datacenters to forward
the query to if there are no healthy nodes in the local datacenter.
Datacenters are queried in the order given in the list. If this option
is combined with **NearestN**, then the **NearestN** queries will be
performed first, followed by the list given by **Datacenters**. A
given datacenter will only be queried one time during a failover, even
if it is selected by both **NearestN** and is listed in
**Datacenters**. The default value is an empty list.
**OnlyPassing** controls the behavior of the query's health check
filtering. If this is set to false, the results will include nodes
with checks in the passing as well as the warning states. If this is
set to true, only nodes with checks in the passing state will be
returned. The default value is False.
**Tags** provides a list of service tags to filter the query results.
For a service to pass the tag filter it must have all of the required
tags, and none of the excluded tags (prefixed with ``!``).
The default value is an empty list, which does no tag filtering.
**TTL** in the **DNS** structure is a duration string that can use "s"
as a suffix for seconds. It controls how the TTL is set when query
results are served over DNS. If this isn't specified, then the Consul
agent configuration for the given service will be used
(see DNS Caching). If this is specified, it will take precedence over
any Consul agent-specific configuration. If no TTL is specified here
or at the Consul agent level, then the TTL will default to 0.
It returns the ID of the created query::
{
"ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05"
}
"""
if "Token" in query:
# in case of a full token object...
query["Token"] = extract_attr(query["Token"], keys=["ID"])
response = await self._api.post("/v1/query",
params={"dc": dc}, data=query)
return response.body
async def read(self, query, *, dc=None, watch=None, consistency=None):
"""Fetches existing prepared query
Parameters:
query (ObjectID): Query ID
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
watch (Blocking): Do a blocking query
consistency (Consistency): Force consistency
Returns:
Object: Query definition
Raises:
NotFound: Query does not exist
"""
query_id = extract_attr(query, keys=["ID"])
response = await self._api.get("/v1/query", query_id, params={
"dc": dc}, watch=watch, consistency=consistency)
result = response.body[0]
return result
async def update(self, query, *, dc=None):
"""Updates existing prepared query
Parameters:
query (Object): Query definition
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
Returns:
bool: ``True`` on success
"""
query_id = extract_attr(query, keys=["ID"])
response = await self._api.put("/v1/query", query_id,
params={"dc": dc}, data=query)
return response.status == 200
async def delete(self, query, *, dc=None):
"""Delete existing prepared query
Parameters:
query (ObjectID): Query ID
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
Returns:
bool: ``True`` on success
"""
query_id = extract_attr(query, keys=["ID"])
response = await self._api.delete("/v1/query", query_id,
params={"dc": dc})
return response.status == 200
async def execute(self, query, *,
dc=None, near=None, limit=None, consistency=None):
"""Executes a prepared query
Parameters:
query (ObjectID): Query ID
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
near (str): Sort the resulting list in ascending order based on
the estimated round trip time from that node
limit (int): Limit the list's size to the given number of nodes
consistency (Consistency): Force consistency
Returns:
Object:
Raises:
NotFound: the query does not exist
Returns a body like this::
{
"Service": "redis",
"Nodes": [
{
"Node": {
"Node": "foobar",
"Address": "10.1.10.12",
"TaggedAddresses": {
"lan": "10.1.10.12",
"wan": "10.1.10.12"
}
},
"Service": {
"ID": "redis",
"Service": "redis",
"Tags": None,
"Port": 8000
},
"Checks": [
{
"Node": "foobar",
"CheckID": "service:redis",
"Name": "Service 'redis' check",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "redis",
"ServiceName": "redis"
},
{
"Node": "foobar",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "",
"ServiceName": ""
}
],
"DNS": {
"TTL": timedelta(seconds=10)
},
"Datacenter": "dc3",
"Failovers": 2
}
]
}
The **Nodes** section contains the list of healthy nodes providing
the given service, as specified by the constraints of the prepared
query.
**Service** has the service name that the query was selecting. This is
useful for context in case an empty list of nodes is returned.
**DNS** has information used when serving the results over DNS. This
is just a copy of the structure given when the prepared query was
created.
**Datacenter** has the datacenter that ultimately provided the list of
nodes and **Failovers** has the number of remote datacenters that were
queried while executing the query. This provides some insight into
where the data came from. This will be zero during non-failover
operations where there were healthy nodes found in the local
datacenter.
"""
query_id = extract_attr(query, keys=["ID"])
response = await self._api.get(
"/v1/query/%s/execute" % query_id,
params={"dc": dc, "near": near, "limit": limit},
consistency=consistency)
return response.body
async def explain(self, query, *, dc=None, consistency=None):
"""Fetches existing prepared query
Parameters:
query (ObjectID): Query ID
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
watch (Blocking): Do a blocking query
consistency (Consistency): Force consistency
Returns:
Object: the query
Raises:
NotFound: the query does not exist
"""
query_id = extract_attr(query, keys=["ID"])
path = "/v1/query/%s/explain" % query_id
response = await self._api.get(path, consistency=consistency, params={
"dc": dc})
result = response.body
return result
```
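A short end-to-end sketch ties the endpoint together. It is illustrative only: it assumes `client` is an aioconsul client whose `query` attribute is an instance of the `QueryEndpoint` above, and the query definition values are invented for the example.
```python
async def nearest_redis_addresses(client):
    # Register a prepared query that selects passing redis instances and
    # fails over to up to two nearby datacenters.
    created = await client.query.create({
        "Name": "redis-nearby",
        "Service": {
            "Service": "redis",
            "OnlyPassing": True,
            "Failover": {"NearestN": 2},
        },
    })
    try:
        # Prepared queries can be executed by ID (or by their friendly name).
        result = await client.query.execute(created["ID"], limit=3)
        return [item["Node"]["Address"] for item in result["Nodes"]]
    finally:
        # Remove the query once it is no longer needed.
        await client.query.delete(created)
```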
#### File: aioconsul/common/objs.py
```python
class Response:
def __init__(self, path, status, body, headers, method):
self.path = path
self.status = status
self.body = body
self.headers = headers
self.method = method
def __repr__(self):
return "<%s(method=%r, path=%r, status=%r, body=%r, headers=%r)>" % (
self.__class__.__name__,
self.method,
self.path,
self.status,
self.body,
self.headers
)
def extract_body(obj):
if isinstance(obj, Response):
return obj.body
return obj
```
#### File: aioconsul/aioconsul/exceptions.py
```python
__all__ = [
"ConsulError",
"ConflictError",
"NotFound",
"SupportDisabled",
"TransactionError",
"UnauthorizedError"
]
class ConsulError(Exception):
"""Consul base error
Attributes:
value (Object): object of the error
meta (Meta): meta of the error
"""
def __init__(self, msg, *, meta=None):
self.value = msg
self.meta = meta or {}
if isinstance(msg, bytes):
msg = msg.decode("utf-8")
super().__init__(msg)
class NotFound(ConsulError):
"""Raised when object does not exists
Attributes:
value (Object): object of the error
meta (Meta): meta of the error
"""
class ConflictError(ConsulError):
"""Raised when there is a conflict in agent
Attributes:
value (Object): object of the error
meta (Meta): meta of the error
"""
class UnauthorizedError(ConsulError):
"""Raised when session with sufficent rights is required
Attributes:
value (Object): object of the error
meta (Meta): meta of the error
"""
class SupportDisabled(Exception):
"""Endpoint is not active.
"""
class TransactionError(Exception):
"""Raised by failing transaction
Attributes:
errors (Mapping): The errors where index is the index in operations
operations (Collection): The operations
meta (Meta): meta of the error
For example, when the token does not have sufficient rights to write a key::
errors = {
0: {"OpIndex": 0, "What": "Permission denied"}
}
operations = [
{"Verb": "get", "Key": "foo"},
{"Verb": "set", "Key": "bar", "Value": "YmFy", "Flags": None}
]
"""
def __init__(self, errors, operations, meta, *, msg=None):
self.errors = errors
self.operations = operations
self.meta = meta
msg = msg or "Transaction failed"
super().__init__(msg)
```
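The `TransactionError` attributes are easiest to see in a handler. The sketch below is a minimal illustration: the `client.kv.txn(operations)` call is hypothetical and stands in for any endpoint that raises `TransactionError`.
```python
from aioconsul.exceptions import TransactionError

async def run_operations(client, operations):
    try:
        # Hypothetical transactional call, used only to trigger the exception.
        return await client.kv.txn(operations)
    except TransactionError as error:
        # `errors` maps the index of each failing operation to its error details,
        # while `operations` still holds the full list that was submitted.
        for index, failure in error.errors.items():
            op = error.operations[index]
            print("operation %d (%s %s) failed: %s"
                  % (index, op.get("Verb"), op.get("Key"), failure.get("What")))
        raise
```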
#### File: aioconsul/tests/test_blocking.py
```python
import asyncio
import pytest
from collections.abc import Mapping, Sequence
@pytest.mark.asyncio
async def test_nodes(client, server, event_loop):
_, meta = await client.catalog.nodes()
watch1 = event_loop.create_task(client.catalog.nodes(watch=meta))
watch2 = event_loop.create_task(client.catalog.node(server.name,
watch=meta))
release = event_loop.create_task(client.catalog.register({
"Node": "foobar",
"Address": "192.168.10.10",
"Service": {
"ID": "foobar",
"Service": "foobar",
}
}))
await asyncio.wait([watch1, watch2, release], loop=event_loop, timeout=5)
data, _ = watch1.result()
assert isinstance(data, Sequence)
assert data[0]["Address"] == "192.168.10.10"
assert data[0]["Node"] == "foobar"
assert data[1]["Address"] == "127.0.0.1"
assert data[1]["Node"] == server.name
data, _ = watch2.result()
assert isinstance(data, Mapping)
assert data["Node"]["Address"] == "127.0.0.1"
assert data["Node"]["Node"] == server.name
data = release.result()
assert data is True
@pytest.mark.asyncio
async def test_services(client, event_loop):
_, meta = await client.catalog.services()
watch1 = event_loop.create_task(client.catalog.services(watch=meta))
watch2 = event_loop.create_task(client.catalog.service("foobar",
watch=meta))
release = event_loop.create_task(client.catalog.register({
"Node": "foobar",
"Address": "192.168.10.10",
"Service": {
"ID": "foobar",
"Service": "foobar",
}
}))
await asyncio.wait([watch1, watch2, release], loop=event_loop, timeout=5)
data, _ = watch1.result()
assert isinstance(data, Mapping)
assert "foobar" in data
data, _ = watch2.result()
assert isinstance(data, Sequence)
assert data[0]["Address"] == "192.168.10.10"
assert data[0]["Node"] == "foobar"
data = release.result()
assert data is True
```
#### File: aioconsul/tests/test_checks.py
```python
import pytest
from datetime import timedelta
@pytest.mark.asyncio
async def test_endpoint(client):
assert repr(client.checks) == "<ChecksEndpoint(%r)>" % str(client.address)
@pytest.mark.asyncio
async def test_no_checks(client):
result = await client.checks.items()
assert isinstance(result, dict)
assert not result
@pytest.mark.asyncio
async def test_check_ttl(client, server):
check = {
"ID": "foobar",
"Name": "Foobar bar check",
"TTL": timedelta(seconds=2),
}
result = await client.checks.register(check)
assert result is True
result = await client.checks.items()
assert isinstance(result, dict)
assert "foobar" in result
assert result["foobar"]["Status"] == "critical"
# TODO check in catalog that is really critical
result = await client.checks.passing(check)
assert result is True
result = await client.checks.items()
assert isinstance(result, dict)
assert "foobar" in result
assert result["foobar"]["Status"] == "passing"
# TODO check in catalog that is really passing
result = await client.checks.warning(check)
assert result is True
result = await client.checks.items()
assert isinstance(result, dict)
assert "foobar" in result
assert result["foobar"]["Status"] == "warning"
# TODO check in catalog that is really warning
result = await client.checks.critical(check)
assert result is True
result = await client.checks.items()
assert isinstance(result, dict)
assert "foobar" in result
assert result["foobar"]["Status"] == "critical"
# TODO check in catalog that is really critical
result = await client.checks.deregister(check)
assert result is True
result = await client.checks.items()
assert isinstance(result, dict)
assert "foobar" not in result
```
#### File: aioconsul/tests/test_coordinate.py
```python
import pytest
from collections.abc import Mapping, Sequence
@pytest.mark.asyncio
async def test_endpoint(client):
name = "<CoordinateEndpoint(%r)>" % str(client.address)
assert repr(client.coordinate) == name
@pytest.mark.asyncio
async def test_datacenters(client, server):
datacenters = await client.coordinate.datacenters()
assert isinstance(datacenters, Mapping)
assert server.dc in datacenters
assert datacenters[server.dc]["Datacenter"] == server.dc
assert datacenters[server.dc]["Coordinates"][0]["Node"] == server.name
@pytest.mark.asyncio
async def test_nodes(client):
nodes, meta = await client.coordinate.nodes(dc="dc1")
assert isinstance(nodes, Sequence)
assert "Index" in meta
assert "KnownLeader" in meta
assert "LastContact" in meta
``` |
{
"source": "johnnoone/aiodisque",
"score": 3
} |
#### File: aiodisque/aiodisque/connections.py
```python
import asyncio
import hiredis
from .util import parse_address, encode_command
__all__ = ['connect', 'Connection', 'ConnectionError']
def parser():
return hiredis.Reader(protocolError=ProtocolError,
replyError=ConnectionError,
encoding='utf-8')
class ConnectionError(RuntimeError):
pass
class ClosedConnectionError(ConnectionError):
pass
class ProtocolError(ConnectionError):
pass
async def connect(address, *, loop=None, closed_listeners=None):
"""Open a connection to Disque server.
"""
address = parse_address(address, host='127.0.0.1', port=7711)
if address.proto == 'tcp':
host, port = address.address
future = asyncio.open_connection(host=host, port=port, loop=loop)
elif address.proto == 'unix':
path = address.address
future = asyncio.open_unix_connection(path=path, loop=loop)
reader, writer = await future
return Connection(reader, writer,
loop=loop,
closed_listeners=closed_listeners)
class Connection:
def __init__(self, reader, writer, *, loop=None, closed_listeners=None):
self._loop = loop
self._reader = reader
self._writer = writer
self.parser = parser()
self._closed = False
self._closing = None
self._closed_listeners = closed_listeners or []
async def send_command(self, *args):
"""Send command to server
"""
if self.closed:
raise ClosedConnectionError('closed connection')
message = encode_command(*args)
self._writer.write(message)
data = await self._reader.read(65536)
if self._reader.at_eof():
self._closing = True
self._loop.call_soon(self._do_close, None)
raise ClosedConnectionError('Half closed connection')
self.parser.feed(data)
response = self.parser.gets()
if isinstance(response, ProtocolError):
self._closing = True
self._loop.call_soon(self._do_close, response)
self.parser = parser()
raise response
if isinstance(response, Exception):
raise response
if self._reader.at_eof():
self._closing = True
self._loop.call_soon(self._do_close, None)
return response
def close(self):
"""Close connection."""
self._do_close(None)
@property
def closed(self):
"""True if connection is closed."""
closed = self._closing or self._closed
if not closed and self._reader and self._reader.at_eof():
self._closing = closed = True
self._loop.call_soon(self._do_close, None)
return closed
def _do_close(self, exc):
if not self._closed:
self._closed = True
self._closing = False
self._writer.transport.close()
self._writer = None
self._reader = None
for listener in self._closed_listeners:
listener()
```
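A minimal connection round-trip might look like the sketch below; it assumes a Disque server is reachable on the default local port and uses the `HELLO` command, which any Disque node answers.
```python
import asyncio
from aiodisque.connections import connect

async def main():
    conn = await connect('127.0.0.1:7711')
    try:
        # send_command encodes the command, reads the reply and returns the parsed value.
        reply = await conn.send_command('HELLO')
        print(reply)
    finally:
        conn.close()

# Old-style event loop usage, matching the loop= arguments used by the module above.
asyncio.get_event_loop().run_until_complete(main())
```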
#### File: aiodisque/aiodisque/scanners.py
```python
from collections import deque
from collections.abc import AsyncIterator
__all__ = ['JobsScanner', 'QueuesScanner']
class ScannerIterator(AsyncIterator):
def __init__(self, func, func_args, func_kwargs):
self.cursor = 0
self.buffer = deque()
self.state = 'waiting'
self.func = func
self.func_args = func_args
self.func_kwargs = func_kwargs
def __aiter__(self):
self.cursor = 0
self.state = 'waiting'
return self
async def __anext__(self):
if not self.buffer:
await self.fetch_data()
if self.buffer:
return self.buffer.popleft()
raise StopAsyncIteration()
async def fetch_data(self):
if self.state != 'finished':
self.cursor, data = await self.func(self.cursor,
*self.func_args,
**self.func_kwargs)
self.state = 'finished' if self.cursor == 0 else 'running'
self.buffer.extend(data)
class JobsScanner(ScannerIterator):
def __init__(self, client, *,
states=None, count=None, queue=None, reply=None):
"""Iter thru jobs.
Parameters:
client (Disque): disque client
count (int): A hint about how much work to do per iteration
queue (str): Return only jobs in the specified queue
states (str): Return jobs in the specified state. Can be used
multiple times for a logical OR
reply (str): Job reply type. Type can be all or id. Default is to
report just the job ID. If all is specified the full
job state is returned like for the SHOW command
"""
func = client.jscan
func_args = states or []
func_kwargs = {
'count': count,
'queue': queue,
'reply': reply
}
super().__init__(func, func_args, func_kwargs)
class QueuesScanner(ScannerIterator):
def __init__(self, client, *,
count=None, minlen=None, maxlen=None, import_rate=None):
"""Iter thru queues.
Parameters:
client (Disque): disque client
count (int): A hint about how much work to do per iteration
minlen (int): Don't return elements with fewer than
count jobs queued
maxlen (int): Don't return elements with more than
count jobs queued
import_rate (int): Only return elements with a job import rate
(from other nodes) >= rate
"""
func = client.qscan
func_args = []
func_kwargs = {
'count': count,
'minlen': minlen,
'maxlen': maxlen,
'import_rate': import_rate
}
super().__init__(func, func_args, func_kwargs)
```
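Both scanners are consumed with `async for`. The sketch below is only illustrative: it assumes a `Disque` client connected to a local node (the client provides the `jscan`/`qscan` commands the scanners call), and the constructor argument is assumed to accept the same address formats as `parse_address`.
```python
from aiodisque import Disque
from aiodisque.scanners import JobsScanner, QueuesScanner

async def dump(loop):
    client = Disque('127.0.0.1:7711', loop=loop)
    # Walk every queue holding at least one job.
    async for queue in QueuesScanner(client, minlen=1):
        print('queue:', queue)
    # Walk the full job state for everything queued in 'q'.
    async for job in JobsScanner(client, queue='q', reply='all'):
        print('job:', job)
```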
#### File: aiodisque/util/addresses_util.py
```python
from functools import singledispatch
__all__ = ['Address', 'AddressError', 'parse_address']
class Address:
def __init__(self, proto, address):
self.proto = proto
self.address = address
def __eq__(self, other):
if isinstance(other, Address):
return self.proto == other.proto and self.address == other.address
def __repr__(self):
return '<Address(proto=%r, address=%r)>' % (self.proto, self.address)
class TCPAddress(Address):
proto = 'tcp'
def __init__(self, address):
self.address = address
class UnixAddress(Address):
proto = 'unix'
def __init__(self, address):
self.address = address
class AddressError(ValueError):
def __init__(self, address):
self.address = address
super().__init__('do not know how to handle %r' % [address])
@singledispatch
def parse_address(address, **kwargs):
raise AddressError(address)
@parse_address.register(Address)
def parse_addr_instance(address, **kwargs):
return address
@parse_address.register(str)
def parse_addr_str(address, *, proto=None, host=None, port=None, **kwargs):
if '://' in address:
proto, _, address = address.partition('://')
if ':' in address:
proto = proto or 'tcp'
a, _, b = address.partition(':')
host = a or host
port = b or port
address = host, int(port)
elif address.isdigit():
proto = proto or 'tcp'
port = int(address)
address = host, int(port)
elif address.startswith('/'):
proto = proto or 'unix'
else:
proto = proto or 'tcp'
host = address or host
address = host, port
if proto == 'unix':
return UnixAddress(address=address)
elif proto == 'tcp':
return TCPAddress(address=address)
@parse_address.register(int)
def parse_addr_int(address, *, host=None, **kwargs):
proto = 'tcp'
address = host, address
return Address(proto=proto, address=address)
@parse_address.register(list)
@parse_address.register(tuple)
def parse_addr_tuple(address, *, host=None, port=None, **kwargs):
proto = 'tcp'
try:
a, b = address
host = a or host
port = b or port
address = host, port
except Exception as error:
raise AddressError(address) from error
return Address(proto=proto, address=address)
```
#### File: aiodisque/tests/conftest.py
```python
import os.path
from pytest import fixture
from tempfile import TemporaryDirectory
from subprocess import Popen, PIPE, run
from time import sleep
class Configuration:
def __init__(self, **opts):
for k, v in opts.items():
setattr(self, k, v)
class DisqueNode:
def __init__(self, port, dir):
self.port = port
self.dir = dir
self.proc = None
self.socket = os.path.join(dir, 'disque.sock')
def start(self):
if not self.proc:
cmd = ["disque-server",
"--port", str(self.port),
"--dir", self.dir,
"--unixsocket", self.socket,
"--unixsocketperm", "755"]
self.proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
cmd = ['disque', '-p', str(self.port), 'info']
while True:
sleep(.01)
if self.proc.poll():
raise Exception('already stopped!', self.proc.stderr)
resp = run(cmd, stdout=PIPE, stderr=PIPE)
if not resp.returncode:
break
def stop(self):
self.proc.kill()
self.proc = None
@property
def configuration(self):
return Configuration(port=self.port, dir=self.dir, socket=self.socket)
@fixture(scope='function')
def node(request):
tmp_dir = TemporaryDirectory()
node = DisqueNode(port=7711, dir=tmp_dir.name)
node.start()
def teardown():
node.stop()
tmp_dir.cleanup()
request.addfinalizer(teardown)
return node.configuration
```
#### File: aiodisque/tests/test_addresses.py
```python
import pytest
from aiodisque.util import parse_address, Address, AddressError
ok = [
('1.2.3.4', Address(proto='tcp', address=('1.2.3.4', 7711))),
('tcp://1.2.3.4', Address(proto='tcp', address=('1.2.3.4', 7711))),
('127.0.0.1:', Address(proto='tcp', address=('127.0.0.1', 7711))),
(('127.0.0.1', None), Address(proto='tcp', address=('127.0.0.1', 7711))),
(['127.0.0.1', None], Address(proto='tcp', address=('127.0.0.1', 7711))),
(':', Address(proto='tcp', address=('127.0.0.1', 7711))),
(':12', Address(proto='tcp', address=('127.0.0.1', 12))),
('12', Address(proto='tcp', address=('127.0.0.1', 12))),
('errorist.xyz', Address(proto='tcp', address=('errorist.xyz', 7711))),
(12, Address(proto='tcp', address=('127.0.0.1', 12))),
('/tmp/disque.sock', Address(proto='unix', address='/tmp/disque.sock')),
('unix:///foo/bar.sock', Address(proto='unix', address='/foo/bar.sock')),
(Address(proto='foo', address='bar'), Address(proto='foo', address='bar')),
]
fail = [('a',), ('a', 'b', 'c'), ['a'], ['a', 'b', 'c'], {}]
@pytest.mark.parametrize("input,expected", ok)
def test_parse_ok(input, expected):
assert parse_address(input, host='127.0.0.1', port=7711) == expected
@pytest.mark.parametrize("input", fail)
def test_parse_fail(input):
with pytest.raises(AddressError):
parse_address(input)
```
#### File: aiodisque/tests/test_queues.py
```python
import pytest
from aiodisque import Disque
from aiodisque.queues import JobsQueue
@pytest.mark.asyncio
async def test_get(node, event_loop):
client = Disque(node.port, loop=event_loop)
queue = JobsQueue('q', client, loop=event_loop)
await client.addjob('q', 'job', 5000, replicate=1, retry=0)
job = await queue.get()
assert hasattr(job, 'id')
assert hasattr(job, 'body')
assert hasattr(job, 'body')
assert hasattr(job, 'queue')
assert not hasattr(job, 'nacks')
assert not hasattr(job, 'additional_deliveries')
@pytest.mark.asyncio
async def test_get_nowait(node, event_loop):
client = Disque(node.port, loop=event_loop)
queue = JobsQueue('q', client, loop=event_loop)
with pytest.raises(NotImplementedError):
queue.get_nowait()
@pytest.mark.asyncio
async def test_put(node, event_loop):
client = Disque(node.port, loop=event_loop)
queue = JobsQueue('q', client, loop=event_loop)
await queue.put('job')
job = await client.getjob('q')
assert hasattr(job, 'id')
assert hasattr(job, 'body')
assert hasattr(job, 'body')
assert hasattr(job, 'queue')
assert not hasattr(job, 'nacks')
assert not hasattr(job, 'additional_deliveries')
@pytest.mark.asyncio
async def test_put_nowait(node, event_loop):
client = Disque(node.port, loop=event_loop)
queue = JobsQueue('q', client, loop=event_loop)
with pytest.raises(NotImplementedError):
queue.put_nowait('job')
``` |