path | concatenated_notebook |
---|---|
NASH_realExample.ipynb | ###Markdown
1. simulation (check)
2. 0.5 (check)
3. nonlinear (wait for code) (check)

Linear combination: choose the first variable to be the anchor.
###Code
[roc_auc_score(dat_1.NASH, dat_1.iloc[:,i]) for i in range(4)]
#[0.4643326758711374,
# 0.4276791584483892,
# 0.7143326758711374,
# 0.4819197896120973]
[NonpAUC(X0.iloc[:,i],X1.iloc[:,i]) for i in range(4)]
def indicator(s0,s1):
"""Indicator function
s0: scalar
    s1: scalar
    return scalar: 1 if s1 > s0, 0.5 if tied, 0 otherwise
"""
if s0 == s1: return 0.5
elif s1 > s0: return 1
else: return 0
def NonpAUC(s0,s1):
"""compute the nonparametruc AUC.
s1: array of composite scores for class '1'.
s0: array of composite scores for class '0'
return scalar auc
"""
n1 = len(s1)
n0 = len(s0)
ans = 0
for x1 in s1:
for x0 in s0:
ans += indicator(x0,x1)
return ans/(n1*n0)
#### AUC
### method 1, more convenient inside suliu
#def NonpAUC(s0, s1):
# """
# compute the nonparametruc AUC.
# s1: array of composite scores for class '1'.
# s0: array of composite scores for class '0'
# return scalar auc
# """
# n1 = len(s1)
# n0 = len(s0)
# return sum([sum(x> s0) for x in s1])/ (n1*n0)
# method 2
## actually does not need that function,
# roc_auc_score(y, y_pred) directly get nonpAUC
# method 3, can plot ROC curve
#fpr, tpr, thresholds = roc_curve(y, y_pred, pos_label=1)
#metrics.auc(fpr, tpr)
# method I: plt
#plt.title('Receiver Operating Characteristic')
#plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
#plt.legend(loc = 'lower right')
#plt.plot([0, 1], [0, 1],'r--')
#plt.xlim([0, 1])
#plt.ylim([0, 1])
#plt.ylabel('True Positive Rate')
#plt.xlabel('False Positive Rate')
#plt.show()
NonpAUC(np.array([10]),np.array([11]) )
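# Hedged sanity check on synthetic scores (not part of the NASH data): the nonparametric AUC
# above should agree with sklearn's roc_auc_score, since both estimate
# P(score_1 > score_0) + 0.5 * P(score_1 == score_0).
import numpy as np
from sklearn.metrics import roc_auc_score
_rng = np.random.RandomState(0)
_s0 = _rng.normal(0, 1, 50)   # hypothetical class-0 scores
_s1 = _rng.normal(1, 1, 60)   # hypothetical class-1 scores
print(NonpAUC(_s0, _s1), roc_auc_score(np.r_[np.zeros(50), np.ones(60)], np.r_[_s0, _s1]))  # should match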
### anchor var
def anchor_est_coef(coef):
"""
set the first var as anchor var, which has coef 1.
coef: array(cannot be list)
return coef_, array.
"""
# if anchor coef <0
coef_ = np.array(coef)
return coef_/abs(coef_[0]) # abs guarantee it is increasing transformation
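# Small illustrative call with made-up coefficients: dividing by |coef[0]| makes the first marker
# the anchor (coefficient +1, or -1 if its estimated sign was negative) without changing the
# ranking induced by the composite score.
anchor_est_coef(np.array([2.0, 1.0, -4.0]))  # expected: array([ 1. ,  0.5, -2. ])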
###Output
_____no_output_____
###Markdown
Su and Liu's method
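As background (a standard binormal-theory note added for orientation, not a result computed from these data): writing $\mu_0, \mu_1$ and $\Sigma_0, \Sigma_1$ for the class means and covariances, the function below estimates Su and Liu's combination $\hat{c} \propto (\hat{\Sigma}_0 + \hat{\Sigma}_1)^{-1}(\hat{\mu}_1 - \hat{\mu}_0)$, which maximizes the AUC of the linear score $c^{\top}X$ when both classes are multivariate normal.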
###Code
def suliu(X0, X1, bool = True):
    """Su and Liu's linear combination: coefficients proportional to inv(cov0 + cov1) @ (mean1 - mean0).
    X0: df or array, design matrix for class '0'
    X1: df or array, design matrix for class '1'
    bool: if True, rescale the coefficients so the first marker is the anchor (see anchor_est_coef)
    """
a = np.cov(X0, rowvar= False) + np.cov(X1, rowvar= False)
b = X1.mean().to_numpy() - X0.mean().to_numpy()
est_coef = np.matmul(inv(a),b)
if bool:
est_coef = anchor_est_coef(est_coef)
#
Y0 = np.matmul(X0.to_numpy(), est_coef); Y1 = np.matmul(X1.to_numpy(), est_coef)
auc = NonpAUC(Y0, Y1)
if auc >=0.5:
return est_coef, auc
else:
return -est_coef, 1-auc
coef, auc = suliu(X0,X1, bool =False)
print('estimated coef is %s' % coef)
print('empirical AUC is %s' % auc)
coef, auc = suliu(X0,X1)
print('estimated coef is %s' % coef)
print('empirical AUC is %s' % auc)
###Output
estimated coef is [1. 0.66034715 0.37643963 1.02659137]
empirical AUC is 0.6620644312952005
###Markdown
random forest
###Code
def randomforst(X0,X1):
    """Random forest classifier (nonlinear benchmark).
    X0: df, design matrix for class '0'
    X1: df, design matrix for class '1'
    """
    n0 = X0.shape[0]
    n1 = X1.shape[0]
X = pd.concat([X0,X1])
y = [0] * n0
y.extend([1]*n1); y = np.array(y)
rf =RandomForestClassifier(max_depth=2, random_state=43).fit(X,y)
##
y_pred = rf.predict_proba(X)[:,1]
auc = roc_auc_score(y, y_pred)
#print(NonpAUC(y_pred[:n0], y_pred[n0:]))
#feature_importances = rf.feature_importances_
return rf, auc ## return model, for future prediction
mod_rf, auc = randomforst(X0,X1)
#print('estimated feature_importances_ is %s' % coef)
print('empirical AUC is %s' % auc)
###Output
0.7943786982248521
empirical AUC is 0.7943786982248521
###Markdown
SVM_r
###Code
def svm_r(X0,X1):
"""svm with rbf kernel
X0: df, design matrix for class '0'
X1: df, design matrix for class '1'
"""
n0 = X0.shape[0]
n1 = X1.shape[0]
X = pd.concat([X0,X1])
y = [0] * n0
y.extend([1]*n1); y = np.array(y)
mod = SVC(kernel = 'rbf',random_state=42, probability= True).fit(X,y)
##
y_pred = mod.predict_proba(X)[:,1]
auc = roc_auc_score(y, y_pred)
return mod, auc ## cannot return estimates, so return mod for future prediction
mod_svm,auc = svm_r(X0,X1)
print('empirical AUC is %s' % auc)
###Output
empirical AUC is 0.723044049967127
###Markdown
SVM_l
###Code
def svm_l(X0,X1):
"""svm with linear kernel
X0: df, design matrix for class '0'
X1: df, design matrix for class '1'
"""
n0 = X0.shape[0]
n1 = X1.shape[0]
X = pd.concat([X0,X1])
y = [0] * n0
y.extend([1]*n1); y = np.array(y)
mod = SVC(kernel = 'linear',random_state=0, probability= True).fit(X,y)
##
y_pred = mod.predict_proba(X)[:,1]
auc = roc_auc_score(y, y_pred)
return mod, auc ## cannot return estimates, so return mod for future prediction
mod_svm,auc = svm_l(X0,X1)
print('empirical AUC is %s' % auc)
###Output
empirical AUC is 0.7140861275476659
###Markdown
Logistic regression
###Code
def logistic(X0,X1, bool = True):
    """Logistic-regression combination.
    X0: df, design matrix for class '0'
    X1: df, design matrix for class '1'
    bool: if True, rescale the coefficients so the first marker is the anchor
    """
    n0 = X0.shape[0]
    n1 = X1.shape[0]
X = pd.concat([X0,X1])
y = [0] * n0
y.extend([1]*n1); y = np.array(y)
lr = LR(random_state=0).fit(X,y)
##
y_pred = lr.predict_proba(X)[:,1]
auc = roc_auc_score(y, y_pred)
est_coef = lr.coef_[0]
if bool:
est_coef = anchor_est_coef(est_coef)
if auc >=0.5:
return est_coef, auc
else:
return -est_coef, 1-auc
coef, auc = logistic(X0,X1,bool = False)
print('estimated coef is %s' % coef)
print('empirical AUC is %s' % auc)
coef, auc = logistic(X0,X1)
print('estimated coef is %s' % coef)
print('empirical AUC is %s' % auc)
###Output
estimated coef is [ 1. 1.70201691 3.71633799 -4.83960459]
empirical AUC is 0.7140039447731755
###Markdown
Pepe method
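Brief orientation for the code below: with two markers, the Pepe-style approach does a direct grid search on the empirical AUC, evaluating $\lambda X_{(1)} + X_{(2)}$ and $X_{(1)} + \lambda X_{(2)}$ over $\lambda \in [-1, 1]$ and keeping the coefficient pair with the largest nonparametric AUC.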
###Code
def nonp_combine2_auc(l1,l2, X0, X1):
"""
    compute the nonparametric AUC when X0 and X1 have two columns, for a given coefficient pair (l1, l2)
l1: first coef
l2: second coef
X0: df, design matrix for class '0'
X1: df, design matrix for class '1'
"""
n0 = X0.shape[0]
n1 = X1.shape[0]
s0 = np.matmul(X0.to_numpy(), np.array([l1,l2]))
s1 = np.matmul(X1.to_numpy(), np.array([l1,l2]))
return NonpAUC(s0,s1)
def pepe(X0,X1, evalnum = 201, bool = True):
"""
    find the coefficient pair that maximizes the nonparametric AUC; X0 and X1 must have exactly two columns.
X0: df, design matrix for class '0'
X1: df, design matrix for class '1'
"""
l = np.linspace(start=-1, stop=1, num=evalnum)
#l2 = np.linspace(start=-1, stop=1, num=evalnum)
auc_l1 = [nonp_combine2_auc(e,1,X0,X1) for e in l]
auc_l2 = [nonp_combine2_auc(1,e,X0,X1) for e in l]
if max(auc_l1) > max(auc_l2):
ind = auc_l1.index(max(auc_l1))
est_coef = np.array([l[ind],1])
if bool:
est_coef = anchor_est_coef(est_coef)
return est_coef, max(auc_l1)
else:
ind = auc_l2.index(max(auc_l2))
est_coef = np.array([1,l[ind]])
if bool:
est_coef = anchor_est_coef(est_coef)
return est_coef, max(auc_l2)
X0_2 = X0.loc[:,['FIB4','LSVR']]; X1_2 = X1.loc[:,['FIB4','LSVR']]
coef, auc = pepe(X0_2,X1_2, bool = False)
print('estimated coef is %s' % coef)
print('empirical AUC is %s' % auc)
###Output
estimated coef is [ 1. -0.76]
empirical AUC is 0.7164694280078896
###Markdown
Min-max
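In the min-max approach coded below, each subject's markers are first reduced to the row-wise maximum $M_{\max} = \max_k X_k$ and minimum $M_{\min} = \min_k X_k$, and the two-marker Pepe grid search above is then applied to $(M_{\max}, M_{\min})$ (this implicitly assumes the markers are on comparable scales).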
###Code
def liu(X0, X1, bool = True):
"""
X0: df, design matrix for class '0'
X1: df, design matrix for class '1'
"""
# get min max row-wise
max_min_X0 = np.concatenate( ( np.amax(X0.to_numpy(), axis=1).reshape(-1,1) , np.amin(X0.to_numpy(), axis=1).reshape(-1,1) ), axis =1 )
max_min_X1 = np.concatenate( ( np.amax(X1.to_numpy(), axis=1).reshape(-1,1) , np.amin(X1.to_numpy(), axis=1).reshape(-1,1) ), axis =1 )
max_min_X0 = pd.DataFrame(data = max_min_X0); max_min_X1 = pd.DataFrame(data = max_min_X1)
return pepe(max_min_X0, max_min_X1, bool = bool)
coef, auc = liu(X0,X1, bool = False)
print('estimated coef is %s' % coef)
print('empirical AUC is %s' % auc)
###Output
estimated coef is [ 1. -0.94]
empirical AUC is 0.6338757396449705
###Markdown
stepwise
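Sketch of the step-down procedure implemented below: markers are ordered by their individual empirical AUCs, the composite starts from the best single marker, and at each step the current score is updated via the two-marker Pepe search, $S^{(k+1)} = \alpha_k S^{(k)} + \beta_k X_{(k+1)}$; the per-marker coefficients are accumulated so the final score is again a linear combination of the original markers.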
###Code
def auc_check(X0, X1):
"""calculate AUC for every var
X0: df, design matrix for class '0'
X1: df, design matrix for class '1'
"""
p = X0.shape[1]
auc_list = []
for i in list(X0.columns):
auc_list.append(NonpAUC(X0.loc[:,i], X1.loc[:,i] ) )
return auc_list
auc_check(X0,X1)
def stepwise(X0, X1, bool = True):
n0 = X0.shape[0]
n1 = X1.shape[0]
varnum = X0.shape[1]
combcoef = []
## step down
auc_order = np.array(auc_check(X0,X1))
    sort_index = np.argsort(auc_order); sort_index = sort_index[::-1] ## auc_order[sort_index[0]] is the largest, auc_order[sort_index[-1]] the smallest
combmarker0 = X0.iloc[:,[sort_index[0]]].copy() # pd
combmarker1 = X1.iloc[:,[sort_index[0]]].copy() # pd
nal_coef = [1]
for i in range(1,varnum):
#combmarker0 = pd.concat([combmarker0, X0.iloc[:, [sort_index[i]] ] ], axis= 1,ignore_index = True)
#combmarker1 = pd.concat([combmarker1, X1.iloc[:, [sort_index[i]] ] ], axis= 1,ignore_index = True)
combmarker0['new'] = X0.iloc[:, [ sort_index[i] ] ].to_numpy()
combmarker1['new'] = X1.iloc[:, [ sort_index[i] ] ].to_numpy()
temp_inf , _ = pepe(combmarker0,combmarker1, bool= False)
#print(temp_inf)
combcoef.append(temp_inf)
nal_coef = temp_inf[0]*np.array(nal_coef); nal_coef = list(nal_coef); nal_coef.append(temp_inf[1])
combmarker0 = pd.DataFrame(data = np.matmul( combmarker0.to_numpy(), temp_inf))
combmarker1 = pd.DataFrame(data = np.matmul( combmarker1.to_numpy(), temp_inf))
est_coef = np.array([0.]*varnum) ## None has dtype problem, 0. makes float dtype.
for i in range(varnum):
est_coef[sort_index[i]] = nal_coef[i]
auc = NonpAUC( np.matmul(X0.to_numpy() ,est_coef ) , np.matmul(X1.to_numpy() ,est_coef ))
if auc >=0.5:
return est_coef, auc
else:
return -est_coef, 1-auc
coef, auc = stepwise(X0,X1)
print('estimated coef is %s' % coef)
print('empirical AUC is %s' % auc)
coef
###Output
_____no_output_____
###Markdown
Resubstitution (apparent performance: each method is fit and evaluated on the same data)
###Code
class AllMethod:
def __init__(self, method, bool_trans = True):
"""
method: a string, specify which linear combination method to use. ['suliu', 'pepe', 'min-max','stepwise', 'rf', 'svml', 'svmr', 'logistic']
bool_trans: whether to perform log transformation
"""
self.method = method
self.bool_trans = bool_trans
def fit(self, X0, X1):
"""Train the model
X0: df, design matrix for class '0'
X1: df, design matrix for class '1'
return: self,
obtain self.coef_ or self.mod, and self.fitted_auc
"""
if self.bool_trans:
X0 = np.log(X0); X1 = np.log(X1)
if self.method == 'suliu':
self.coef_, self.fiited_auc_ = suliu(X0,X1,bool=False)
elif self.method == 'logistic':
self.coef_, self.fiited_auc_ = logistic(X0,X1,bool=False)
elif self.method == 'min-max':
self.coef_, self.fiited_auc_ = liu(X0,X1,bool=False)
elif self.method == 'stepwise':
self.coef_, self.fiited_auc_ = stepwise(X0,X1,bool=False)
elif self.method == 'pepe':
if X0.shape[1] != 2:
raise ValueError("Passed array is not of the right shape")
self.coef_, self.fiited_auc_ = pepe(X0,X1,bool=False)
elif self.method == 'svml':
self.mod_, self.fiited_auc_ = svm_l(X0,X1)
elif self.method == 'svmr':
self.mod_, self.fiited_auc_ = svm_r(X0,X1)
elif self.method == 'rf':
self.mod_, self.fiited_auc_ = randomforst(X0,X1)
return self
def predict(self, X0, X1):
"""predict
X0: df, design matrix for class '0'
X1: df, design matrix for class '1'
return: y0, y1
"""
if self.bool_trans:
X0 = np.log(X0); X1 = np.log(X1)
if self.method in ['rf', 'svml', 'svmr']: ## no self.coef_ , self.mod_
y0 = self.mod_.predict_proba(X0)[:,1]
y1 = self.mod_.predict_proba(X1)[:,1]
auc = NonpAUC(y0, y1)
return y0, y1, auc
else: ## other methods, which return self.coef_
if self.method == 'min-max':
max_min_X0 = np.concatenate( ( np.amax(X0.to_numpy(), axis=1).reshape(-1,1) , np.amin(X0.to_numpy(), axis=1).reshape(-1,1) ), axis =1 )
max_min_X1 = np.concatenate( ( np.amax(X1.to_numpy(), axis=1).reshape(-1,1) , np.amin(X1.to_numpy(), axis=1).reshape(-1,1) ), axis =1 )
X0 = pd.DataFrame(data = max_min_X0); X1 = pd.DataFrame(data = max_min_X1)
y0 = np.matmul(X0.to_numpy(), self.coef_ )
y1 = np.matmul(X1.to_numpy(), self.coef_ )
auc = NonpAUC(y0, y1)
return y0, y1, auc
def roc_plot(self, X0, X1):
#if self.bool_trans:
# X0 = np.log(X0); X1 = np.log(X1) ## in self.predict, already did the transformation!
n0 = X0.shape[0]; n1 = X1.shape[0]
y0, y1, auc = self.predict(X0,X1); #print(y0); print(y1)
y = [0] * n0
y.extend([1]*n1); y = np.array(y); #print(y)
y_pred = np.concatenate((y0,y1)); #print(y_pred)
fpr, tpr, thresholds = roc_curve(y, y_pred, pos_label=1)
plt.title('Receiver Operating Characteristic, Method: % s' % self.method)
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
lc = AllMethod(method= 'suliu')
lc.fit(X0,X1)
print('estimated coef is %s' % lc.coef_)
print('fitted AUC is %s' % lc.fiited_auc_)
lc.roc_plot(X0,X1)
lc = AllMethod(method= 'logistic')
lc.fit(X0,X1)
print('estimated coef is %s' % lc.coef_)
print('fitted AUC is %s' % lc.fiited_auc_)
lc.roc_plot(X0,X1)
lc = AllMethod(method= 'rf')
lc.fit(X0,X1)
#print('estimated coef is %s' % lc.coef_)
print('fitted AUC is %s' % lc.fiited_auc_)
lc.roc_plot(X0,X1)
lc = AllMethod(method= 'svml')
lc.fit(X0,X1)
#print('estimated coef is %s' % lc.coef_)
print('fitted AUC is %s' % lc.fiited_auc_)
lc.roc_plot(X0,X1)
lc = AllMethod(method= 'svmr')
lc.fit(X0,X1)
#print('estimated coef is %s' % lc.coef_)
print('fitted AUC is %s' % lc.fiited_auc_)
lc.roc_plot(X0,X1)
lc = AllMethod(method= 'stepwise')
lc.fit(X0,X1)
print('estimated coef is %s' % lc.coef_)
print('fitted AUC is %s' % lc.fiited_auc_)
lc.roc_plot(X0,X1)
lc = AllMethod(method= 'min-max')
lc.fit(X0,X1)
print('estimated coef is %s' % lc.coef_)
print('fitted AUC is %s' % lc.fiited_auc_)
lc.roc_plot(X0,X1)
lc = AllMethod(method= 'pepe')
lc.fit(X0_2,X1_2)
print('estimated coef is %s' % lc.coef_)
print('fitted AUC is %s' % lc.fiited_auc_)
lc.roc_plot(X0_2,X1_2)
###Output
estimated coef is [ 1. -0.09]
fitted AUC is 0.7159763313609467
###Markdown
Leave-one-pair-out (LOPO) CV: $\hat{AUC}^{cv} = \frac{1}{n_1 n_2} \sum_i \sum_j I(X_i \hat{c}^{-ij} < Y_j \hat{c}^{-ij})$. 10-fold CV can be applied instead of LOPO CV to gain efficiency.
###Code
X = dat_1.loc[:, ['R1-NASH', 'R2-NASH', 'FIB4','LSVR']]
Y = dat_1.loc[:,'NASH']
def helper(X, Y):
"""Take X, Y, return X0 and X1
X: df/array
Y: df.series
return X0, X1
"""
#try:
X0 = X.loc[Y == 0].copy()
#except:
# X0 = X[Y == 0]
#try:
X1 = X.loc[Y == 1].copy()
#except:
#X1 = X[Y == 1]
return X0,X1
def get_cv(method,bool_trans,X,Y,n_splits=10,cv_type = "StratifiedKFold",verbose = True):
"""Cross validation to get AUC.
method: str, ['suliu', 'pepe', 'min-max','stepwise', 'logistic']
X: design matrix
Y: labels
bool_trans: whether applied log transformation of X
"""
if cv_type == "StratifiedKFold":
cv = StratifiedKFold(n_splits= n_splits, shuffle=True, random_state=42) # The folds are made by preserving the percentage of samples for each class.
else:
cv = KFold(n_splits=n_splits, shuffle=True, random_state=42)
model = AllMethod(method= method, bool_trans= bool_trans)
#training_time = []
AUC = []
for folder, (train_index, val_index) in enumerate(cv.split(X, Y)):
X_train,X_val = X.iloc[train_index],X.iloc[val_index]
y_train,y_val = Y.iloc[train_index],Y.iloc[val_index]
#
X0_train, X1_train = helper(X_train, y_train); X0_val, X1_val = helper(X_val, y_val)
model.fit(X0_train,X1_train)
_,_, auc = model.predict(X0_val,X1_val)
AUC.append(auc)
if verbose:
#print('estimated coef is %s' % model.coef_)
print('fitted AUC is %s' % model.fiited_auc_)
print("test auc in %s fold is %s" % (folder+1,auc) )
print('____'*10)
return AUC
AUC = get_cv('svml',True,X,Y, 10)
print("%s +/- %s" % (np.mean(AUC), np.std(AUC) ) )
AUC = get_cv('svmr',True,X,Y, 10)
print("%s +/- %s" % (np.mean(AUC), np.std(AUC) ) )
AUC = get_cv('rf',True,X,Y, 10)
print("%s +/- %s" % (np.mean(AUC), np.std(AUC) ) )
AUC = get_cv('suliu',True,X,Y, 10)
print("%s +/- %s" % (np.mean(AUC), np.std(AUC) ) )
AUC = get_cv('logistic',True,X,Y, 10)
print("%s +/- %s" % (np.mean(AUC), np.std(AUC) ) )
AUC = get_cv('min-max',True,X,Y, 10)
print("%s +/- %s" % (np.mean(AUC), np.std(AUC) ) )
AUC = get_cv('stepwise',True,X,Y, 10)
print("%s +/- %s" % (np.mean(AUC), np.std(AUC) ) )
X_2 = X.loc[:,['FIB4','LSVR']]
AUC = get_cv('pepe',True,X_2,Y, 10)
print("%s +/- %s" % (np.mean(AUC), np.std(AUC) ) )
###Output
estimated coef is [1. 0.15]
fitted AUC is 0.7171428571428572
test auc in 1 fold is 0.703125
________________________________________
estimated coef is [1. 0.15]
fitted AUC is 0.716530612244898
test auc in 2 fold is 0.6875
________________________________________
estimated coef is [ 1. -0.09]
fitted AUC is 0.7446938775510205
test auc in 3 fold is 0.5
________________________________________
estimated coef is [ 1. -0.23]
fitted AUC is 0.7073469387755102
test auc in 4 fold is 0.703125
________________________________________
estimated coef is [ 1. -1.]
fitted AUC is 0.6989795918367347
test auc in 5 fold is 0.734375
________________________________________
estimated coef is [1. 0.15]
fitted AUC is 0.71
test auc in 6 fold is 0.796875
________________________________________
estimated coef is [1. 0.14]
fitted AUC is 0.7259557344064387
test auc in 7 fold is 0.5892857142857143
________________________________________
estimated coef is [ 1. -0.23]
fitted AUC is 0.7209255533199195
test auc in 8 fold is 0.7142857142857143
________________________________________
estimated coef is [1. 0.15]
fitted AUC is 0.7116700201207243
test auc in 9 fold is 0.6964285714285714
________________________________________
estimated coef is [1. 0.15]
fitted AUC is 0.7189134808853118
test auc in 10 fold is 0.7321428571428571
________________________________________
0.6857142857142857 +/- 0.07887269757951346
###Markdown
Simulation: multivariate normal with equal variance
###Code
# quick test run (relies on u0, u1, sigma and MonteCarlo_1 defined in the cells below)
_ = MonteCarlo_1(10, 20, 20, u0, u1, sigma, sigma)
u0 = [0.1,0.1,0.1, 0.1]; u1 = [0.6, 0.8, 1, 1.2]
sigma = [[1,0.5,0.5,0.5],
[0.5,1,0.5,0.5],
[0.5,0.5,1,0.5],
[0.5,0.5,0.5,1]]
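# Optional reference point (an added sketch, not part of the original analysis): under the
# multivariate-normal model above, the best linear combination has the known theoretical AUC
# Phi( sqrt( (u1-u0)' (Sigma0+Sigma1)^{-1} (u1-u0) ) ), which the Monte Carlo estimates below
# can be compared against.
from numpy.linalg import inv
from scipy.stats import norm
_delta = np.array(u1) - np.array(u0)
_sig_sum = np.array(sigma) + np.array(sigma)   # equal-variance setting: Sigma0 = Sigma1 = sigma
print('theoretical best linear AUC: %.4f' % norm.cdf(np.sqrt(_delta @ inv(_sig_sum) @ _delta)))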
def MonteCarlo_1(T, n0, n1, u0, u1, sigma0, sigma1, log_bool = False):
"""simulation for first scenario: multivarite normal with equal variance
T: number of simulation
n0: sample size of class 0
n1: sample size of class 1
"""
AUC = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} ## same num as simulation time
methods = ['suliu', 'logistic', 'stepwise','min-max', 'rf', 'svml', 'svmr']
for i in range(T):
        ### one Monte Carlo simulation of size n0 + n1
#i = 10
np.random.seed(seed= 100*i+ 4*i)
X0 = multivariate_normal(u0, sigma0, size = n0)
X1 = multivariate_normal(u1, sigma1, size = n1)
if log_bool:
X0 = np.exp(X0)
X1 = np.exp(X1)
#
X = np.concatenate([X0,X1])
y = [0] * n0
y.extend([1]*n1); y = np.array(y) ## X,y is one simulation
X = pd.DataFrame(data = X); y = pd.Series(y)
## within that particular MC simulation, do 10 folds CV
cv = StratifiedKFold(n_splits= 10, shuffle=True, random_state=42)
AUC_folds = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} # same number as folders
#
for folder, (train_index, val_index) in enumerate(cv.split(X, y)):
X_train,X_val = X.iloc[train_index],X.iloc[val_index]
y_train,y_val = y.iloc[train_index],y.iloc[val_index]
#
X0_train, X1_train = helper(X_train, y_train); X0_val, X1_val = helper(X_val, y_val)
for method in methods:
model = AllMethod(method= method, bool_trans= False).fit(X0_train,X1_train)
_,_, auc = model.predict(X0_val,X1_val)
AUC_folds[method].append(auc)
#print(AUC_folds)
for key, val in AUC_folds.items():
AUC[key].append( np.mean(np.array(val) ))
print({key: (np.mean(np.array(val)) ,np.std(np.array(val))) for key,val in AUC.items()})
return AUC
###Output
_____no_output_____
###Markdown
Setting A
###Code
_ = MonteCarlo_1(1000, 20, 20, u0, u1, sigma, sigma)
_ = MonteCarlo_1(1000, 20, 30, u0, u1, sigma, sigma)
_ = MonteCarlo_1(1000, 50, 50, u0, u1, sigma, sigma)
###Output
{'suliu': (0.786708, 0.054775274860104556), 'logistic': (0.7880280000000001, 0.054243149023632496), 'stepwise': (0.780832, 0.057442108039312094), 'min-max': (0.753992, 0.0542533311051036), 'rf': (0.770222, 0.056831986732825045), 'svml': (0.7854140000000001, 0.05588384922318793), 'svmr': (0.7533920000000001, 0.07719796847067935)}
###Markdown
Setting B
###Code
u0 = [0.1,0.1,0.1, 0.1]; u1 = [1.1, 1.4, 1.7, 2]
sigma = [[1,0.5,0.5,0.5],
[0.5,1,0.5,0.5],
[0.5,0.5,1,0.5],
[0.5,0.5,0.5,1]]
_ = MonteCarlo_1(1000, 20, 20, u0, u1, sigma, sigma)
_ = MonteCarlo_1(1000, 20, 30, u0, u1, sigma, sigma)
_ = MonteCarlo_1(1000, 50, 50, u0, u1, sigma, sigma)
###Output
{'suliu': (0.921284, 0.030369974382603606), 'logistic': (0.9227240000000001, 0.0301360220334403), 'stepwise': (0.9190799999999999, 0.03145767950755426), 'min-max': (0.8989040000000001, 0.03385543950386702), 'rf': (0.912004, 0.03285239692929573), 'svml': (0.920322, 0.030783442237670555), 'svmr': (0.906374, 0.035153038616882036)}
###Markdown
Multivariate normal with unequal variance
###Code
u0 = [0.1,0.1,0.1, 0.1]; u1 = [0.6, 0.8, 1, 1.2]
sigma0 = [[1,0.3,0.3,0.3],
[0.3,1,0.3,0.3],
[0.3,0.3,1,0.3],
[0.3,0.3,0.3,1]]
sigma1 = [[1,0.7,0.7,0.7],
[0.7,1,0.7,0.7],
[0.7,0.7,1,0.7],
[0.7,0.7,0.7,1]]
###Output
_____no_output_____
###Markdown
Setting A
###Code
_ = MonteCarlo_1(1000, 20, 20, u0, u1, sigma0, sigma1)
_ = MonteCarlo_1(1000, 20, 30, u0, u1, sigma0, sigma1)
_ = MonteCarlo_1(1000, 50, 50, u0, u1, sigma0, sigma1)
###Output
{'suliu': (0.787756, 0.05329184237760975), 'logistic': (0.789188, 0.053226879074392504), 'stepwise': (0.781864, 0.0563947293991203), 'min-max': (0.803432, 0.048606803803582906), 'rf': (0.786126, 0.05288731534120447), 'svml': (0.78617, 0.05482575216082314), 'svmr': (0.806996, 0.053146664843619316)}
###Markdown
Setting B
###Code
u0 = [0.1,0.1,0.1, 0.1]; u1 = [1.1, 1.4, 1.7, 2]
sigma0 = [[1,0.3,0.3,0.3],
[0.3,1,0.3,0.3],
[0.3,0.3,1,0.3],
[0.3,0.3,0.3,1]]
sigma1 = [[1,0.7,0.7,0.7],
[0.7,1,0.7,0.7],
[0.7,0.7,1,0.7],
[0.7,0.7,0.7,1]]
_ = MonteCarlo_1(1000, 20, 20, u0, u1, sigma0, sigma1)
_ = MonteCarlo_1(1000, 20, 30, u0, u1, sigma0, sigma1)
_ = MonteCarlo_1(1000, 50, 50, u0, u1, sigma0, sigma1)
###Output
{'suliu': (0.9217560000000001, 0.0310952804135933), 'logistic': (0.9230640000000001, 0.030761857941288252), 'stepwise': (0.919452, 0.03167995732320356), 'min-max': (0.9034880000000001, 0.03422738459187321), 'rf': (0.913842, 0.03188797635473282), 'svml': (0.9209820000000001, 0.031100862946227062), 'svmr': (0.918258, 0.03170024346909656)}
###Markdown
Log-normal with unequal variance: Setting A
###Code
_ = MonteCarlo_1(1000, 20, 20, u0, u1, sigma0, sigma1, True)
_ = MonteCarlo_1(1000, 20, 30, u0, u1, sigma0, sigma1, True)
_ = MonteCarlo_1(1000, 50, 50, u0, u1, sigma0, sigma1, True)
###Output
{'suliu': (0.748332, 0.058799470881973105), 'logistic': (0.7629, 0.05759741313635538), 'stepwise': (0.76634, 0.060471583409069114), 'min-max': (0.804064, 0.04958995769306526), 'rf': (0.7860839999999999, 0.05273314843625405), 'svml': (0.766008, 0.06104083826423096), 'svmr': (0.749634, 0.0873703270223936)}
###Markdown
Setting B
###Code
u0 = [0.1,0.1,0.1, 0.1]; u1 = [1.1, 1.4, 1.7, 2]
sigma0 = [[1,0.3,0.3,0.3],
[0.3,1,0.3,0.3],
[0.3,0.3,1,0.3],
[0.3,0.3,0.3,1]]
sigma1 = [[1,0.7,0.7,0.7],
[0.7,1,0.7,0.7],
[0.7,0.7,1,0.7],
[0.7,0.7,0.7,1]]
_ = MonteCarlo_1(1000, 20, 20, u0, u1, sigma0, sigma1, True)
_ = MonteCarlo_1(1000, 20, 30, u0, u1, sigma0, sigma1, True)
_ = MonteCarlo_1(1000, 50, 50, u0, u1, sigma0, sigma1, True)
###Output
{'suliu': (0.877152, 0.04465889492587114), 'logistic': (0.905796, 0.035051824260657234), 'stepwise': (0.9061400000000001, 0.03596682360175832), 'min-max': (0.9016600000000001, 0.034587113207089126), 'rf': (0.913792, 0.03194189624928363), 'svml': (0.906382, 0.03500437224119294), 'svmr': (0.9034099999999999, 0.037322163656465575)}
###Markdown
Normal distribution, with logit(P(Y=1|X)) = x1 - x2 - x3 + (x1-x2)^2 - x4^4
###Code
def MonteCarlo_3(T, n, u, sigma):
"""simulation for last scenario: generate X first from normal, then generate y via logit(Y|X) = 10* ((sinpi*x1) + ... )
T: number of simulation
n: sample size
"""
AUC = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} ## same num as simulation time
methods = ['suliu', 'logistic', 'stepwise','min-max', 'rf', 'svml', 'svmr']
for i in range(T):
        ### one Monte Carlo simulation of size n
np.random.seed(seed= 100*i+ 4*i)
X = multivariate_normal(u, sigma, size = n); #X = np.exp(X)
X_trans = [ele[0] - ele[1] - ele[2]+ (ele[0] - ele[1])**2 - ele[3]**4 for ele in X] ## x1 - x2 - x3 + (x1-x2)^2 - x4^4
p = list(map(lambda x: 1 / (1 + np.exp(-x)), X_trans))
y = bernoulli.rvs(p, size= n)
X = pd.DataFrame(data = X); y = pd.Series(y)
## within that particular MC simulation, do 10 folds CV
cv = StratifiedKFold(n_splits= 10, shuffle=True, random_state=42)
AUC_folds = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} # same number as folders
#
for folder, (train_index, val_index) in enumerate(cv.split(X, y)):
X_train,X_val = X.iloc[train_index],X.iloc[val_index]
y_train,y_val = y.iloc[train_index],y.iloc[val_index]
#
X0_train, X1_train = helper(X_train, y_train); X0_val, X1_val = helper(X_val, y_val)
for method in methods:
model = AllMethod(method= method, bool_trans= False).fit(X0_train,X1_train)
_,_, auc = model.predict(X0_val,X1_val)
AUC_folds[method].append(auc)
#print(AUC_folds)
for key, val in AUC_folds.items():
AUC[key].append( np.mean(np.array(val) ))
print({key: (np.mean(np.array(val)) ,np.std(np.array(val))) for key,val in AUC.items()})
return AUC
u = [0]*4; ## p1 = p0
sigma = [[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]]
_ = MonteCarlo_3(200, 40, u, sigma)
_ = MonteCarlo_3(200, 50, u, sigma)
_ = MonteCarlo_3(200, 100, u, sigma)
###Output
{'suliu': (0.6505657142857143, 0.07721729948158654), 'logistic': (0.6498232142857142, 0.07729269143892227), 'stepwise': (0.6385311904761904, 0.08472136458251457), 'min-max': (0.49920630952380945, 0.09037241979134311), 'rf': (0.7742550595238095, 0.05250776260554239), 'svml': (0.6032980357142858, 0.130584319585125), 'svmr': (0.8117191666666667, 0.05666287572591112)}
###Markdown
Normal distribution, with logit(P(Y=1|X)) = 10*(sin(pi*x1) + sin(pi*x2) + ...)
###Code
def MonteCarlo_2(T, n, u, sigma):
"""simulation for last scenario: generate X first from normal, then generate y via logit(Y|X) = 10* ((sinpi*x1) + ... )
T: number of simulation
n: sample size
u: mean for X
sigma: variance for X
"""
AUC = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} ## same num as simulation time
methods = ['suliu', 'logistic', 'stepwise','min-max', 'rf', 'svml', 'svmr']
for i in range(T):
        ### one Monte Carlo simulation of size n
#i = 10
print(i)
np.random.seed(seed= 100*i+ 4*i)
X = multivariate_normal(u, sigma, size = n)
X_trans = [ 10*sum(list(map(lambda x: np.sin(np.pi*x) , ele))) for ele in X]
p = list(map(lambda x: 1 / (1 + np.exp(-x)), X_trans))
y = bernoulli.rvs(p, size= n)
X = pd.DataFrame(data = X); y = pd.Series(y)
## within that particular MC simulation, do 10 folds CV
cv = StratifiedKFold(n_splits= 10, shuffle=True, random_state=42)
AUC_folds = {'suliu':[], 'logistic':[], 'stepwise':[],'min-max':[], 'rf':[], 'svml':[], 'svmr':[]} # same number as folders
#
for folder, (train_index, val_index) in enumerate(cv.split(X, y)):
X_train,X_val = X.iloc[train_index],X.iloc[val_index]
y_train,y_val = y.iloc[train_index],y.iloc[val_index]
#
X0_train, X1_train = helper(X_train, y_train); X0_val, X1_val = helper(X_val, y_val)
for method in methods:
model = AllMethod(method= method, bool_trans= False).fit(X0_train,X1_train)
_,_, auc = model.predict(X0_val,X1_val)
AUC_folds[method].append(auc)
#print(AUC_folds)
for key, val in AUC_folds.items():
AUC[key].append( np.mean(np.array(val) ))
print({key: (np.mean(np.array(val)) ,np.std(np.array(val))) for key,val in AUC.items()})
return AUC
u = [0]*4; ## p1 = p0
sigma = [[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]]
_ = MonteCarlo_2(200, 40, u, sigma)
#{'suliu': (0.5052916666666666, 0.13708314969592564), 'logistic': (0.5051666666666667, 0.13665030389851152),
#'stepwise': (0.5091249999999999, 0.14328147681128298), 'min-max': (0.468375, 0.17256097452945598),
#'rf': (0.6886249999999999, 0.11789878397968695), 'svml': (0.5011875, 0.15691158686900722),
#'svmr': (0.46789583333333334, 0.15933825693011638)}
_ = MonteCarlo_2(200, 50, u, sigma)
#{'suliu': (0.5173333333333333, 0.13585725760681483), 'logistic': (0.5172916666666666, 0.13500498318992196),
#'stepwise': (0.524125, 0.13767261261372535), 'min-max': (0.48166666666666663, 0.13600194034563545),
#'rf': (0.7029583333333334, 0.10525728234241616), 'svml': (0.4928333333333333, 0.14566189923548606),
#'svmr': (0.5115833333333333, 0.14704683210913908)}
_ = MonteCarlo_2(200, 100, u, sigma)
#{'suliu': (0.49881416666666667, 0.08754259033751344), 'logistic': (0.49831380952380955, 0.08802945503232416),
#'stepwise': (0.5085010714285714, 0.0858479995823942), 'min-max': (0.49105797619047614, 0.08602643655714516),
#'rf': (0.7383856547619047, 0.05699929043270678), 'svml': (0.4985396428571429, 0.09117177144911412),
#'svmr': (0.4884030357142857, 0.09638591813416322)}
_ = MonteCarlo_2(200, 200, u, sigma)
#{'suliu': (0.507084393939394, 0.058765174060825494), 'logistic': (0.5072064772727274, 0.05873872834045124),
#'stepwise': (0.5150096590909091, 0.0588409026415971), 'min-max': (0.4989740404040404, 0.059580195428004294),
#'rf': (0.7751914930555555, 0.03231156644512223), 'svml': (0.4987640025252525, 0.06576031881412701),
#'svmr': (0.5489882828282827, 0.08695929270601069)}
###Output
_____no_output_____ |
9 google customer revenue prediction/google-predictions.ipynb | ###Markdown
*(Embedded banner image: inline base64 PNG omitted.)*
###Code
import pandas as pd
import numpy as np
# DRAGONS
import xgboost as xgb
import lightgbm as lgb
import catboost as cat
# plots
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# pandas / plt options
pd.options.display.max_columns = 999
plt.rcParams['figure.figsize'] = (14, 7)
font = {'family' : 'verdana',
'weight' : 'bold',
'size' : 14}
plt.rc('font', **font)
# remove warnings
import warnings
warnings.simplefilter("ignore")
# garbage collector
import gc
gc.enable()
###Output
_____no_output_____
###Markdown
Loading data
###Code
train = pd.read_csv('../input/create-extracted-json-fields-dataset/extracted_fields_train.gz', dtype={'date': str, 'fullVisitorId': str, 'sessionId':str, 'visitId': np.int64})
test = pd.read_csv('../input/create-extracted-json-fields-dataset/extracted_fields_test.gz', dtype={'date': str, 'fullVisitorId': str, 'sessionId':str, 'visitId': np.int64})
train.shape, test.shape
train.head()
train.columns
# Getting data from leak
train_store_1 = pd.read_csv('../input/exported-google-analytics-data/Train_external_data.csv', low_memory=False, skiprows=6, dtype={"Client Id":'str'})
train_store_2 = pd.read_csv('../input/exported-google-analytics-data/Train_external_data_2.csv', low_memory=False, skiprows=6, dtype={"Client Id":'str'})
test_store_1 = pd.read_csv('../input/exported-google-analytics-data/Test_external_data.csv', low_memory=False, skiprows=6, dtype={"Client Id":'str'})
test_store_2 = pd.read_csv('../input/exported-google-analytics-data/Test_external_data_2.csv', low_memory=False, skiprows=6, dtype={"Client Id":'str'})
# Getting VisitId from Google Analytics...
for df in [train_store_1, train_store_2, test_store_1, test_store_2]:
df["visitId"] = df["Client Id"].apply(lambda x: x.split('.', 1)[1]).astype(np.int64)
# Merge with train/test data
train = train.merge(pd.concat([train_store_1, train_store_2], sort=False), how="left", on="visitId")
test = test.merge(pd.concat([test_store_1, test_store_2], sort=False), how="left", on="visitId")
# Drop Client Id
for df in [train, test]:
df.drop("Client Id", 1, inplace=True)
train.columns
# Cleaning Revenue
for df in [train, test]:
df["Revenue"].fillna('$', inplace=True)
df["Revenue"] = df["Revenue"].apply(lambda x: x.replace('$', '').replace(',', ''))
df["Revenue"] = pd.to_numeric(df["Revenue"], errors="coerce")
df["Revenue"].fillna(0.0, inplace=True)
# free the external-data frames ("del df" in a loop would only delete the loop variable)
del train_store_1, train_store_2, test_store_1, test_store_2
gc.collect()
###Output
_____no_output_____
###Markdown
Looking around

Some pictures to have in mind: target distribution
###Code
target_sums = train.groupby("fullVisitorId")["totals.transactionRevenue"].sum().reset_index()
plt.scatter(range(target_sums.shape[0]), np.sort(np.log1p(target_sums["totals.transactionRevenue"].values)))
plt.xlabel('index')
plt.ylabel('TransactionRevenue')
plt.show()
###Output
_____no_output_____
###Markdown
Key problem:
###Code
train.date = pd.to_datetime(train.date, format="%Y%m%d")
test.date = pd.to_datetime(test.date, format="%Y%m%d")
train.date.value_counts().sort_index().plot(label="train")
test.date.value_counts().sort_index().plot(label="test")
plt.legend()
###Output
_____no_output_____
###Markdown
Comparing categories in train and test:
###Code
def drawBars(columnname):
    """Compare the category counts of `columnname` between train and test."""
    sns.barplot(x="count", y="index", hue="dataset",
                data=pd.melt(pd.concat([train[columnname].value_counts().rename("train"),
                                        test[columnname].value_counts().rename("test")], axis=1, sort=False).reset_index(),
                             id_vars="index", var_name="dataset", value_name="count"))
drawBars("channelGrouping")
drawBars("geoNetwork.continent")
ids_train = set(train.fullVisitorId.unique())
ids_test = set(test.fullVisitorId.unique())
print("Unique visitor ids in train:", len(ids_train))
print("Unique visitor ids in test:", len(ids_test))
print("Common visitors in train and test:", len(ids_train & ids_test))
###Output
_____no_output_____
###Markdown
Weird "double" sessions:
###Code
problem = train[train.sessionId.map(train.sessionId.value_counts() == 2)].sort_values(["sessionId", 'visitStartTime'])
problem.head(10)
###Output
_____no_output_____
###Markdown
VisitStartTime seems to be same thing as visitId... yet not always!
###Code
(train.visitStartTime == train.visitId).value_counts()
###Output
_____no_output_____
###Markdown
Suspicious simultaneous visitors with same visitorId and same visitStartTime:
###Code
train.loc[pd.to_datetime(train.visitStartTime, unit='s') == "2017-04-25 18:49:35"].head(8)
###Output
_____no_output_____
###Markdown
Seems to be a serious problem:
###Code
print("Train: ", np.bincount(train.visitId.value_counts()))
print("test: ", np.bincount(test.visitId.value_counts()))
###Output
_____no_output_____
###Markdown
Preprocessing

Setting time as index and saving time as a feature (for FE purposes only)
###Code
train.visitStartTime = pd.to_datetime(train.visitStartTime, unit='s')
test.visitStartTime = pd.to_datetime(test.visitStartTime, unit='s')
train["date"] = train.visitStartTime
test["date"] = test.visitStartTime
train.set_index("visitStartTime", inplace=True)
test.set_index("visitStartTime", inplace=True)
train.sort_index(inplace=True)
test.sort_index(inplace=True)
###Output
_____no_output_____
###Markdown
Clearing rare categories and setting NaNs to 0:
###Code
def clearRare(columnname, limit = 1000):
# you may search for rare categories in train, train&test, or just test
#vc = pd.concat([train[columnname], test[columnname]], sort=False).value_counts()
vc = test[columnname].value_counts()
common = vc > limit
common = set(common.index[common].values)
print("Set", sum(vc <= limit), columnname, "categories to 'other';", end=" ")
train.loc[train[columnname].map(lambda x: x not in common), columnname] = 'other'
test.loc[test[columnname].map(lambda x: x not in common), columnname] = 'other'
print("now there are", train[columnname].nunique(), "categories in train")
train.fillna(0, inplace=True)
test.fillna(0, inplace=True)
clearRare("device.browser")
clearRare("device.operatingSystem")
clearRare("geoNetwork.country")
clearRare("geoNetwork.city")
clearRare("geoNetwork.metro")
clearRare("geoNetwork.networkDomain")
clearRare("geoNetwork.region")
clearRare("geoNetwork.subContinent")
clearRare("trafficSource.adContent")
clearRare("trafficSource.campaign")
clearRare("trafficSource.keyword")
clearRare("trafficSource.medium")
clearRare("trafficSource.referralPath")
clearRare("trafficSource.source")
# Clearing leaked data:
for df in [train, test]:
df["Avg. Session Duration"][df["Avg. Session Duration"] == 0] = "00:00:00"
df["Avg. Session Duration"] = df["Avg. Session Duration"].str.split(':').apply(lambda x: int(x[0]) * 60 + int(x[1]))
df["Bounce Rate"] = df["Bounce Rate"].astype(str).apply(lambda x: x.replace('%', '')).astype(float)
df["Goal Conversion Rate"] = df["Goal Conversion Rate"].astype(str).apply(lambda x: x.replace('%', '')).astype(float)
###Output
_____no_output_____
###Markdown
Features

Based on strange things in the dataset:
###Code
for df in [train, test]:
    # remember these features were equal, but not always? Maybe it means something...
    df["id_incoherence"] = pd.to_datetime(df.visitId, unit='s') != df.date
    # remember the visitId duplicates?
    df["visitId_dublicates"] = df.visitId.map(df.visitId.value_counts())
    # remember the session duplicates?
df["session_dublicates"] = df.sessionId.map(df.sessionId.value_counts())
###Output
_____no_output_____
###Markdown
Basic time features:
###Code
for df in [train, test]:
df['weekday'] = df['date'].dt.dayofweek.astype(object)
df['time'] = df['date'].dt.second + df['date'].dt.minute*60 + df['date'].dt.hour*3600
#df['month'] = df['date'].dt.month # it must not be included in features during learning!
df['day'] = df['date'].dt.date # it must not be included in features during learning!
###Output
_____no_output_____
###Markdown
Looking to future features (from https://www.kaggle.com/ashishpatel26/future-is-here):
###Code
df = pd.concat([train, test])
df.sort_values(['fullVisitorId', 'date'], ascending=True, inplace=True)
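# gap (in hours) between a visitor's consecutive sessions: shift(1) looks at the previous
# session, shift(-1) at the next one ("looking into the future" features)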
df['prev_session'] = (df['date'] - df[['fullVisitorId', 'date']].groupby('fullVisitorId')['date'].shift(1)).astype(np.int64) // 1e9 // 60 // 60
df['next_session'] = (df['date'] - df[['fullVisitorId', 'date']].groupby('fullVisitorId')['date'].shift(-1)).astype(np.int64) // 1e9 // 60 // 60
df.sort_index(inplace=True)
train = df[:len(train)]
test = df[len(train):]
###Output
_____no_output_____
###Markdown
Paired categories from "teach-lightgbm-to-sum-predictions" kernel
###Code
for df in [train, test]:
df['source.country'] = df['trafficSource.source'] + '_' + df['geoNetwork.country']
df['campaign.medium'] = df['trafficSource.campaign'] + '_' + df['trafficSource.medium']
df['browser.category'] = df['device.browser'] + '_' + df['device.deviceCategory']
df['browser.os'] = df['device.browser'] + '_' + df['device.operatingSystem']
for df in [train, test]:
df['device_deviceCategory_channelGrouping'] = df['device.deviceCategory'] + "_" + df['channelGrouping']
df['channelGrouping_browser'] = df['device.browser'] + "_" + df['channelGrouping']
df['channelGrouping_OS'] = df['device.operatingSystem'] + "_" + df['channelGrouping']
for i in ['geoNetwork.city', 'geoNetwork.continent', 'geoNetwork.country','geoNetwork.metro', 'geoNetwork.networkDomain', 'geoNetwork.region','geoNetwork.subContinent']:
for j in ['device.browser','device.deviceCategory', 'device.operatingSystem', 'trafficSource.source']:
df[i + "_" + j] = df[i] + "_" + df[j]
df['content.source'] = df['trafficSource.adContent'].astype(str) + "_" + df['source.country']
df['medium.source'] = df['trafficSource.medium'] + "_" + df['source.country']
###Output
_____no_output_____
###Markdown
User-aggregating features:
###Code
for feature in ["totals.hits", "totals.pageviews"]:
info = pd.concat([train, test], sort=False).groupby("fullVisitorId")[feature].mean()
train["usermean_" + feature] = train.fullVisitorId.map(info)
test["usermean_" + feature] = test.fullVisitorId.map(info)
for feature in ["visitNumber"]:
info = pd.concat([train, test], sort=False).groupby("fullVisitorId")[feature].max()
train["usermax_" + feature] = train.fullVisitorId.map(info)
test["usermax_" + feature] = test.fullVisitorId.map(info)
###Output
_____no_output_____
###Markdown
Encoding features
###Code
excluded = ['date', 'fullVisitorId', 'sessionId', 'totals.transactionRevenue', 'visitId', 'visitStartTime',
'month', 'day', 'help']
cat_cols = [f for f in train.columns if (train[f].dtype == 'object' and f not in excluded)]
real_cols = [f for f in train.columns if (not f in cat_cols and f not in excluded)]
train[cat_cols].nunique()
from sklearn.preprocessing import LabelEncoder
for col in cat_cols:
lbl = LabelEncoder()
lbl.fit(list(train[col].values.astype('str')) + list(test[col].values.astype('str')))
train[col] = lbl.transform(list(train[col].values.astype('str')))
test[col] = lbl.transform(list(test[col].values.astype('str')))
for col in real_cols:
train[col] = train[col].astype(float)
test[col] = test[col].astype(float)
train[real_cols + cat_cols].head()
for to_del in ["date", "sessionId", "visitId", "day"]:
del train[to_del]
del test[to_del]
###Output
_____no_output_____
###Markdown
Preparing validation
###Code
excluded = ['date', 'fullVisitorId', 'sessionId', 'totals.transactionRevenue', 'visitId', 'visitStartTime', "month", "help"]
cat_cols = [f for f in train.columns if (train[f].dtype == 'int64' and f not in excluded)]
real_cols = [f for f in train.columns if (not f in cat_cols and f not in excluded)]
###Output
_____no_output_____
###Markdown
Function to tell us the score using the metric we actually care about
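Concretely (mirroring the code below): session-level predictions are mapped back from log space with $\exp(\cdot)-1$, summed per `fullVisitorId`, and scored as $\sqrt{\tfrac{1}{n}\sum_u \big(\log(1+\hat{R}_u)-\log(1+R_u)\big)^2}$, where $R_u$ is the total transaction revenue of visitor $u$.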
###Code
from sklearn.metrics import mean_squared_error
def score(data, y):
validation_res = pd.DataFrame(
{"fullVisitorId": data["fullVisitorId"].values,
"transactionRevenue": data["totals.transactionRevenue"].values,
"predictedRevenue": np.expm1(y)})
    validation_res = validation_res.groupby("fullVisitorId")[["transactionRevenue", "predictedRevenue"]].sum().reset_index()
return np.sqrt(mean_squared_error(np.log1p(validation_res["transactionRevenue"].values),
np.log1p(validation_res["predictedRevenue"].values)))
###Output
_____no_output_____
###Markdown
Cute function to validate and prepare stacking
###Code
from sklearn.model_selection import GroupKFold
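# Brief illustration with made-up visitor ids (not the competition data): GroupKFold keeps all
# sessions of a given fullVisitorId inside a single fold, so the visitor-level target cannot leak
# between the training and validation sides of a split, which is what KFoldValidation below relies on.
_toy_groups = np.array(['a', 'a', 'b', 'b', 'c', 'd'])
for _trn, _val in GroupKFold(n_splits=3).split(np.zeros((6, 1)), groups=_toy_groups):
    print(sorted(set(_toy_groups[_trn]) & set(_toy_groups[_val])))  # always empty: no shared visitors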
class KFoldValidation():
def __init__(self, data, n_splits=5):
unique_vis = np.array(sorted(data['fullVisitorId'].astype(str).unique()))
folds = GroupKFold(n_splits)
ids = np.arange(data.shape[0])
self.fold_ids = []
for trn_vis, val_vis in folds.split(X=unique_vis, y=unique_vis, groups=unique_vis):
self.fold_ids.append([
ids[data['fullVisitorId'].astype(str).isin(unique_vis[trn_vis])],
ids[data['fullVisitorId'].astype(str).isin(unique_vis[val_vis])]
])
def validate(self, train, test, features, model, name="", prepare_stacking=False,
fit_params={"early_stopping_rounds": 50, "verbose": 100, "eval_metric": "rmse"}):
model.FI = pd.DataFrame(index=features)
full_score = 0
if prepare_stacking:
test[name] = 0
train[name] = np.NaN
for fold_id, (trn, val) in enumerate(self.fold_ids):
devel = train[features].iloc[trn]
y_devel = np.log1p(train["totals.transactionRevenue"].iloc[trn])
valid = train[features].iloc[val]
y_valid = np.log1p(train["totals.transactionRevenue"].iloc[val])
print("Fold ", fold_id, ":")
model.fit(devel, y_devel, eval_set=[(valid, y_valid)], **fit_params)
if len(model.feature_importances_) == len(features): # some bugs in catboost?
model.FI['fold' + str(fold_id)] = model.feature_importances_ / model.feature_importances_.sum()
predictions = model.predict(valid)
predictions[predictions < 0] = 0
print("Fold ", fold_id, " error: ", mean_squared_error(y_valid, predictions)**0.5)
fold_score = score(train.iloc[val], predictions)
full_score += fold_score / len(self.fold_ids)
print("Fold ", fold_id, " score: ", fold_score)
if prepare_stacking:
train[name].iloc[val] = predictions
test_predictions = model.predict(test[features])
test_predictions[test_predictions < 0] = 0
test[name] += test_predictions / len(self.fold_ids)
print("Final score: ", full_score)
return full_score
Kfolder = KFoldValidation(train)
lgbmodel = lgb.LGBMRegressor(n_estimators=1000, objective="regression", metric="rmse", num_leaves=31, min_child_samples=100,
learning_rate=0.03, bagging_fraction=0.7, feature_fraction=0.5, bagging_frequency=5,
bagging_seed=2019, subsample=.9, colsample_bytree=.9, use_best_model=True)
Kfolder.validate(train, test, real_cols + cat_cols, lgbmodel, "lgbpred", prepare_stacking=True)
lgbmodel.FI.mean(axis=1).sort_values()[:30].plot(kind="barh")
###Output
_____no_output_____
###Markdown
User-level

Make one user one object:
* all features are averaged
* we hope that categorical features do not change for one user (that's not true :/ )
* categorical labels are averaged (!!!) and are treated as numerical features (o_O)
* predictions are averaged in multiple ways...
###Code
def create_user_df(df):
agg_data = df[real_cols + cat_cols + ['fullVisitorId']].groupby('fullVisitorId').mean()
pred_list = df[['fullVisitorId', 'lgbpred']].groupby('fullVisitorId').apply(lambda visitor_df: list(visitor_df.lgbpred))\
.apply(lambda x: {'pred_'+str(i): pred for i, pred in enumerate(x)})
all_predictions = pd.DataFrame(list(pred_list.values), index=agg_data.index)
feats = all_predictions.columns
all_predictions['t_mean'] = all_predictions.mean(axis=1)
all_predictions['t_median'] = all_predictions.median(axis=1) # including t_mean as one of the elements? well, ok
all_predictions['t_sum_log'] = all_predictions.sum(axis=1)
all_predictions['t_sum_act'] = all_predictions.fillna(0).sum(axis=1)
all_predictions['t_nb_sess'] = all_predictions.isnull().sum(axis=1)
full_data = pd.concat([agg_data, all_predictions], axis=1).astype(float)
full_data['fullVisitorId'] = full_data.index
del agg_data, all_predictions
gc.collect()
return full_data
user_train = create_user_df(train)
user_test = create_user_df(test)
features = list(user_train.columns)[:-1] # don't include "fullVisitorId"
user_train["totals.transactionRevenue"] = train[['fullVisitorId', 'totals.transactionRevenue']].groupby('fullVisitorId').sum()
for f in features:
if f not in user_test.columns:
user_test[f] = np.nan
###Output
_____no_output_____
###Markdown
Meta-models
###Code
Kfolder = KFoldValidation(user_train)
lgbmodel = lgb.LGBMRegressor(n_estimators=1000, objective="regression", metric="rmse", num_leaves=31, min_child_samples=100,
learning_rate=0.03, bagging_fraction=0.7, feature_fraction=0.5, bagging_frequency=5,
bagging_seed=2019, subsample=.9, colsample_bytree=.9,
use_best_model=True)
Kfolder.validate(user_train, user_test, features, lgbmodel, name="lgbfinal", prepare_stacking=True)
xgbmodel = xgb.XGBRegressor(max_depth=22, learning_rate=0.02, n_estimators=1000,
objective='reg:linear', gamma=1.45, seed=2019, silent=False,
subsample=0.67, colsample_bytree=0.054, colsample_bylevel=0.50)
Kfolder.validate(user_train, user_test, features, xgbmodel, name="xgbfinal", prepare_stacking=True)
catmodel = cat.CatBoostRegressor(iterations=500, learning_rate=0.2, depth=5, random_seed=2019)
Kfolder.validate(user_train, user_test, features, catmodel, name="catfinal", prepare_stacking=True,
fit_params={"use_best_model": True, "verbose": 100})
###Output
_____no_output_____
###Markdown
Ensembling dragons
###Code
user_train['PredictedLogRevenue'] = 0.4 * user_train["lgbfinal"] + \
0.2 * user_train["xgbfinal"] + \
0.4 * user_train["catfinal"]
score(user_train, user_train.PredictedLogRevenue)
user_test['PredictedLogRevenue'] = 0.4 * user_test["lgbfinal"] + 0.4 * user_test["catfinal"] + 0.2 * user_test["xgbfinal"]
user_test[['PredictedLogRevenue']].to_csv('leaky submission.csv', index=True)
###Output
_____no_output_____ |
workshop/nipype_tutorial/notebooks/basic_execution_configuration.ipynb | ###Markdown
Execution Configuration OptionsNipype gives you many liberties on how to create workflows, but the execution of them uses a lot of default parameters. But you have of course all the freedom to change them as you like.Nipype looks for the configuration options in the local folder under the name ``nipype.cfg`` and in ``~/.nipype/nipype.cfg`` (in this order). It can be divided into **Logging** and **Execution** options. A few of the possible options are the following: Logging- **`workflow_level`**: How detailed the logs regarding workflow should be (possible values: ``INFO`` and ``DEBUG``; default value: ``INFO``)- **`utils_level`**: How detailed the logs regarding nipype utils, like file operations (for example overwriting warning) or the resource profiler, should be (possible values: ``INFO`` and ``DEBUG``; default value: ``INFO``)- **`interface_level`**: How detailed the logs regarding interface execution should be (possible values: ``INFO`` and ``DEBUG``; default value: ``INFO``)- **`filemanip_level`** (deprecated as of 1.0): How detailed the logs regarding file operations (for example overwriting warning) should be (possible values: ``INFO`` and ``DEBUG``)- **`log_to_file`**: Indicates whether logging should also send the output to a file (possible values: ``true`` and ``false``; default value: ``false``)- **`log_directory`**: Where to store logs. (string, default value: home directory)- **`log_size`**: Size of a single log file. (integer, default value: 254000)- **`log_rotate`**: How many rotations should the log file make. (integer, default value: 4) Execution- **`plugin`**: This defines which execution plugin to use. (possible values: ``Linear``, ``MultiProc``, ``SGE``, ``IPython``; default value: ``Linear``)- **`stop_on_first_crash`**: Should the workflow stop upon the first node crashing or try to execute as many nodes as possible? (possible values: ``true`` and ``false``; default value: ``false``)- **`stop_on_first_rerun`**: Should the workflow stop upon the first node trying to recompute (by that we mean rerunning a node that has been run before - this can happen due changed inputs and/or hash_method since the last run). (possible values: ``true`` and ``false``; default value: ``false``)- **`hash_method`**: Should the input files be checked for changes using their content (slow, but 100% accurate) or just their size and modification date (fast, but potentially prone to errors)? (possible values: ``content`` and ``timestamp``; default value: ``timestamp``)- **`keep_inputs`**: Ensures that all inputs that are created in the nodes working directory are kept after node execution (possible values: ``true`` and ``false``; default value: ``false``)- **`single_thread_matlab`**: Should all of the Matlab interfaces (including SPM) use only one thread? This is useful if you are parallelizing your workflow using MultiProc or IPython on a single multicore machine. (possible values: ``true`` and ``false``; default value: ``true``)- **`display_variable`**: Override the ``$DISPLAY`` environment variable for interfaces that require an X server. This option is useful if there is a running X server, but ``$DISPLAY`` was not defined in nipype's environment. For example, if an X server is listening on the default port of 6000, set ``display_variable = :0`` to enable nipype interfaces to use it. It may also point to displays provided by VNC, [xnest](http://www.x.org/archive/X11R7.5/doc/man/man1/Xnest.1.html) or [Xvfb](http://www.x.org/archive/X11R6.8.1/doc/Xvfb.1.html). 
If neither ``display_variable`` nor the ``$DISPLAY`` environment variable is set, nipype will try to configure a new virtual server using Xvfb. (possible values: any X server address; default value: not set)- **`remove_unnecessary_outputs`**: This will remove any interface outputs not needed by the workflow. If the required outputs from a node changes, rerunning the workflow will rerun the node. Outputs of leaf nodes (nodes whose outputs are not connected to any other nodes) will never be deleted independent of this parameter. (possible values: ``true`` and ``false``; default value: ``true``)- **`try_hard_link_datasink`**: When the DataSink is used to produce an organized output file outside of nipypes internal cache structure, a file system hard link will be attempted first. A hard link allows multiple file paths to point to the same physical storage location on disk if the conditions allow. By referring to the same physical file on disk (instead of copying files byte-by-byte) we can avoid unnecessary data duplication. If hard links are not supported for the source or destination paths specified, then a standard byte-by-byte copy is used. (possible values: ``true`` and ``false``; default value: ``true``)- **`use_relative_paths`**: Should the paths stored in results (and used to look for inputs) be relative or absolute. Relative paths allow moving the whole working directory around but may cause problems with symlinks. (possible values: ``true`` and ``false``; default value: ``false``)- **`local_hash_check`**: Perform the hash check on the job submission machine. This option minimizes the number of jobs submitted to a cluster engine or a multiprocessing pool to only those that need to be rerun. (possible values: ``true`` and ``false``; default value: ``true``)- **`job_finished_timeout`**: When batch jobs are submitted through, SGE/PBS/Condor they could be killed externally. Nipype checks to see if a results file exists to determine if the node has completed. This timeout determines for how long this check is done after a job finish is detected. (float in seconds; default value: 5)- **`remove_node_directories`** (EXPERIMENTAL): Removes directories whose outputs have already been used up. Doesn't work with IdentiInterface or any node that patches data through (without copying) (possible values: ``true`` and ``false``; default value: ``false``)- **`stop_on_unknown_version`**: If this is set to True, an underlying interface will raise an error, when no version information is available. Please notify developers or submit a patch.- **`parameterize_dirs`**: If this is set to True, the node's output directory will contain full parameterization of any iterable, otherwise parameterizations over 32 characters will be replaced by their hash. (possible values: ``true`` and ``false``; default value: ``true``)- **`poll_sleep_duration`**: This controls how long the job submission loop will sleep between submitting all pending jobs and checking for job completion. To be nice to cluster schedulers the default is set to 2 seconds.- **`xvfb_max_wait`**: Maximum time (in seconds) to wait for Xvfb to start, if the _redirect_x parameter of an Interface is True.- **`crashfile_format`**: This option controls the file type of any crashfile generated. Pklz crashfiles allow interactive debugging and rerunning of nodes, while text crashfiles allow portability across machines and shorter load time. 
(possible values: ``pklz`` and ``txt``; default value: ``pklz``) Resource Monitor- **`enabled`**: Enables monitoring the resources occupation (possible values: ``true`` and ``false``; default value: ``false``). All the following options will be dismissed if the resource monitor is not enabled.- **`sample_frequency`**: Sampling period (in seconds) between measurements of resources (memory, cpus) being used by an interface (default value: ``1``)- **`summary_file`**: Indicates where the summary file collecting all profiling information from the resource monitor should be stored after execution of a workflow. The ``summary_file`` does not apply to interfaces run independently. (unset by default, in which case the summary file will be written out to ``/resource_monitor.json`` of the top-level workflow).- **`summary_append`**: Append to an existing summary file (only applies to workflows). (default value: ``true``, possible values: ``true`` or ``false``). Example [logging] workflow_level = DEBUG [execution] stop_on_first_crash = true hash_method = timestamp display_variable = :1 [monitoring] enabled = false `Workflow.config` property has a form of a nested dictionary reflecting the structure of the `.cfg` file.
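To check what a given option currently resolves to, you can also query the global `config` object directly -- a small sketch (values are returned as strings):
```python
from nipype import config
print(config.get('execution', 'hash_method'))
print(config.get('logging', 'workflow_level'))
```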
###Code
from nipype import Workflow
myworkflow = Workflow(name='myworkflow')
myworkflow.config['execution'] = {'stop_on_first_rerun': 'True',
'hash_method': 'timestamp'}
###Output
_____no_output_____
###Markdown
You can also directly set global config options in your workflow script. An example is shown below. This needs to be called before you import the pipeline or the logger. Otherwise, logging level will not be reset.
###Code
from nipype import config
cfg = dict(logging=dict(workflow_level = 'DEBUG'),
execution={'stop_on_first_crash': False,
'hash_method': 'content'})
config.update_config(cfg)
###Output
_____no_output_____
###Markdown
Enabling logging to file
By default, logging to file is disabled. One can enable it and write the file to a location of choice as in the example below.
###Code
import os
from nipype import config, logging
config.update_config({'logging': {'log_directory': os.getcwd(),
'log_to_file': True}})
logging.update_logging(config)
###Output
_____no_output_____
###Markdown
The logging update line is necessary to change the behavior of logging, such as the output directory, logging level, etc.
Debug configuration
To enable debug mode, one can insert the following lines:
###Code
from nipype import config
config.enable_debug_mode()
###Output
_____no_output_____
###Markdown
In this mode the following variables are set:
###Code
config.set('execution', 'stop_on_first_crash', 'true')
config.set('execution', 'remove_unnecessary_outputs', 'false')
config.set('execution', 'keep_inputs', 'true')
config.set('logging', 'workflow_level', 'DEBUG')
config.set('logging', 'interface_level', 'DEBUG')
config.set('logging', 'utils_level', 'DEBUG')
###Output
_____no_output_____
###Markdown
The primary loggers (`workflow`, `interface` and `utils`) are also reset to level `DEBUG`. You may wish to adjust these manually using:
```python
from nipype import logging
logging.getLogger().setLevel()
```
Global, workflow & node level
The configuration options can be changed globally (i.e. for all workflows), for just a workflow, or for just a node. The implementations look as follows (note that you should first create directories if you want to change `crashdump_dir` and `log_directory`): At the global level:
###Code
from nipype import config, logging
import os
os.makedirs('/output/log_folder', exist_ok=True)
os.makedirs('/output/crash_folder', exist_ok=True)
config_dict={'execution': {'remove_unnecessary_outputs': 'true',
'keep_inputs': 'false',
'poll_sleep_duration': '60',
'stop_on_first_rerun': 'false',
'hash_method': 'timestamp',
'local_hash_check': 'true',
'create_report': 'true',
'crashdump_dir': '/output/crash_folder',
'use_relative_paths': 'false',
'job_finished_timeout': '5'},
'logging': {'workflow_level': 'INFO',
'filemanip_level': 'INFO',
'interface_level': 'INFO',
'log_directory': '/output/log_folder',
'log_to_file': 'true'}}
config.update_config(config_dict)
logging.update_logging(config)
###Output
_____no_output_____
###Markdown
At the workflow level:
###Code
from nipype import Workflow
wf = Workflow(name="config_test")
# Change execution parameters
wf.config['execution']['stop_on_first_crash'] = 'true'
# Change logging parameters
wf.config['logging'] = {'workflow_level' : 'DEBUG',
'filemanip_level' : 'DEBUG',
'interface_level' : 'DEBUG',
'log_to_file' : 'True',
'log_directory' : '/output/log_folder'}
###Output
_____no_output_____
###Markdown
At the node level:
###Code
from nipype import Node
from nipype.interfaces.fsl import BET
bet = Node(BET(), name="config_test")
bet.config = {'execution': {'keep_unnecessary_outputs': 'false'}}
###Output
_____no_output_____ |
notebook/2020.02.05_IGfromDist-rndmSeed-proteinsOnly-adenoContrastive.ipynb | ###Markdown
Sample Prep
###Code
samples = pd.read_csv('../data/TCGA/rna-seq_adeno/meta/gdc_sample_sheet.2020-01-27.tsv', sep="\t")
# get file type
samples['data'] = [val[1] for i,val in samples['File Name'].str.split(".").items()]
samples['Project ID'].value_counts()
###Output
_____no_output_____
###Markdown
Samples with RNAseq adjacent normal tissue
###Code
samples['Sample Type'].value_counts()
samples.loc[samples['Sample Type']=='Primary Tumor, Primary Tumor', 'Sample Type'] = 'Primary Tumor'
samples.loc[samples['Sample Type']=='Solid Tissue Normal, Solid Tissue Normal', 'Sample Type'] = 'Solid Tissue Normal'
samples['Sample Type'].value_counts()
# all cases with adjacent normal tissue
cases = samples[samples['Sample Type']=='Solid Tissue Normal']['Case ID']
# disparity in cases
samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Primary Tumor')
& (samples['data']=='FPKM')]['Case ID'].nunique()
samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Solid Tissue Normal')
& (samples['data']=='FPKM')]['Case ID'].nunique()
# divide, join, subset
case_tumor = samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Primary Tumor') &
(samples['data']=='FPKM')]
case_norm = samples[(samples['Case ID'].isin(cases)) & (samples['Sample Type']=='Solid Tissue Normal') &
(samples['data']=='FPKM')]
cases = pd.merge(case_tumor['Case ID'], case_norm['Case ID'])['Case ID']
cases.shape
case_tumor = case_tumor[case_tumor['Case ID'].isin(cases)]
case_norm = case_norm[case_norm['Case ID'].isin(cases)]
cases = pd.concat([case_tumor, case_norm])
case_tumor.shape
case_norm.shape
cases.shape
###Output
_____no_output_____
###Markdown
Map Ensembl genes to Proteins
###Code
id_map = pd.read_csv("/srv/home/wconnell/keiser/data/uniprot_mapping_ids/map_ensembl_uniprot.csv")
reviewed_proteins = pd.read_csv("/srv/home/wconnell/keiser/data/uniprot_mapping_ids/TCGA_rnaseq_uniprot_features.tab.gz", sep="\t")
proteins = pd.merge(id_map, reviewed_proteins, left_on='UNIPROT_ID', right_on='Entry name')
proteins['hgnc'] = [gene.split(";")[0] for gene in proteins['Gene names (primary )']]
proteins.shape
###Output
_____no_output_____
###Markdown
Dataset Prep
###Code
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
target = 'Sample Type'
cases[target] = cases[target].astype('category')
train, test = train_test_split(cases)
train[target].value_counts()
test[target].value_counts()
import torch
from torch.optim import lr_scheduler
import torch.optim as optim
from torch.autograd import Variable
#torch.manual_seed(123)
from trainer import fit
import visualization as vis
import numpy as np
cuda = torch.cuda.is_available()
print("Cuda is available: {}".format(cuda))
classes = {key:val for val,key in enumerate(train[target].cat.categories.values)}
classes
from tcga_datasets import TCGA, SiameseTCGA
root_dir = "../data/TCGA/rna-seq_adeno/"
batch_size = 1
train_dataset = TCGA(root_dir, samples=train, train=True, target=target, norm=False)
test_dataset = TCGA(root_dir, samples=test, train=False, target=target, norm=False)
scaler = StandardScaler()
train_dataset.data = pd.DataFrame(scaler.fit_transform(train_dataset.data),
index=train_dataset.data.index,
columns=train_dataset.data.columns)
test_dataset.data = pd.DataFrame(scaler.transform(test_dataset.data),
index=test_dataset.data.index,
columns=test_dataset.data.columns)
kwargs = {'num_workers': 10, 'pin_memory': True} if cuda else {'num_workers': 10}
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
###Output
_____no_output_____
###Markdown
Subset gene data to annotated proteins
###Code
assert np.array_equal(train_dataset.data.columns, test_dataset.data.columns)
parsed_cols = [ens[0] for ens in train_dataset.data.columns.str.split(".")]
train_dataset.data.columns, test_dataset.data.columns = parsed_cols, parsed_cols
protein_overlap_idx = np.isin(train_dataset.data.columns, proteins['ENSEMBL_ID'].values)
train_dataset.data = train_dataset.data.loc[:,protein_overlap_idx]
test_dataset.data = test_dataset.data.loc[:,protein_overlap_idx]
###Output
_____no_output_____
###Markdown
Write out test set for DE analysis
###Code
pd.to_pickle(test_dataset.data, "../data/tmp/test_dataset.pkl.gz")
map_cond = []
for label in test_dataset.labels:
if label == test_dataset.labels_dict['Primary Tumor']:
map_cond.append('Primary Tumor')
elif label == test_dataset.labels_dict['Solid Tissue Normal']:
map_cond.append('Solid Tissue Normal')
meta = pd.DataFrame({'label':test_dataset.labels,
'condition':map_cond},
index=test_dataset.data.index)
meta.to_pickle("../data/tmp/test_dataset_meta.pkl.gz")
###Output
_____no_output_____
###Markdown
Siamese Network
###Code
# Step 1 set up dataloader
root_dir = "../data/TCGA"
siamese_train_dataset = SiameseTCGA(train_dataset) # Returns pairs of images and target same/different
siamese_test_dataset = SiameseTCGA(test_dataset)
batch_size = 8
kwargs = {'num_workers': 10, 'pin_memory': True} if cuda else {}
siamese_train_loader = torch.utils.data.DataLoader(siamese_train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
siamese_test_loader = torch.utils.data.DataLoader(siamese_test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# Set up the network and training parameters
from tcga_networks import EmbeddingNet, SiameseNet
from losses import ContrastiveLoss
from metrics import AccumulatedAccuracyMetric
# Step 2
n_samples, n_features = siamese_train_dataset.data.shape
embedding_net = EmbeddingNet(n_features)
# Step 3
model = SiameseNet(embedding_net)
if cuda:
model.cuda()
# Step 4
margin = 1.
loss_fn = ContrastiveLoss(margin)
lr = 1e-3
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1)
n_epochs = 20
# print training metrics every log_interval * batch_size
log_interval = 30
train_loss, val_loss = fit(siamese_train_loader, siamese_test_loader, model, loss_fn, optimizer, scheduler,
n_epochs, cuda, log_interval)
plt.plot(range(0, n_epochs), train_loss, 'rx-')
plt.plot(range(0, n_epochs), val_loss, 'bx-')
train_embeddings_cl, train_labels_cl = vis.extract_embeddings(train_loader, model)
vis.plot_embeddings(train_embeddings_cl, train_labels_cl, siamese_train_dataset.labels_dict)
val_embeddings_baseline, val_labels_baseline = vis.extract_embeddings(test_loader, model)
vis.plot_embeddings(val_embeddings_baseline, val_labels_baseline, siamese_test_dataset.labels_dict)
###Output
_____no_output_____
###Markdown
Integrated Gradients
Test the completeness axiom through comparison of different baselines: "Integrated gradients satisfy an axiom called completeness that the attributions add up to the difference between the output of F at the input x and the baseline x'."
###Code
import copy
from captum.attr import LayerActivation
from captum.attr import IntegratedGradients
def attribution_pairs(SiameseTCGA, exp, ctrl):
# subset different samples
negative_pairs = np.array(SiameseTCGA.test_pairs)
negative_pairs = negative_pairs[negative_pairs[:,2] == 0]
# map labels to integers
    ctrl = SiameseTCGA.labels_dict[ctrl]
    exp = SiameseTCGA.labels_dict[exp]
# ordered indices of samples
ctrl_data = [idx for pair in negative_pairs[:, :2] for idx in pair if np.isin(idx, SiameseTCGA.label_to_indices[ctrl])]
exp_data = [idx for pair in negative_pairs[:, :2] for idx in pair if np.isin(idx, SiameseTCGA.label_to_indices[exp])]
# data
ctrl_data = Variable(SiameseTCGA.test_data[ctrl_data], requires_grad=True)
exp_data = Variable(SiameseTCGA.test_data[exp_data], requires_grad=True)
return ctrl_data, exp_data
###Output
_____no_output_____
###Markdown
IG with Control vector
###Code
ctrl_data, exp_data = attribution_pairs(siamese_test_dataset, exp='Primary Tumor', ctrl='Solid Tissue Normal')
from torch.nn import PairwiseDistance
pdist = PairwiseDistance(p=2)
pdist
ig = IntegratedGradients(model.get_dist)
torch.cuda.empty_cache()
###Output
_____no_output_____
###Markdown
Healthy as baseline
###Code
attr, delta = ig.attribute(exp_data.cuda(), ctrl_data.cuda(), target=None, n_steps=50, return_convergence_delta=True,
additional_forward_args=(ctrl_data.cuda(), pdist))
attr = attr.cpu().detach().numpy()
delta
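# Sanity-check of the completeness axiom quoted above (a sketch): the attributions
# for each pair should sum to F(input) - F(baseline); captum reports the residual
# of that identity as `delta`, so small values mean a good approximation.
print('attribution row sums (first 5):', attr.sum(axis=1)[:5])
print('max |convergence delta|:', delta.abs().max().item())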
feat_imp = pd.DataFrame(data=attr.mean(axis=0), index=train_dataset.data.columns, columns=['Attribution'])
feat_imp.shape
feat_imp.describe()
feat_imp.nlargest(10, columns='Attribution')
###Output
_____no_output_____
###Markdown
Protein Feature Attributions only
###Code
proteins['ENSEMBL_ID'].values.shape
attr.shape[1]
feat_imp = pd.DataFrame(data=attr.mean(axis=0), index=train_dataset.data.columns, columns=['Attribution'])
feat_imp.shape
feat_imp = pd.merge(feat_imp, proteins.drop_duplicates(subset='ENSEMBL_ID'), left_index=True, right_on='ENSEMBL_ID', how='left').sort_values(by='Attribution', ascending=False).reset_index(drop=True)
feat_imp.shape
feat_imp.to_pickle("../data/tmp/attr_avg.pkl.gz")
###Output
_____no_output_____
###Markdown
Now go to /srv/home/wconnell/github/diffxpy/notebook/2020.02.05_test_DE_analysis and run
###Code
feat_imp.columns
feat_imp.head()
feat_imp[['Attribution', 'hgnc', 'Protein names', 'Gene ontology (biological process)', 'Gene ontology (molecular function)']]
###Output
_____no_output_____ |
Lorenz/FigS_Markovianity.ipynb | ###Markdown
Coarse-grained estimate
###Code
f = h5py.File(dir_path+'Lorenz/kinetic_analysis/combined_coarse_tscales_results_3162_clusters.h5','r')
delay_range = np.array(f['delay_range'])
all_timps = np.array(f['timps'])
f.close()
dt=.01
all_timps = ma.masked_invalid(all_timps)  # mask NaNs/infs so the zero entries can be masked next
all_timps[all_timps==0]=ma.masked
mean = all_timps.mean(axis=0)
cil = np.percentile(all_timps,2.5,axis=0)
ciu = np.percentile(all_timps,97.5,axis=0)
plt.figure(figsize=(5,5))
plt.plot(delay_range*dt,mean)
plt.fill_between(delay_range*dt,cil,ciu,alpha=.6)
plt.xscale('log')
plt.xlim(2*dt,50)
plt.ylim(0,20)
# plt.savefig('coarse_tscales_Lorenz_log_ylim_95_ci.pdf')
plt.show()
mean = np.diff(all_timps,axis=1).mean(axis=0)
cil = np.percentile(np.diff(all_timps,axis=1),2.5,axis=0)
ciu = np.percentile(np.diff(all_timps,axis=1),97.5,axis=0)
plt.plot(delay_range[:-1]*dt,mean)
plt.fill_between(delay_range[:-1]*dt,cil,ciu,alpha=.5)
plt.xscale('log')
plt.xlim(2*dt,50)
plt.axhline(0,ls='--',c='k')
plt.show()
###Output
_____no_output_____
###Markdown
Full estimate
###Code
n_clusters=3162
f = h5py.File(dir_path+'/Lorenz/kinetic_analysis/combined_kinetic_results_{}_clusters_simpler.h5'.format(n_clusters),'r')
idx_range = np.sort(np.array(list(f.keys()),dtype=int))
delay_range = np.arange(2,6000,2)
all_timps = np.zeros((len(idx_range)-2,len(delay_range),2))
kidx=0
for idx in idx_range:
if len(list(f[str(idx)].keys()))>0:
if np.array(f[str(idx)]['timps']).sum()>0:
all_timps[kidx] = np.array(f[str(idx)]['timps'])
kidx+=1
f.close()
all_timps = ma.masked_invalid(all_timps)
all_timps[all_timps==0] = ma.masked
plt.figure(figsize=(5,5))
Lambda2 = 1/all_timps[:,:,0]+ 1/all_timps[:,:,1]
mean = ma.mean(2/Lambda2,axis=0)
cil = np.nanpercentile(ma.filled(2/Lambda2,np.nan),0.5,axis=0)
ciu = np.nanpercentile(ma.filled(2/Lambda2,np.nan),99.5,axis=0)
plt.plot(delay_range*dt,mean)
plt.fill_between(delay_range*dt,cil,ciu,alpha=.5)
plt.xscale('log')
# plt.yscale('log')
plt.xlim(0.02,50)
plt.ylim(0,20)
plt.show()
###Output
_____no_output_____ |
src/SMOTE_TrainingOnTwoYearsData_Price-Volume_GADF_CNN5.ipynb | ###Markdown
Training main loop
###Code
fileIndex = 0
encodedFeatures = ['Price', 'Volume']
encoded_feature_count = len(encodedFeatures)
minVicinity = 20
X_data=[]
Y_data=[]
NUMBER_OF_FILES_USEDTO_TRAIN = 20
for file_name in csvFileList[:NUMBER_OF_FILES_USEDTO_TRAIN]:
print(file_name)
coin_name = file_name.split('/')[-1].split('_')[1]
fileIndex +=1
obj = s3Client.get_object(Bucket = BUCKET_NAME, Key = file_name)
df = pd.read_csv(obj['Body'], index_col='0', parse_dates=True)
anomalyIndexes = df[df.Label==1].index
for i in anomalyIndexes:
mat = np.zeros((DATA_POINTS_PER_WINDOW, DATA_POINTS_PER_WINDOW, encoded_feature_count), 'float32')
try:
price = getGAFMatrix(df, 'Price', i, method='difference', span=10)
vol = getGAFMatrix(df, 'Volume', i, method='difference', span=10)
except:
print("Anomaly case={} exception occurred for coin when GADF {}".format(i.strftime('%Y-%m-%d_%H%M%S'), coin_name))
continue
mat[:,:,0]=price[0]
mat[:,:,1]=vol[0]
y=1
X_data.append(mat)
Y_data.append(y)
if ( len(df[df.Label==0].index) > int(len(anomalyIndexes)/2) ):
nonAnomalousIndexes = df[df.Label==0].sample(int(len(anomalyIndexes)/2), random_state=79).index
else:
# take a half from non anomalous indexes
nonAnomalousIndexes = df[df.Label==0].sample(int(len(df[df.Label==0])/2), random_state=79).index
surroundingIndexesToPosIndexes = getSurroundingIndexesToPosIndex(anomalyIndexes, df)
nonAnomalousIndexes = nonAnomalousIndexes.union(surroundingIndexesToPosIndexes[:int(len(surroundingIndexesToPosIndexes)/2)])
print("number of non anom cases={}".format(len(nonAnomalousIndexes)))
print("number of non anom cases={}".format(len(anomalyIndexes)))
for i in nonAnomalousIndexes:
mat = np.zeros((DATA_POINTS_PER_WINDOW, DATA_POINTS_PER_WINDOW, encoded_feature_count), 'float32')
try:
price = getGAFMatrix(df, 'Price', i, method='difference', span=10)
vol = getGAFMatrix(df, 'Volume', i, method='difference', span=10)
except:
print("NonAnomaly case={} exception occurred for coin when GADF {}".format(i.strftime('%Y-%m-%d_%H%M%S'), coin_name))
continue
mat[:,:,0]=price[0]
mat[:,:,1]=vol[0]
y=0
X_data.append(mat)
Y_data.append(y)
print('-------------- processed files %d' %fileIndex)
print(psutil.virtual_memory())
Y_dataArr = np.array(Y_data)
X_dataArr = np.array(X_data)
ax = sns.countplot(x=Y_dataArr, palette="Set3")
def create_gadfcnn5_model():
cnn=Sequential()
cnn.add(Conv2D(filters=64, kernel_size=(2,2), padding='same', activation='relu', input_shape=(INPUT_MATRIX_WIDTH, INPUT_MATRIX_WIDTH, ENCODED_FEATURES)))
cnn.add(Conv2D(filters=64, kernel_size=(2,2), padding='same', activation='relu'))
cnn.add(Dropout(0.25))
cnn.add(Flatten())
cnn.add(Dense(256, activation='relu'))
cnn.add(Dropout(0.5))
cnn.add(Dense(1, activation='sigmoid'))
cnn.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return cnn
kf = StratifiedKFold(n_splits=10)
history = []
confusions= []
classifReports= []
fold = 0
for train, test in kf.split(X_dataArr, Y_dataArr):
print('Running fold [%d]'.ljust(100,'*') %fold)
fold +=1
cnn=create_gadfcnn5_model()
x_train, x_test = X_dataArr[train], X_dataArr[test]
y_train, y_test = Y_dataArr[train], Y_dataArr[test]
hist = cnn.fit(x=x_train, y=y_train, validation_split=0.2, epochs=20, batch_size=500, verbose=0)
history.append(hist)
y_pred = cnn.predict(x_test)
y_pred_R = np.round(y_pred)
conf = confusion_matrix(y_test, y_pred_R)
confusions.append(conf)
clfr = classification_report(y_test, y_pred_R, output_dict=True)
print(clfr)
classifReports.append(clfr)
import statistics
f1s = [rep['macro avg']['f1-score'] for rep in classifReports]
recalls = [rep['macro avg']['recall'] for rep in classifReports]
precisions = [rep['macro avg']['precision'] for rep in classifReports]
print(statistics.variance(f1s))
print(statistics.variance(recalls))
print(statistics.variance(precisions))
print(statistics.stdev(f1s))
print(statistics.stdev(recalls))
print(statistics.stdev(precisions))
np.where(y_pred_R==1)
j=9
plt.plot(history[j].history['acc'])
plt.plot(history[j].history['val_acc'])
plt.legend(['acc','val_acc'])
j=9
plt.plot(history[j].history['loss'])
plt.plot(history[j].history['val_loss'])
plt.legend(['loss','val_loss'])
import seaborn as sns
finConf=np.zeros((2,2), dtype=int)
for elem in confusions:
for i in range(2):
for j in range(2):
finConf[i][j] += elem[i][j]
labels = ['True Neg','False Pos','False Neg','True Pos']
labels = np.asarray(labels).reshape(2,2)
sns.heatmap(finConf/np.sum(finConf), annot=True, fmt='.2%', cmap='Blues')
macroPrec=[]
macroRecall=[]
macrof1=[]
for elem in classifReports:
macroPrec.append(elem['macro avg']['precision'])
macroRecall.append(elem['macro avg']['recall'])
macrof1.append(elem['macro avg']['f1-score'])
print(np.mean(macroPrec))
print(np.mean(macroRecall))
print(np.mean(macrof1))
weighPrec=[]
weighRecall=[]
weighf1=[]
for elem in classifReports:
weighPrec.append(elem['weighted avg']['precision'])
weighRecall.append(elem['weighted avg']['recall'])
weighf1.append(elem['weighted avg']['f1-score'])
print(np.mean(weighPrec))
print(np.mean(weighRecall))
print(np.mean(weighf1))
###Output
_____no_output_____
###Markdown
Do a model train using X and Y created from first 20 files
###Code
gadfCnn5 = create_gadfcnn5_model()
hist = gadfCnn5.fit(x=X_dataArr, y=Y_dataArr, validation_split=0.2, epochs=10, batch_size=500, verbose=0)
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.legend(['acc','val_acc'])
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.legend(['loss','val_loss'])
modelSaveLocOnDisk = '<path>/GADF_CNN5.h5'
gadfCnn5.save(modelSaveLocOnDisk)
print("Saved model to disk at {}".format(modelSaveLocOnDisk))
from numpy import loadtxt
from keras.models import load_model
model = load_model(modelSaveLocOnDisk)
model.summary()
# evaluate the model
score = model.evaluate(X_dataArr, Y_dataArr, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], score[1]*100))
###Output
_____no_output_____ |
4_ANN/5_artificial_neural_network_answers.ipynb | ###Markdown
Artificial Neural Networks In this workshop we are going to implement a simple feed-forward artificial neural network (ANN) from scratch. The aim of this exercise is to give you a deeper understanding of how ANNs work "under the hood". Moreover, in this workshop, we are going to train an ANN for a classification task. Note that we do not have to come up with a particularly efficient ANN implementation (i.e., the one that works fast on large datasets). Rather our priority is to *develop code that works*, and is *easy to understand*. Also this exercise is not all about coding, but includes doing some simple maths too.
###Code
%pylab inline
import numpy as np
import matplotlib.pyplot as plt
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
The Dataset We start with generating the data for binary classification. We are going to re-use a slightly modified dataset generation funciton from last week. The difference is that now the two classes are encoded as $0$ and $1$. In addition, we are not going to use collections any more.
###Code
def generate_s_shaped_data(gap=3):
X = np.random.randn(80, 2)
X[10:20] += np.array([3, 4])
X[20:30] += np.array([0, 8])
X[30:40] += np.array([3, 12])
X[40:50] += np.array([gap, 0])
X[50:60] += np.array([3 + gap, 4])
X[60:70] += np.array([gap, 8])
X[70:80] += np.array([3 + gap, 12])
y = np.hstack([np.zeros(40), np.ones(40)])
return X, y
###Output
_____no_output_____
###Markdown
We will use this function to generate data that is in general separable, but requires a non-linear separation boundary.
###Code
X,y = generate_s_shaped_data(5)
plt.plot(X[y==0,0], X[y==0,1], "o")
plt.plot(X[y==1,0], X[y==1,1], "o")
###Output
_____no_output_____
###Markdown
The Model Next, we define a particular ANN configuration that we are going to implement (see the figure below). We are working with two-dimensional input, hence two input units. Furthermore, we are going to do binary classification, for which one output unit would be sufficient. However, just to try backpropagation with multidimensional output, we are going to use two output units and their values will be indicative of conditional probabilities of each class $P(y=class_i|\bf{x},\bf{v},\bf{w})$. Finally, the number of hidden units $p$ will be provided by the user. ![](http://www.andreykan.com/stuff/workshop5-ann.png) The equations define how the model is computed. Here $\sigma$ denotes the logistic function. The derivatives are not used during model computation (forward computation), but we are going to need them soon during training. We are going to implement this ANN. Note that you can use the *tanh* function from numpy, but we need to implement the logistic function.
###Code
def logistic(s):
    """Numerically safe logistic (sigmoid) function for a scalar s."""
    if s > 100:
        return 1.0  # saturates at 1 for large positive inputs
    if s < -100:
        return 0.0  # avoid overflow in exp(-s) for large negative inputs
    return 1 / (1 + np.math.exp(-s))
###Output
_____no_output_____
###Markdown
First, let's implement the forward computation for a single instance given some parameter values. Note that this function returns the output layer values.
###Code
# x is a 2 element input representing a single training instance;
#
# V is a matrix with 3 rows and p columns, where the three rows
# correspond to the bias and weights for the two inputs,
# and columns correspond to hidden units;
#
# W is a matrix with (p+1) rows and 2 columns, where the rows
# correspond to the bias and p hidden units, and columns
# correspond to output elements;
#
# returns: a two element output layer, and a vector of hidden
# node values with (p+1) elements, where the first
# element is constant 1
#
def compute_forward(x,V,W):
# append input, so that the bias can be handled naturally
x_ext = np.append(1, x)
# get the number of hidden units
p = V.shape[1]
u = np.zeros((p))
# iterate over hidden units
for i in range(p):
r = x_ext.dot(V[:,i])
u[i] = np.tanh(r)
# append hidden layer, so that the bias can be handled naturally
u_ext = np.append(1, u)
# set the outputs
z = np.zeros((2))
z[0] = logistic(u_ext.dot(W[:,0]))
z[1] = logistic(u_ext.dot(W[:,1]))
return z, u_ext
###Output
_____no_output_____
###Markdown
Next, let's implement a function that makes predictions based on the output layer values. This function is going to make predictions for the entire dataset. After implementing these two functions, you might like to play with toy data and manually picked parameter values just to validate the implementation.
###Code
# X is a matrix with N rows and 2 columns, where
# rows represent training instances
#
# V and W have the same interpretation as in compute_forward()
#
# returns: an N element vector with predictions (0 or 1 each)
#
def ann_predict(X,V,W):
num_examples = X.shape[0]
y_pred = np.zeros(num_examples)
for i in range(num_examples):
curr_x = X[i,:]
z,_ = compute_forward(curr_x, V, W)
if (z[0] < z[1]):
y_pred[i] = 1
return y_pred
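# Quick sanity check with hand-picked parameters, as suggested above (a sketch):
# with all-zero weights every output is logistic(0) = 0.5, so z[0] is never
# strictly smaller than z[1] and every instance is predicted as class 0.
toy_V = np.zeros((3, 2))  # 2 hidden units
toy_W = np.zeros((3, 2))
print(ann_predict(np.array([[0.0, 0.0], [5.0, 5.0]]), toy_V, toy_W))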
###Output
_____no_output_____
###Markdown
Training the ANN Loss Function Now that we have a model, we need to develop a training algorithm. Recall that the idea is to define a loss function and then find parameter values that minimise the loss. Each training example comes with a true label which is either $0$ or $1$. For convenience, we are going to encode the label as a two-component vector $\bf{y}$, such that only one of the components is one and the other one is zero. Moreover, we will make a simplifying assumption that the two components are independent to get $$P\left(\bf{y}|\bf{x},\bf{V},\bf{W}\right)=\prod\limits_{k=1,2}z_k\left(\bf{x},\bf{V},\bf{W}\right)^{y_k}\left(1 - z_k\left(\bf{x},\bf{V},\bf{W}\right)\right)^{1-y_k}$$ This is clearly a wrong assumption, but it is going to be good enough for us to get an ANN working. This assumption can be dropped by using an additional special layer called *soft-max layer*, but this is beyond the scope of this tutorial. Taking the logarithm of this probability and inverting the sign, so that maximising probability leads to minimising the loss, gives us cross-entropy loss (for a single training example) $$l\left(\bf{V},\bf{W}\right)=-\sum\limits_{k=1,2}\left[y_kln(z_k)+(1-y_k)ln\left(1 - z_k\right)\right]$$ Implement a function that computes the loss for a single training example and true label encoded as vector $\bf{y}$.
###Code
# x, V and W have the same interpretation as in compute_forward()
#
# y is a two element encoding of a binary label, either t[0] = 1
# and t[1] = 0, or the other way around
#
# returns: loss for a given training example and parameters
#
def compute_loss(x,y,V,W):
z,_ = compute_forward(x,V,W)
z_off = 0.000001 # to prevent taking log of zero
l1 = -y[0]*np.log(z[0] + z_off) - (1 - y[0])*np.log(1 - z[0] + z_off)
l2 = -y[1]*np.log(z[1] + z_off) - (1 - y[1])*np.log(1 - z[1] + z_off)
l = l1 + l2
return l
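# Quick check (a sketch): parameters that make the network very confident in
# class 0 should give a loss close to zero for the label y = [1, 0].
chk_V = np.zeros((3, 2))
chk_W = np.array([[10.0, -10.0], [0.0, 0.0], [0.0, 0.0]])
print(compute_loss(np.array([0.0, 0.0]), np.array([1.0, 0.0]), chk_V, chk_W))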
###Output
_____no_output_____
###Markdown
Backpropagation We are going to use stochastic gradient descent, and in each iteration of this algorithm we need to compute parameter updates. The updates are based on partial derivatives $\frac{\partial l}{\partial v_{ij}}$ and $\frac{\partial l}{\partial w_{jk}}$. We are going to compute these derivatives using auxiliary quantities $\delta_k$ and $\varepsilon_{jk}$. Note that the multidimensional output, $\varepsilon_{jk}$, has two indices. Also note that the equations below assume that $x$ is a three-dimensional vector, after appending with a constant one to capture the bias, and, similarly, that $u$ is a $(p+1)$-dimensional vector with the first element constant one. Let $l_k=-y_kln(z_k)-(1-y_k)ln\left(1 - z_k\right)$. The auxiliary quantities are $\delta_k=\frac{\partial l}{\partial s_k}$ and $\varepsilon_{jk}=\frac{\partial l_k}{\partial r_j}$. Use the identities provided in the ANN figure above to verify that $\delta_k=\frac{\partial l_k}{\partial z_k}\frac{\partial z_k}{\partial s_k}=(z_k-y_k)$ $\frac{\partial l}{\partial w_{jk}}=\delta_ku_{j}$ $\varepsilon_{jk}=\frac{\partial l_k}{\partial z_k}\frac{\partial z_k}{\partial s_k}\frac{\partial s_k}{\partial u_j}\frac{\partial u_j}{\partial r_j}=\delta_k(1-u^2_j)w_{jk}$ $\frac{\partial l}{\partial v_{ij}}=\frac{\partial l_1}{\partial v_{ij}}+\frac{\partial l_2}{\partial v_{ij}}=\varepsilon_{j1}x_i+\varepsilon_{j2}x_i$ Now use these equations to implement a single update step.
###Code
# x, V and W have the same interpretation as in compute_forward()
#
# y has the same interpretation as in compute_loss()
#
# returns: updated estimates of V and W
#
def update_params(x,y,V,W,eta):
### forward computation
z, u_ext = compute_forward(x,V,W)
### backward computation
d = z - y
dW = np.zeros((W.shape))
dW[:,0] = d[0]*u_ext
dW[:,1] = d[1]*u_ext
e = np.zeros((W.shape))
e[:,0] = d[0]*W[:,0]*(1-np.square(u_ext))
e[:,1] = d[1]*W[:,1]*(1-np.square(u_ext))
# append input, so that the bias can be handled naturally
x_ext = np.append(1, x)
dV = np.zeros((V.shape))
for i in range(x_ext.shape[0]):
v = e[:,0].T*x_ext[i] + e[:,1].T*x_ext[i]
dV[i,:] = v[1:]
V += -eta*dV
W += -eta*dW
return V,W
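# Optional finite-difference check of the dl/dw_jk formula above (a sketch, not
# part of the workshop solution; relies only on compute_forward/compute_loss).
def grad_check_w(x, y, V, W, j=0, k=0, eps=1e-5):
    z, u_ext = compute_forward(x, V, W)
    analytic = (z[k] - y[k]) * u_ext[j]  # delta_k * u_j
    W_plus = W.copy()
    W_plus[j, k] += eps
    W_minus = W.copy()
    W_minus[j, k] -= eps
    numeric = (compute_loss(x, y, V, W_plus) - compute_loss(x, y, V, W_minus)) / (2 * eps)
    return analytic, numeric  # the two values should approximately agree

print(grad_check_w(np.zeros(2), np.array([1.0, 0.0]), np.random.randn(3, 2), np.random.randn(3, 2)))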
###Output
_____no_output_____
###Markdown
And finally use the single update step in a function that performs training.
###Code
# X is a matrix with N rows and 2 columns, where
# rows represent training instances
#
# V0 and W0 are starting parameter values
# as before, V0 is a matrix with 3 rows and p columns, where
# the three rows correspond to the bias and weights for
# the two inputs, and columns correspond to hidden units;
#
# W0 is a matrix with (p+1) rows and 2 columns, where the rows
# correspond to the bias and p hidden units, and columns
# correspond to output elements;
#
# y is an N element array of true labels
#
# returns: trained values for V and W, as well as total loss
# after each training epoch
#
def ann_train(X,y,V0,W0):
# use starting values
V = V0
W = W0
# step scale; note that this is usually changed (decreased)
# between iterations, but we won't bother here
eta = 0.01
# number of rounds over the data
num_epochs = 50
# number of training examples
num_examples = X.shape[0]
# calculate total loss in each epoch
l_total = np.zeros(num_epochs)
# make several rounds over the data
for j in range(num_epochs):
# iterate trough each training example
l_total[j] = 0
for i in range(num_examples):
curr_x = X[i,:]
curr_y = np.zeros(2)
curr_y[0] = (y[i] == 0)
curr_y[1] = 1 - curr_y[0]
V,W = update_params(curr_x,curr_y,V,W,eta)
l_total[j] += compute_loss(curr_x,curr_y,V,W)
return V,W,l_total
###Output
_____no_output_____
###Markdown
Let's try everything in action! We will start from some randomly generated parameters, perform training and compare the accuracy before and after the training. *Why not start with all parameters equal to zero?* (Hint: with all-zero weights every hidden unit computes the same output and receives an identical update, so the units never learn different features -- random initialisation breaks this symmetry.)
###Code
# number of hidden units
p = 7
# initialisation
V0 = np.random.randn(2 + 1, p)
W0 = np.random.randn(p + 1, 2)
y_pred = ann_predict(X,V0,W0)
print('Proportion misclassified:')
prop = 1 - (np.sum(y_pred == y) / float(y.shape[0]))
#if prop > 0.5:
# prop = 1 - prop
print(prop)
V,W,l_total = ann_train(X,y,V0,W0)
plt.figure()
plt.plot(range(l_total.shape[0]), l_total, '.-')
y_pred = ann_predict(X,V,W)
print('Proportion misclassified:')
prop = 1 - (np.sum(y_pred == y) / float(y.shape[0]))
#if prop > 0.5:
# prop = 1 - prop
print(prop)
###Output
Proportion misclassified:
0.4625
Proportion misclassified:
0.1375
|
doc/3_basic_dataframe_operations.ipynb | ###Markdown
Basic dataframe operations
In this chapter we will explore some of the basic operations you can perform on dataframes. The first task is to read some data into a dataframe.
###Code
import pandas as pd
from audiolabel import read_label
flist = ['resource/two_plus_two_1.tg', 'resource/three_plus_five_1.tg']
[phdf, wddf] = read_label(flist, 'praat', addcols=['fidx'])
wddf
###Output
_____no_output_____
###Markdown
Viewing dataframes
Here are a few ways to interact with the contents of a dataframe. Let's start with a dataframe object. The dot `'.'` following the dataframe's name is how we access its methods. Try clicking after the dot in the following cell and then press the `Tab` key. You'll see a list of available methods. Scroll through the list with the arrow keys to review the possible actions you can perform on a dataframe.
###Code
phdf.
###Output
_____no_output_____
###Markdown
Chapter 1 introduced the `head()` method to show the first few rows of a dataframe. The `tail()` method shows the last few rows.
###Code
wddf.head()
wddf.tail()
###Output
_____no_output_____
###Markdown
Getting basic dataframe info
A number of dataframe attributes give detailed information about its contents. The `shape` attribute tells you how many rows and columns are present.
###Code
wddf.shape # rows, columns
###Output
_____no_output_____
###Markdown
The `len()` function returns the number of dataframe rows. Note that `len()` is not a dataframe method.
###Code
len(wddf) # not wddf.len()
wddf.shape[0] == len(wddf)
###Output
_____no_output_____
###Markdown
The column labels are accessible through the `columns` attribute
###Code
wddf.columns
###Output
_____no_output_____
###Markdown
The length of the `columns` is the number of columns.
###Code
len(wddf.columns)
wddf.shape[1] == len(wddf.columns)
###Output
_____no_output_____
###Markdown
To find out what kinds of values are stored in your columns, use the `dtypes` attribute.
###Code
wddf.dtypes
###Output
_____no_output_____
###Markdown
You can also view the dataframe's index, which is used in row selection and combining operations.
###Code
wddf.index  # append .values to see the underlying array
###Output
_____no_output_____
###Markdown
Renaming columns
Sometimes you need to assign names to your columns, perhaps because you read a headerless text file with `read_csv()` and didn't assign column names with the `names` parameter. You can add names to an existing dataframe by assigning to the `columns` attribute.
###Code
nhdf = pd.read_csv('resource/two_plus_two_1.nohead.ifc', sep='\t', header=None)
nhdf.tail()
nhdf.columns = ['sec', 'rms', 'f1', 'f2', 'f3', 'f4', 'f0']
nhdf.tail()
###Output
_____no_output_____
###Markdown
You can overwrite existing column names. The next cell converts all the column names to upper case. Execute the cell, then try converting back to lower case with `lower()`.
###Code
phdf.columns = [c.upper() for c in phdf.columns]
phdf.tail()
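# ...and back to lower case, as suggested above
phdf.columns = [c.lower() for c in phdf.columns]
phdf.tail()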
###Output
_____no_output_____
###Markdown
If you want to rename only some of the columns, you can use `rename()` with a dict that maps old names to new names.
###Code
nhdf = nhdf.rename(columns={'sec': 'seconds', 'rms': 'rootmnsq'})
nhdf.tail()
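# The same rename can also be done in place (a sketch on a copy, so nhdf itself
# is left untouched); see the note on inplace=True below.
tmpdf = nhdf.copy()
tmpdf.rename(columns={'seconds': 'sec', 'rootmnsq': 'rms'}, inplace=True)
tmpdf.columns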
###Output
_____no_output_____
###Markdown
Notice that dataframe methods do not generally modify an existing dataframe unless you ask for modification. These methods usually return a copy of the modified dataframe, and you can assign that to a variable of the same name as the input. Alternatively, you can use `inplace=True` as a parameter to modify a dataframe directly.
Getting summary information
You can get a quick summary of your dataframe with `describe()`.
###Code
nhdf.describe()
###Output
_____no_output_____
###Markdown
Many other descriptive statistics are available as dataframe methods. See the pandas docs for a [convenient list of available methods](https://pandas.pydata.org/pandas-docs/stable/basics.html#descriptive-statistics).
###Code
nhdf.mean()
nhdf.std()
###Output
_____no_output_____ |
docs/_static/notebooks/data_parallel.ipynb | ###Markdown
Using Torchbearer with PyTorch DataParallel
PyTorch offers a very simple way to take advantage of multiple GPUs when training large models on lots of data through [DataParallel](https://pytorch.org/docs/stable/nn.html?highlight=data%20parallel#torch.nn.DataParallel). They have a very good tutorial on how to use this for base PyTorch models [here](https://pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html) and we recommend you familiarise yourself with it before moving on with this tutorial. **Note**: If your model doesn't require state then Torchbearer works immediately with DataParallel without needing any of the steps in this tutorial. Unfortunately, DataParallel cannot directly pass the main Torchbearer state dictionary to models running on multiple GPUs, which leads to problems with Torchbearer models needing to access and manipulate state on the forward pass. In this tutorial we demonstrate a callback that allows us to use such models across multiple devices without loss of functionality. **Note**: The easiest way to use this tutorial is as a colab notebook, which allows you to dive in with no setup. We recommend you enable a free GPU with:
> **Runtime** → **Change runtime type** → **Hardware Accelerator: GPU**
Install Torchbearer
First we install torchbearer if needed.
###Code
try:
import torchbearer
except:
!pip install -q torchbearer
import torchbearer
# If problems arise, try
# !pip install git+https://github.com/pytorchbearer/torchbearer
# import torchbearer
print(torchbearer.__version__)
# Create some state keys for this example
A_KEY = torchbearer.state_key('a_key')
NEW_KEY = torchbearer.state_key('new_key')
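# (Aside) As noted in the intro, a model that never touches torchbearer state works
# with DataParallel directly -- none of the machinery below is needed. A minimal
# hypothetical sketch with toy data:
import torch
import torch.nn as nn
import torch.optim as optim
from torchbearer import Trial

plain = nn.Sequential(nn.Linear(10, 2))  # plain model, no state access
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.cuda.is_available():
    plain = nn.DataParallel(plain)
stateless_trial = Trial(plain, optim.SGD(plain.parameters(), lr=0.1),
                        nn.CrossEntropyLoss(), metrics=['loss']).to(device)
stateless_trial.with_train_data(torch.randn(32, 10), torch.randint(0, 2, (32,)))
# stateless_trial.run(epochs=1)  # uncomment to actually train the toy model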
###Output
0.4.0.dev
###Markdown
UnpackState CallbackThe callback that we will be using throughout this example to interface with state in DataParallel is called the [UnpackState callback](https://torchbearer.readthedocs.io/en/latest/code/callbacks.htmlmodule-torchbearer.callbacks.unpack_state). This callback takes a list of keys which are the items in state that you wish to access and packs them as a dictionary (under the same keys) which gets passed to the model forward. For a quick example, we build a very simple model that just prints the input. Then we create an unpacker callback that asks for the keys torchbearer.X and a newly defined key A_KEY, which we populate before the run. We then run a Trial with this callback and model and observe that the model gets passed a dictionary with these two keys and their values.
###Code
import torch
import torch.nn as nn
from torchbearer.callbacks import UnpackState
from torchbearer import Trial
class TestModel(nn.Module):
def forward(self, x):
print(str(x).replace('\n', ''))
return x
unpacker = UnpackState(keys=[torchbearer.X, A_KEY])
t = Trial(TestModel(), callbacks=[unpacker])
t.state[A_KEY] = 'test'
_ = t.with_train_data(torch.ones(10, 1), torch.ones(10, 1), batch_size=1, steps=1).run()
###Output
_____no_output_____
###Markdown
Often when models interact with state they want to modify it in some way, by adding values or changing existing ones. The UnpackState callback allows this by updating the main state based on the model output. When a model returns a dictionary which includes a torchbearer.Y_PRED key, the state dictionary will be automatically updated based on this output. We demonstrate this below by printing a previously unpopulated value in state after a model forward pass.
###Code
class TestModel2(nn.Module):
def forward(self, x):
return {torchbearer.Y_PRED: x, NEW_KEY: 'New Key is here'}
unpacker = UnpackState(output_to_state=True)
@torchbearer.callbacks.on_step_training
def print_state(state):
print(state[NEW_KEY])
t = Trial(TestModel2(), callbacks=[unpacker, print_state])
_ = t.with_train_data(torch.ones(10, 1), torch.ones(10, 1), batch_size=1, steps=1).run()
###Output
_____no_output_____
###Markdown
Building a more Advanced ModelOne type of model that often needs to access state is a VAE. In this example we will take the same model from the VAE example notebook [here](https://torchbearer.readthedocs.io/en/latest/examples/notebooks.htmldeep-learning) and run it with DataParallel across all available GPUs. We define a very similar model, but modified to utilise the UnpackState callback method rather than the full state dictionary.
###Code
import torch.nn as nn
import torch.nn.functional as F
MU, LOGVAR = torchbearer.state_key('mu'), torchbearer.state_key('logvar')
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def decode(self, z):
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3)).view(-1, 1, 28, 28)
def forward(self, x):
mu, logvar = self.encode(x.view(-1, 784))
z = self.reparameterize(mu, logvar)
return {torchbearer.Y_PRED: self.decode(z), MU: mu, LOGVAR: logvar}
###Output
_____no_output_____
###Markdown
We now copy the loss functions and data related methods from the VAE example.
###Code
# LOSSES
def binary_cross_entropy(y_pred, y_true):
    BCE = F.binary_cross_entropy(y_pred.view(-1, 784), y_true.view(-1, 784), reduction='sum').view(1) # DataParallel doesn't like size([]) tensors
return BCE
def kld(mu, logvar):
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return KLD
main_loss = binary_cross_entropy
@torchbearer.callbacks.add_to_loss
def add_kld_loss_callback(state):
KLD = kld(state[MU], state[LOGVAR])
return KLD
# DATA
from torch.utils.data.dataset import Dataset
import torchvision
from torchvision import transforms
from torchbearer.cv_utils import DatasetValidationSplitter
class AutoEncoderMNIST(Dataset):
def __init__(self, mnist_dataset):
super().__init__()
self.mnist_dataset = mnist_dataset
def __getitem__(self, index):
character, label = self.mnist_dataset.__getitem__(index)
return character, character
def __len__(self):
return len(self.mnist_dataset)
BATCH_SIZE = 128
transform = transforms.Compose([transforms.ToTensor()])
# Define standard classification mnist dataset with random validation set
dataset = torchvision.datasets.MNIST('./data/mnist', train=True, download=True, transform=transform)
splitter = DatasetValidationSplitter(len(dataset), 0.1)
basetrainset = splitter.get_train_dataset(dataset)
basevalset = splitter.get_val_dataset(dataset)
basetestset = torchvision.datasets.MNIST('./data/mnist', train=False, download=True, transform=transform)
# Wrap base classification mnist dataset to return the image as the target
trainset = AutoEncoderMNIST(basetrainset)
valset = AutoEncoderMNIST(basevalset)
testset = AutoEncoderMNIST(basetestset)
traingen = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=8)
valgen = torch.utils.data.DataLoader(valset, batch_size=BATCH_SIZE, shuffle=True, num_workers=8)
testgen = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False, num_workers=8)
# IMAGING
from torchbearer.callbacks import imaging
targets = imaging.MakeGrid(torchbearer.TARGET, num_images=64, nrow=8)
targets = targets.on_test().to_pyplot().to_file('targets.png')
predictions = imaging.MakeGrid(torchbearer.PREDICTION, num_images=64, nrow=8)
predictions = predictions.on_test().to_pyplot().to_file('predictions.png')
###Output
_____no_output_____
###Markdown
Finally we can create the UnpackState callback and the Trial. We run with DataParallel across all the GPUs, which for this particular model is slower than just running on a single GPU, but for a very large model, this might be the only feasible way to run.
###Code
import torch.optim as optim
device = 'cuda' if torch.cuda.is_available() else 'cpu'
num_devices = torch.cuda.device_count()
model = VAE()
try:
model = nn.DataParallel(model.to('cuda'), device_ids=list(range(num_devices)))
print("Running on devices: {}".format(list(range(num_devices))))
except Exception as e:
print("Cannot initialise DataParallel model.")
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss = binary_cross_entropy
trial = Trial(model, optimizer, main_loss, metrics=['acc', 'loss'],
callbacks=[UnpackState(output_to_state=True), add_kld_loss_callback, predictions, targets]).to(device)
trial.with_generators(train_generator=traingen, val_generator=valgen, test_generator=testgen)
_ = trial.run(epochs=10, verbose=1)
trial.evaluate(data_key=torchbearer.TEST_DATA)
###Output
Running on devices: [0, 1]
|
pandas/07_Visualization/Chipotle/Solutions.ipynb | ###Markdown
Visualizing Chipotle's Data This time we are going to pull data directly from the internet. Special thanks to: https://github.com/justmarkham for sharing the dataset and materials. Step 1. Import the necessary libraries
###Code
import pandas as pd
import collections
import matplotlib.pyplot as plt
# set this so the plots are rendered inline in the notebook
%matplotlib inline
###Output
_____no_output_____ |
notebooks/indicator_demo.ipynb | ###Markdown
Demonstrates how to load simulation results in an h5 and then pull them from orca.
###Code
import gc
import pandas as pd
import orca
from smartpy_sim_indicators import *
# location of the h5 containing sim results
# change this to wherever you have downloaded the results h5 to
sim_h5 = r'\\azsmart\AZSmartData\proj18_19\for_indicators.h5'
# output h5 containing indicator/aggregation results
out_h5 = r'D:\temp\indicators_results.h5'
# years present w/in the h5 results
list_store_years(sim_h5)
# tables present in the base year
# of these, most commonly used are buildings
list_store_tables(sim_h5, 'base')
# tables present in simulated years
list_store_tables(sim_h5, '2020')
# note: the parcels are not modified, so they only exist in the base
# and do not need to be loaded for simulated years
list_store_years(sim_h5, 'parcels')
# load all base year tables
load_tables(sim_h5, 'base')
orca.list_tables()
###Output
_____no_output_____
###Markdown
__data model overview__See variables.py for additional orca computed columns available. ___parcels___- Mostly contain geographic identifiers that are broadcast to other tables- Only need to be loaded from the base
###Code
orca.get_table('parcels').local.head()
###Output
_____no_output_____
###Markdown
___buildings___- Need to be loaded every year--even if not pulling attributes explicitly--otherwise the parcel broadcast to downstream tables (e.g. households) will fail.- Links to parcels via parcel_id column- Typical summary attributes: residential_units, building_type_name, non_residential_sqft, job_spaces
###Code
orca.get_table('buildings').local
###Output
_____no_output_____
###Markdown
___households___- Links to buildings via the building_id column.- Has a downstream dependency on persons, so if pulling person attributes you need to always load households also. - Typical summary attributes: persons, income_quintile, workers
###Code
orca.get_table('households').to_frame(['persons', 'income', 'income_quintile', 'workers'])
###Output
_____no_output_____
###Markdown
___persons___- Only contains persons in households- Links to households via household_id column- Note: if you only need a simple pop count, can just pull persons attribute from households table- Typical summary attributes: age, race_ethnicity, education level
###Code
orca.get_table('persons').local
###Output
_____no_output_____
###Markdown
___gq_persons___- Only contains persons in group quarters- Links directly to buildings via building_id column- Mostly just used to get total persons- Typical summary attributes: gq_type
###Code
orca.get_table('gq_persons').local
###Output
_____no_output_____
###Markdown
___seasonal_households___- Seasonal households and persons- Links directly to buildings via building_id column- Since we have no information on the attributes of seasonal persons, no need for additional table- Typical summary attributes: persons
###Code
orca.get_table('seasonal_households').local
###Output
_____no_output_____
###Markdown
__example indicator generation__Typically we: - Define a simple function that acts on the provided geography- Write a loop that iterates through the desired years: 1.) Load tables for the given year 2.) Get the summary for that year - Compile the results in some way 1.) Compile as columns, e.g. pop_2020, pop_2030, ... 2.) Compile as rows, 1 row for each year
###Code
def get_summary(by):
"""
Simple aggregation function.
Parameters:
----------
by: str or list of str
The columns to groupby.
Returns:
--------
pandas.DataFrame
"""
if not isinstance(by, list):
by = [by]
# pull tables
pers = orca.get_table('persons').to_frame()
j = orca.get_table('jobs').to_frame(by)
gq = orca.get_table('gq_persons').to_frame(by)
seas = orca.get_table('seasonal_households').to_frame(by + ['persons'])
# groupbys
sum_tab = pd.concat([
pers.groupby(by).size().to_frame('hh_pop'),
gq.groupby(by).size().to_frame('gq_pop'),
seas.groupby(by)['persons'].sum().to_frame('seas_persons'),
j.groupby(by).size().to_frame('jobs')
], axis=1).fillna(0)
# get the total resident pop (hh pop + gq pop)
sum_tab['total_pop'] = sum_tab['hh_pop'] + sum_tab['gq_pop']
return sum_tab
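# A manual version of the year loop described above (sketch only; the
# get_indicators() helper used below wraps this same pattern and only loads the
# tables it needs). Assumes load_tables() accepts a year key like the 'base' call earlier.
manual_sums = {}
for yr in ['2020', '2030', '2040']:
    load_tables(sim_h5, yr)                  # 1) load that year's tables into orca
    manual_sums[yr] = get_summary('county')  # 2) aggregate for the year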
# tables we need to process for the aggregation
tabs_to_process = [
'buildings',
'households',
'persons',
'seasonal_households',
'jobs'
]
# generate year-based indicators by mpa (Municipal Planning Area) and county
mpa_sums = get_indicators(
sim_h5,
[2020, 2030, 2040],
tabs_to_process,
['county', 'mpa'],
get_summary
)
mpa_sums.keys()
# indicators compiled as columns
compile_to_cols(mpa_sums, collapse_row_idx=False).head()
# indicators compiled as temporal rows
compile_to_rows(mpa_sums, collapse_row_idx=False).head()
###Output
_____no_output_____
###Markdown
___compute indicators across several groupings___
###Code
# things we want to aggregate by
by_items = {
'mpa': ['mpa', 'county'],
'city': ['city', 'county'],
'mazes': 'maz',
'tazes': 'taz',
'sections': 'section_id',
'hex': 'hex_id',
}
# get the sums
many_sums = get_indicators(
sim_h5,
[2018] + list(range(2020, 2056, 5)),
tabs_to_process,
by_items,
get_summary
)
many_sums.keys()
# compile as columns
as_cols = {}
for k, v in many_sums.items():
as_cols[k] = compile_to_cols(v)
as_cols['tazes']
# compile as rows
as_rows = {}
for k, v in many_sums.items():
as_rows[k] = compile_to_rows(v)
as_rows['hex']
# write result to h5
with pd.HDFStore(out_h5, mode='w', complevel=1) as s:
for k in many_sums.keys():
s['by_col/{}'.format(k)] = as_cols[k]
s['by_row/{}'.format(k)] = as_rows[k]
s = pd.HDFStore(out_h5, mode='r')
s.keys()
s['/by_col/tazes']
list(s['/by_col/tazes'].columns)
s['/by_row/sections']
s.close()
###Output
_____no_output_____ |
Cat vs Dog-2.ipynb | ###Markdown
Cat vs Dog part-2 (Binary class classification): Data Augmentation with ImageDataGenerator
In part-1, the training accuracy is close to 100%, and the validation accuracy is in the 70%-80% range. That was a great example of overfitting -- which in short means that it can do very well with images it has seen before, but not so well with images it hasn't. Let's see if we can do better to avoid overfitting -- and one simple method is to augment the images a bit. If you think about it, most pictures of a cat are very similar -- the ears are at the top, then the eyes, then the mouth etc. Things like the distance between the eyes and ears will always be quite similar too. What if we tweak the images to change this up a bit -- rotate the image, squash it, etc. That's what image augmentation is all about. And there's an API that makes it easy... Now take a look at the ImageDataGenerator. There are properties on it that you can use to augment the image.
```
# Updated to do image augmentation
train_datagen = ImageDataGenerator(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
```
These are just a few of the options available (for more, see the Keras documentation). Let's quickly go over what we just wrote:
* rotation_range is a value in degrees (0–180), a range within which to randomly rotate pictures.
* width_shift and height_shift are ranges (as a fraction of total width or height) within which to randomly translate pictures vertically or horizontally.
* shear_range is for randomly applying shearing transformations.
* zoom_range is for randomly zooming inside pictures.
* horizontal_flip is for randomly flipping half of the images horizontally. This is relevant when there are no assumptions of horizontal asymmetry (e.g. real-world pictures).
* fill_mode is the strategy used for filling in newly created pixels, which can appear after a rotation or a width/height shift.
Here's some code. Run it to see the impact.
###Code
!wget --no-check-certificate \
https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \
-O /tmp/cats_and_dogs_filtered.zip
#importing libraries
import os
import zipfile
import tensorflow as tf
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#unzip
local_zip = '/tmp/cats_and_dogs_filtered.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp')
zip_ref.close()
base_dir = '/tmp/cats_and_dogs_filtered'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
INPUT_SHAPE = (150, 150)
MODEL_INPUT_SHAPE = INPUT_SHAPE + (3,)
#HYPERPARAMETERS
LEARNING_RATE = 1e-4
BATCH_SIZE = 20
EPOCHS = 50
#model architecture
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape = MODEL_INPUT_SHAPE),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(lr=LEARNING_RATE),
metrics=['accuracy'])
#summary of model (including type of layer, Output shape and number of parameters)
model.summary()
#plotting model and saving its architecture picture
dot_img_file = '/tmp/model_1.png'
tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
'''
This code has changed. Now instead of the ImageGenerator just rescaling
the image, we also rotate and do other operations.
'''
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir, # This is the source directory for training images
target_size=INPUT_SHAPE, # All images will be resized to 150x150
batch_size=BATCH_SIZE,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
# Flow validation images in batches of 20 using test_datagen generator
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=INPUT_SHAPE,
batch_size=BATCH_SIZE,
class_mode='binary')
#Fitting data into model -> training model
history = model.fit(
train_generator,
steps_per_epoch=100, # steps = 2000 images / batch_size
epochs=EPOCHS,
validation_data=validation_generator,
validation_steps=50, # steps = 1000 images / batch_size
verbose=1)
#PLOTTING model performance
import matplotlib.pyplot as plt
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.figure()
plt.plot(epochs, loss, 'ro', label='Training Loss')
plt.plot(epochs, val_loss, 'r', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
I got accuracy: 0.7985 and val_accuracy: 0.7680. You can see that the training accuracy is now close to the validation accuracy (~76%-80%), so the overfitting is resolved. Notice that the training accuracy is lower than the one we got in part-1, but this model generalizes much better than that one.
###Code
###Output
_____no_output_____ |
docs/_static/demos/io/WriteMmtfCustomSubsetDemo.ipynb | ###Markdown
Write MMTF Subset Demo: a simple example writing a subset of MMTF files. Imports
###Code
from pyspark import SparkConf, SparkContext
from mmtfPyspark.io import mmtfReader, mmtfWriter
from mmtfPyspark.filters import ExperimentalMethods, Resolution, RFree
from mmtfPyspark.structureViewer import view_structure
###Output
_____no_output_____
###Markdown
Configure Spark
###Code
conf = SparkConf().setMaster("local[*]") \
.setAppName("WriteMMTFCustomSubset")
sc = SparkContext(conf = conf)
###Output
_____no_output_____
###Markdown
Read in a fraction of entries from a local Hadoop Sequence File
###Code
path = "../../resources/mmtf_full_sample/"
fraction= 0.5
seed = 123
pdb = mmtfReader.read_sequence_file(path, sc, fraction = fraction, seed = seed)
count = pdb.count()
print(f'number of pdb entries read : {count}')
###Output
number of pdb entries read : 2215
###Markdown
Retain high resolution X-ray structures
###Code
pdb = pdb.filter(ExperimentalMethods(ExperimentalMethods.X_RAY_DIFFRACTION)) \
.filter(Resolution(0,2.0)) \
.filter(RFree(0,2.0))
print(f'number of pdb entries left : {pdb.count()}')
###Output
number of pdb entries left : 510
###Markdown
Visualize Structures
###Code
structures = pdb.keys().collect()
view_structure(structures)
###Output
_____no_output_____
###Markdown
Save this subset in a Hadoop Sequence File
###Code
write_path = "./mmtf_subset_xray"
# Reduce RDD to 8 partitions
pdb = pdb.coalesce(8)
mmtfWriter.write_sequence_file(write_path, sc, pdb)
###Output
_____no_output_____
###Markdown
Terminate Spark
###Code
sc.stop()
###Output
_____no_output_____ |
Udacity Course/recurrent-neural-networks/char-rnn/Character_Level_RNN_Exercise.ipynb | ###Markdown
Character-Level LSTM in PyTorchIn this notebook, I'll construct a character-level LSTM with PyTorch. The network will train character by character on some text, then generate new text character by character. As an example, I will train on Anna Karenina. **This model will be able to generate new text based on the text from the book!**This network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Below is the general architecture of the character-wise RNN. First let's load in our required resources for data loading and model creation.
###Code
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
###Output
_____no_output_____
###Markdown
Load in DataThen, we'll load the Anna Karenina text file and convert it into integers for our network to use.
###Code
# open text file and read in data as `text`
with open('data/anna.txt', 'r') as f:
text = f.read()
###Output
_____no_output_____
###Markdown
Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever.
###Code
text[:100]
###Output
_____no_output_____
###Markdown
TokenizationIn the cells, below, I'm creating a couple **dictionaries** to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
###Code
# encode the text and map each character to an integer and vice versa
# we create two dictionaries:
# 1. int2char, which maps integers to characters
# 2. char2int, which maps characters to unique integers
chars = tuple(set(text))
int2char = dict(enumerate(chars))
char2int = {ch: ii for ii, ch in int2char.items()}
# encode the text
encoded = np.array([char2int[ch] for ch in text])
###Output
_____no_output_____
###Markdown
And we can see those same characters from above, encoded as integers.
###Code
encoded[:100]
###Output
_____no_output_____
###Markdown
Pre-processing the data. As you can see in our char-RNN image above, our LSTM expects an input that is **one-hot encoded** meaning that each character is converted into an integer (via our created dictionary) and *then* converted into a column vector where only its corresponding integer index will have the value of 1 and the rest of the vector will be filled with 0's. Since we're one-hot encoding the data, let's make a function to do that!
###Code
def one_hot_encode(arr, n_labels):
    # Initialize the encoded array
one_hot = np.zeros((arr.size, n_labels), dtype=np.float32)
# Fill the appropriate elements with ones
one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.
# Finally reshape it to get back to the original array
one_hot = one_hot.reshape((*arr.shape, n_labels))
return one_hot
# check that the function works as expected
test_seq = np.array([[3, 5, 1]])
one_hot = one_hot_encode(test_seq, 8)
print(one_hot)
###Output
_____no_output_____
###Markdown
Making training mini-batchesTo train on this data, we also want to create mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:In this example, we'll take the encoded characters (passed in as the `arr` parameter) and split them into multiple sequences, given by `batch_size`. Each of our sequences will be `seq_length` long. Creating Batches**1. The first thing we need to do is discard some of the text so we only have completely full mini-batches. **Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences in a batch) and $M$ is the seq_length or number of time steps in a sequence. Then, to get the total number of batches, $K$, that we can make from the array `arr`, you divide the length of `arr` by the number of characters per batch. Once you know the number of batches, you can get the total number of characters to keep from `arr`, $N * M * K$.**2. After that, we need to split `arr` into $N$ batches. ** You can do this using `arr.reshape(size)` where `size` is a tuple containing the dimensions sizes of the reshaped array. We know we want $N$ sequences in a batch, so let's make that the size of the first dimension. For the second dimension, you can use `-1` as a placeholder in the size, it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M * K)$.**3. Now that we have this array, we can iterate through it to get our mini-batches. **The idea is each batch is a $N \times M$ window on the $N \times (M * K)$ array. For each subsequent batch, the window moves over by `seq_length`. We also want to create both the input and target arrays. Remember that the targets are just the inputs shifted over by one character. The way I like to do this window is use `range` to take steps of size `n_steps` from $0$ to `arr.shape[1]`, the total number of tokens in each sequence. That way, the integers you get from `range` always point to the start of a batch, and each window is `seq_length` wide.> **TODO:** Write the code for creating batches in the function below. The exercises in this notebook _will not be easy_. I've provided a notebook with solutions alongside this notebook. If you get stuck, checkout the solutions. The most important thing is that you don't copy and paste the code into here, **type out the solution code yourself.**
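As a quick sanity check on the arithmetic above (the concrete numbers here are made up purely for illustration): with 70 encoded characters, `batch_size = 2` and `seq_length = 10`, each batch holds 2 * 10 = 20 characters, so we can make K = 70 // 20 = 3 full batches; we keep 2 * 10 * 3 = 60 characters, reshape them to (2, 30), and then slide a window of width 10 along the second axis.
```python
import numpy as np

arr = np.arange(70)          # pretend these are 70 encoded characters
arr = arr[:2 * 10 * 3]       # keep N * M * K = 60 of them
arr = arr.reshape((2, -1))   # shape (2, 30): batch_size rows
x = arr[:, 0:10]             # first window of inputs, shape (2, 10)
```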
###Code
def get_batches(arr, batch_size, seq_length):
'''Create a generator that returns batches of size
batch_size x seq_length from arr.
Arguments
---------
arr: Array you want to make batches from
batch_size: Batch size, the number of sequences per batch
seq_length: Number of encoded chars in a sequence
'''
    ## TODO: Get the number of batches we can make
    batch_size_total = batch_size * seq_length
    n_batches = len(arr)//batch_size_total
    ## TODO: Keep only enough characters to make full batches
    arr = arr[:n_batches*batch_size_total]
    ## TODO: Reshape into batch_size rows
    arr = arr.reshape((batch_size, -1))
    ## TODO: Iterate over the batches using a window of size seq_length
    for n in range(0, arr.shape[1], seq_length):
        # The features
        x = arr[:, n:n+seq_length]
        # The targets, shifted by one
        y = np.zeros_like(x)
        try:
            y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n+seq_length]
        except IndexError:
            y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0]
        yield x, y
###Output
_____no_output_____
###Markdown
Test Your ImplementationNow I'll make some data sets and we can check out what's going on as we batch data. Here, as an example, I'm going to use a batch size of 8 and 50 sequence steps.
###Code
batches = get_batches(encoded, 8, 50)
x, y = next(batches)
# printing out the first 10 items in a sequence
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
###Output
_____no_output_____
###Markdown
If you implemented `get_batches` correctly, the above output should look something like ```x [[25 8 60 11 45 27 28 73 1 2] [17 7 20 73 45 8 60 45 73 60] [27 20 80 73 7 28 73 60 73 65] [17 73 45 8 27 73 66 8 46 27] [73 17 60 12 73 8 27 28 73 45] [66 64 17 17 46 7 20 73 60 20] [73 76 20 20 60 73 8 60 80 73] [47 35 43 7 20 17 24 50 37 73]]y [[ 8 60 11 45 27 28 73 1 2 2] [ 7 20 73 45 8 60 45 73 60 45] [20 80 73 7 28 73 60 73 65 7] [73 45 8 27 73 66 8 46 27 65] [17 60 12 73 8 27 28 73 45 27] [64 17 17 46 7 20 73 60 20 80] [76 20 20 60 73 8 60 80 73 17] [35 43 7 20 17 24 50 37 73 36]] ``` although the exact numbers may be different. Check to make sure the data is shifted over one step for `y`. --- Defining the network with PyTorchBelow is where you'll define the network.Next, you'll use PyTorch to define the architecture of the network. We start by defining the layers and operations we want. Then, define a method for the forward pass. You've also been given a method for predicting characters. Model StructureIn `__init__` the suggested structure is as follows:* Create and store the necessary dictionaries (this has been done for you)* Define an LSTM layer that takes as params: an input size (the number of characters), a hidden layer size `n_hidden`, a number of layers `n_layers`, a dropout probability `drop_prob`, and a batch_first boolean (True, since we are batching)* Define a dropout layer with `drop_prob`* Define a fully-connected layer with params: input size `n_hidden` and output size (the number of characters)* Finally, initialize the weights (again, this has been given)Note that some parameters have been named and given in the `__init__` function, and we use them and store them by doing something like `self.drop_prob = drop_prob`. --- LSTM Inputs/OutputsYou can create a basic [LSTM layer](https://pytorch.org/docs/stable/nn.htmllstm) as follows```pythonself.lstm = nn.LSTM(input_size, n_hidden, n_layers, dropout=drop_prob, batch_first=True)```where `input_size` is the number of characters this cell expects to see as sequential input, and `n_hidden` is the number of units in the hidden layers in the cell. And we can add dropout by adding a dropout parameter with a specified probability; this will automatically add dropout to the inputs or outputs. Finally, in the `forward` function, we can stack up the LSTM cells into layers using `.view`. With this, you pass in a list of cells and it will send the output of one cell into the next cell.We also need to create an initial hidden state of all zeros. This is done like so```pythonself.init_hidden()```
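If the tensor shapes feel abstract, a tiny stand-alone check such as this (the sizes are arbitrary assumptions, not values you are required to use) shows what an `nn.LSTM` with `batch_first=True` expects and returns:
```python
import torch
from torch import nn

lstm = nn.LSTM(input_size=83, hidden_size=256, num_layers=2, dropout=0.5, batch_first=True)
x = torch.zeros(8, 50, 83)                             # (batch, seq_length, n_chars)
h0 = (torch.zeros(2, 8, 256), torch.zeros(2, 8, 256))  # (n_layers, batch, n_hidden) for h and c
out, hn = lstm(x, h0)
print(out.shape)                                       # torch.Size([8, 50, 256])
```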
###Code
# check if GPU is available
train_on_gpu = torch.cuda.is_available()
if(train_on_gpu):
print('Training on GPU!')
else:
print('No GPU available, training on CPU; consider making n_epochs very small.')
class CharRNN(nn.Module):
def __init__(self, tokens, n_hidden=256, n_layers=2,
drop_prob=0.5, lr=0.001):
super().__init__()
self.drop_prob = drop_prob
self.n_layers = n_layers
self.n_hidden = n_hidden
self.lr = lr
# creating character dictionaries
self.chars = tokens
self.int2char = dict(enumerate(self.chars))
self.char2int = {ch: ii for ii, ch in self.int2char.items()}
## TODO: define the layers of the model
self.lstm=nn.LSTM(len(self.chars), n_hidden, n_layers, dropout=drop_prob, batch_first=True)
self.dropout=nn.Dropout(drop_prob)
self.fc=nn.Linear(n_hidden, len(self.chars))
def forward(self, x, hidden):
''' Forward pass through the network.
These inputs are x, and the hidden/cell state `hidden`. '''
## TODO: Get the outputs and the new hidden state from the lstm
r_output, hidden = self.lstm(x,hidden)
out=self.dropout(r_output)
        out = out.contiguous().view(-1, self.n_hidden)
out=self.fc(out)
# return the final output and the hidden state
return out, hidden
def init_hidden(self, batch_size):
''' Initializes hidden state '''
# Create two new tensors with sizes n_layers x batch_size x n_hidden,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
if (train_on_gpu):
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
return hidden
###Output
_____no_output_____
###Markdown
Time to trainThe train function gives us the ability to set the number of epochs, the learning rate, and other parameters.Below we're using an Adam optimizer and cross entropy loss since we are looking at character class scores as output. We calculate the loss and perform backpropagation, as usual!A couple of details about training: >* Within the batch loop, we detach the hidden state from its history; this time setting it equal to a new *tuple* variable because an LSTM has a hidden state that is a tuple of the hidden and cell states.* We use [`clip_grad_norm_`](https://pytorch.org/docs/stable/_modules/torch/nn/utils/clip_grad.html) to help prevent exploding gradients.
###Code
def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10):
''' Training a network
Arguments
---------
net: CharRNN network
data: text data to train the network
epochs: Number of epochs to train
batch_size: Number of mini-sequences per mini-batch, aka batch size
seq_length: Number of character steps per mini-batch
lr: learning rate
clip: gradient clipping
val_frac: Fraction of data to hold out for validation
print_every: Number of steps for printing training and validation loss
'''
net.train()
opt = torch.optim.Adam(net.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
# create training and validation data
val_idx = int(len(data)*(1-val_frac))
data, val_data = data[:val_idx], data[val_idx:]
if(train_on_gpu):
net.cuda()
counter = 0
n_chars = len(net.chars)
for e in range(epochs):
# initialize hidden state
h = net.init_hidden(batch_size)
for x, y in get_batches(data, batch_size, seq_length):
counter += 1
# One-hot encode our data and make them Torch tensors
x = one_hot_encode(x, n_chars)
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
if(train_on_gpu):
inputs, targets = inputs.cuda(), targets.cuda()
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
# zero accumulated gradients
net.zero_grad()
# get the output from the model
output, h = net(inputs, h)
# calculate the loss and perform backprop
loss = criterion(output, targets.view(batch_size*seq_length).long())
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
nn.utils.clip_grad_norm_(net.parameters(), clip)
opt.step()
# loss stats
if counter % print_every == 0:
# Get validation loss
val_h = net.init_hidden(batch_size)
val_losses = []
net.eval()
for x, y in get_batches(val_data, batch_size, seq_length):
# One-hot encode our data and make them Torch tensors
x = one_hot_encode(x, n_chars)
x, y = torch.from_numpy(x), torch.from_numpy(y)
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
val_h = tuple([each.data for each in val_h])
inputs, targets = x, y
if(train_on_gpu):
inputs, targets = inputs.cuda(), targets.cuda()
output, val_h = net(inputs, val_h)
val_loss = criterion(output, targets.view(batch_size*seq_length).long())
val_losses.append(val_loss.item())
                net.train() # reset to train mode after iterating through validation data
print("Epoch: {}/{}...".format(e+1, epochs),
"Step: {}...".format(counter),
"Loss: {:.4f}...".format(loss.item()),
"Val Loss: {:.4f}".format(np.mean(val_losses)))
###Output
_____no_output_____
###Markdown
Instantiating the modelNow we can actually train the network. First we'll create the network itself, with some given hyperparameters. Then, define the mini-batches sizes, and start training!
###Code
## TODO: set your model hyperparameters
# define and print the net
n_hidden=512
n_layers=2
net = CharRNN(chars, n_hidden, n_layers)
print(net)
###Output
_____no_output_____
###Markdown
Set your training hyperparameters!
###Code
batch_size = 128
seq_length = 100
n_epochs = 20 # start small if you are just testing initial behavior
# train the model
train(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10)
###Output
_____no_output_____
###Markdown
Getting the best modelTo set your hyperparameters to get the best performance, you'll want to watch the training and validation losses. If your training loss is much lower than the validation loss, you're overfitting. Increase regularization (more dropout) or use a smaller network. If the training and validation losses are close, you're underfitting so you can increase the size of the network. HyperparametersHere are the hyperparameters for the network.In defining the model:* `n_hidden` - The number of units in the hidden layers.* `n_layers` - Number of hidden LSTM layers to use.We assume that dropout probability and learning rate will be kept at the default, in this example.And in training:* `batch_size` - Number of sequences running through the network in one pass.* `seq_length` - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.* `lr` - Learning rate for trainingHere's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to [where it originally came from](https://github.com/karpathy/char-rnntips-and-tricks).> Tips and Tricks> Monitoring Validation Loss vs. Training Loss>If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:> - If your training loss is much lower than validation loss then this means the network might be **overfitting**. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.> - If your training/validation loss are about equal then your model is **underfitting**. Increase the size of your model (either number of layers or the raw number of neurons per layer)> Approximate number of parameters> The two most important parameters that control the model are `n_hidden` and `n_layers`. I would advise that you always use `n_layers` of either 2/3. The `n_hidden` can be adjusted based on how much data you have. The two important quantities to keep track of here are:> - The number of parameters in your model. This is printed when you start training.> - The size of your dataset. 1MB file is approximately 1 million characters.>These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:> - I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make `n_hidden` larger.> - I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.> Best models strategy>The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). 
Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.>It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.>By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative. CheckpointAfter training, we'll save the model so we can load it again later if we need too. Here I'm saving the parameters needed to create the same architecture, the hidden layer hyperparameters and the text characters.
###Code
# change the name, for saving multiple files
model_name = 'rnn_x_epoch.net'
checkpoint = {'n_hidden': net.n_hidden,
'n_layers': net.n_layers,
'state_dict': net.state_dict(),
'tokens': net.chars}
with open(model_name, 'wb') as f:
torch.save(checkpoint, f)
###Output
_____no_output_____
###Markdown
--- Making PredictionsNow that the model is trained, we'll want to sample from it and make predictions about next characters! To sample, we pass in a character and have the network predict the next character. Then we take that character, pass it back in, and get another predicted character. Just keep doing this and you'll generate a bunch of text! A note on the `predict` functionThe output of our RNN is from a fully-connected layer and it outputs a **distribution of next-character scores**.> To actually get the next character, we apply a softmax function, which gives us a *probability* distribution that we can then sample to predict the next character. Top K samplingOur predictions come from a categorical probability distribution over all the possible characters. We can make the sample text and make it more reasonable to handle (with less variables) by only considering some $K$ most probable characters. This will prevent the network from giving us completely absurd characters while allowing it to introduce some noise and randomness into the sampled text. Read more about [topk, here](https://pytorch.org/docs/stable/torch.htmltorch.topk).
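As a tiny illustration of the top-$K$ idea on its own (the scores below are made up), `topk` hands back both the kept probabilities and their indices, and renormalizing the kept probabilities gives the distribution we actually sample from:
```python
import torch
import torch.nn.functional as F

scores = torch.tensor([[2.0, 0.5, 1.0, -1.0]])   # fake next-character scores
p = F.softmax(scores, dim=1)
top_p, top_idx = p.topk(2)                       # keep only the 2 most probable characters
print(top_idx)                                   # their indices
print(top_p / top_p.sum())                       # renormalized distribution to sample from
```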
###Code
def predict(net, char, h=None, top_k=None):
''' Given a character, predict the next character.
Returns the predicted character and the hidden state.
'''
# tensor inputs
x = np.array([[net.char2int[char]]])
x = one_hot_encode(x, len(net.chars))
inputs = torch.from_numpy(x)
if(train_on_gpu):
inputs = inputs.cuda()
# detach hidden state from history
h = tuple([each.data for each in h])
# get the output of the model
out, h = net(inputs, h)
# get the character probabilities
p = F.softmax(out, dim=1).data
if(train_on_gpu):
p = p.cpu() # move to cpu
# get top characters
if top_k is None:
top_ch = np.arange(len(net.chars))
else:
p, top_ch = p.topk(top_k)
top_ch = top_ch.numpy().squeeze()
# select the likely next character with some element of randomness
p = p.numpy().squeeze()
char = np.random.choice(top_ch, p=p/p.sum())
# return the encoded value of the predicted char and the hidden state
return net.int2char[char], h
###Output
_____no_output_____
###Markdown
Priming and generating text Typically you'll want to prime the network so you can build up a hidden state. Otherwise the network will start out generating characters at random. In general the first bunch of characters will be a little rough since it hasn't built up a long history of characters to predict from.
###Code
def sample(net, size, prime='The', top_k=None):
if(train_on_gpu):
net.cuda()
else:
net.cpu()
net.eval() # eval mode
# First off, run through the prime characters
chars = [ch for ch in prime]
h = net.init_hidden(1)
for ch in prime:
char, h = predict(net, ch, h, top_k=top_k)
chars.append(char)
# Now pass in the previous character and get a new one
for ii in range(size):
char, h = predict(net, chars[-1], h, top_k=top_k)
chars.append(char)
return ''.join(chars)
print(sample(net, 1000, prime='Anna', top_k=5))
###Output
_____no_output_____
###Markdown
Loading a checkpoint
###Code
# Here we have loaded in a model that trained over 20 epochs `rnn_20_epoch.net`
with open('rnn_x_epoch.net', 'rb') as f:
checkpoint = torch.load(f)
loaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], n_layers=checkpoint['n_layers'])
loaded.load_state_dict(checkpoint['state_dict'])
# Sample using a loaded model
print(sample(loaded, 2000, top_k=5, prime="And Levin said"))
###Output
_____no_output_____ |
Nanostring/heatmap.ipynb | ###Markdown
Nanostring heatmap **Goal:** Make a publication-style figure from normalized Nanostring data and calculate the average pseudotime. Steps: 1) Read in the Nanostring normalized raw .csv file 2) Make a new file with the averages calculated 3) Subset with the selected gene list (selected_genes_curated.csv) 4) Hierarchical clustering on the rows + plotting row z-scores using the seaborn.clustermap function 5) Read in the list of pseudotimes and calculate avg and stdv. Ran this notebook with the docker container 866995796105.dkr.ecr.us-east-2.amazonaws.com/scrna:python-3.7-20191025
###Code
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
from gprofiler import GProfiler
import re
from tempfile import NamedTemporaryFile
#import urllib3
import urllib
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
1) Read in Nanostring normalized raw .csv file
###Code
raw_df = pd.read_csv('NormalizedData_cleaned_including_invivo_humanKidney.csv', index_col = 0)
raw_df.head()
###Output
_____no_output_____
###Markdown
2) manually calculate the average of the replicates
###Code
col_names = ['D0', 'D7', 'D10', 'D12', 'D14', 'D24', 'D26', 'D28', 'D10_2w', 'D12_2w', 'D14_2w', 'D10_4w', 'D12_4w', 'D14_4w', 'Fetal_Kidney', 'Adult_Kidney']
avg_df = pd.DataFrame(columns = col_names)
avg_df['D0'] = raw_df[['D0_1','D0_2']].mean(axis=1, skipna=True)
avg_df['D7'] = raw_df[['D7_1','D7_2','D7_3']].mean(axis=1, skipna=True)
avg_df['D10'] = raw_df[['D10_1','D10_2','D10_3','D10_4']].mean(axis=1, skipna=True)
avg_df['D12'] = raw_df[['D12_1','D12_2','D12_3','D12_4']].mean(axis=1, skipna=True)
avg_df['D14'] = raw_df[['D14_1','D14_2','D14_3']].mean(axis=1, skipna=True)
avg_df['D24'] = raw_df[['D24_1','D24_2','D24_3']].mean(axis=1, skipna=True)
avg_df['D26'] = raw_df[['D26_1','D26_2','D26_3','D26_4']].mean(axis=1, skipna=True)
avg_df['D28'] = raw_df[['D28_1','D28_2','D28_3']].mean(axis=1, skipna=True)
avg_df['D10_2w'] = raw_df[['D10_2w_1','D10_2w_2']].mean(axis=1, skipna=True)
avg_df['D12_2w'] = raw_df[['D12_2w_1','D12_2w_2', 'D12_2w_3']].mean(axis=1, skipna=True)
avg_df['D14_2w'] = raw_df[['D14_2w_1','D14_2w_2', 'D14_2w_3', 'D14_2w_4', 'D14_2w_5','D14_2w_6', 'D14_2w_7', 'D14_2w_8', 'D14_2w_9']].mean(axis=1, skipna=True)
avg_df['D10_4w'] = raw_df[['D10_4w_1','D10_4w_2', 'D10_4w_3']].mean(axis=1, skipna=True)
avg_df['D12_4w'] = raw_df[['D12_4w_1','D12_4w_2', 'D12_4w_3', 'D12_4w_4']].mean(axis=1, skipna=True)
avg_df['D14_4w'] = raw_df[['D14_4w_1','D14_4w_2', 'D14_4w_3']].mean(axis=1, skipna=True)
avg_df['Fetal_Kidney'] = raw_df[['fetal Kidney_0']]
avg_df['Adult_Kidney'] = raw_df[['adult Kidney_1','adult Kidney_2']].mean(axis=1, skipna=True)
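# (Optional) A more concise alternative sketch: strip the trailing replicate index from each
# column name and average within the resulting groups. This assumes every replicate column
# ends in '_<number>' (e.g. 'D14_2w_3'), so the in-vivo columns come out as 'fetal Kidney' /
# 'adult Kidney' rather than the 'Fetal_Kidney' / 'Adult_Kidney' labels used above.
import re
def condition_of(col):
    return re.sub(r'_\d+$', '', col)
avg_alt = raw_df.groupby(condition_of, axis=1).mean()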
avg_df.head()
###Output
_____no_output_____
###Markdown
3) Plot full heatmap
###Code
outfile = "./figures_nov20/"
###Output
_____no_output_____
###Markdown
**generate ordered list to split clustered heatmap**
###Code
# Averaged
sns_plot = sns.clustermap(avg_df.iloc[:,0:8],
col_cluster=False,
row_cluster=True,
cmap = 'seismic',
figsize= (12, 8),
z_score=0,
annot=False,
yticklabels=False,
center = 0,
cbar=True)
sns_plot.ax_row_dendrogram.set_visible(False)
# moves the color bar and changes the font size
#[distance horizontal, distance vertical, width horizontal, width vertical]
sns_plot.cax.set_position([.92, .54, .05, .20])
plt.setp(sns_plot.cax.yaxis.get_majorticklabels(), fontsize=15)
plt.setp(sns_plot.ax_heatmap.get_yticklabels(), fontsize = 16 ,style ="italic", rotation=45) # For y axis
plt.setp(sns_plot.ax_heatmap.get_xticklabels(), fontsize = 20) # For x axis
plt.show()
# this generates a list of genes ordered by the clustering
genes_df = pd.DataFrame(avg_df.index.tolist())
genes_df['index1'] = genes_df.index
neworder_df = pd.DataFrame(sns_plot.dendrogram_row.reordered_ind, columns = ['index1'])
orderedbycluster = pd.merge(neworder_df, genes_df, how='inner', on='index1')
orderedbycluster = orderedbycluster.rename(columns={0: 'gene'})
# make a new df with the genes ordered with this clustering (so does not need to be clustered before plotting)
avg_df_neworder = pd.merge(avg_df, orderedbycluster, how='right', left_index= True, right_on='gene')
avg_df_neworder = avg_df_neworder.set_index(['gene'])
avg_df_neworder = avg_df_neworder.drop(['index1'], axis=1)
#delete index header
del avg_df_neworder.index.name
# plot first part of the heatmap
sns_plot = sns.clustermap(avg_df_neworder.iloc[0:114,0:8],
col_cluster=False,
row_cluster=False,
cmap = 'seismic',
figsize= (10, 40),
z_score=0,
center = 0,
annot=False,
yticklabels=True)
sns_plot.ax_row_dendrogram.set_visible(False)
#sns_plot.cax.set_visible(False)
# moves the color bar and changes the font size
#[distance horizontal, distance vertical, width horizontal, width vertical]
sns_plot.cax.set_position([1.1, .645, .05, .08])
plt.setp(sns_plot.cax.yaxis.get_majorticklabels(), fontsize=14)
plt.setp(sns_plot.ax_heatmap.get_yticklabels(), fontsize = 14 ,style ="italic") # For y axis
plt.setp(sns_plot.ax_heatmap.get_xticklabels(), fontsize = 20) # For y axis
plt.show()
sns_plot.savefig("all_genes_long_part1.pdf", dpi=300)
sns_plot = sns.clustermap(avg_df_neworder.iloc[115:,0:8],
col_cluster=False,
row_cluster=False,
cmap = 'seismic',
figsize= (10, 40),
z_score=0,
center = 0,
annot=False,
yticklabels=True)
sns_plot.ax_row_dendrogram.set_visible(False)
# moves the color bar and changes the font size
#[distance horizontal, distance vertical, width horizontal, width vertical]
sns_plot.cax.set_position([1.1, .645, .05, .08])
plt.setp(sns_plot.cax.yaxis.get_majorticklabels(), fontsize=14)
plt.setp(sns_plot.ax_heatmap.get_yticklabels(), fontsize = 14 ,style ="italic") # For y axis
plt.setp(sns_plot.ax_heatmap.get_xticklabels(), fontsize = 20) # For y axis
plt.show()
sns_plot.savefig("all_genes_long_part2.pdf", dpi=300)
###Output
_____no_output_____
###Markdown
with different fonts
###Code
# font normal
github_url = 'https://github.com/google/fonts/blob/9409aff0417ff2e6e66c40c673339214185251d4/apache/roboto/Roboto%5Bwdth%2Cwght%5D.ttf'
url = github_url + '?raw=true' # You want the actual file, not some html
#response = urllib3.request.urlopen(url)
response = urllib.request.urlopen(url)
f = NamedTemporaryFile(delete=False, suffix='.ttf')
f.write(response.read())
f.close()
# font italic
github_url = 'https://github.com/google/fonts/blob/9409aff0417ff2e6e66c40c673339214185251d4/apache/roboto/Roboto-Italic%5Bwdth%2Cwght%5D.ttf'
url = github_url + '?raw=true' # You want the actual file, not some html
#response = urllib3.request.urlopen(url)
response = urllib.request.urlopen(url)
g = NamedTemporaryFile(delete=False, suffix='.ttf')
g.write(response.read())
g.close()
prop_g = fm.FontProperties(fname=g.name, size = 14)
prop_l = fm.FontProperties(fname=f.name, size = 14)
prop_f = fm.FontProperties(fname=f.name, size = 20)
sns_plot = sns.clustermap(avg_df_neworder.iloc[0:114,0:8],
col_cluster=False,
row_cluster=False,
cmap = 'seismic',
figsize= (10, 40),
z_score=0,
center = 0,
#annot=False,
yticklabels=True)
sns_plot.ax_row_dendrogram.set_visible(False)
#sns_plot.cax.set_visible(False)
# moves the color bar and changes the font size
#[distance horizontal, distance vertical, width horizontal, width vertical]
sns_plot.cax.set_position([1.1, .645, .05, .08])
plt.setp(sns_plot.cax.yaxis.get_majorticklabels(), fontproperties=prop_l)
plt.setp(sns_plot.ax_heatmap.get_yticklabels(), fontproperties=prop_g) # For y axis
plt.setp(sns_plot.ax_heatmap.get_xticklabels(), fontproperties=prop_f) # For y axis
plt.show()
sns_plot.savefig("all_genes_long_part1_roboto.pdf", dpi=300)
sns_plot = sns.clustermap(avg_df_neworder.iloc[115:,0:8],
col_cluster=False,
row_cluster=False,
cmap = 'seismic',
figsize= (10, 40),
z_score=0,
center = 0,
#annot=False,
yticklabels=True)
sns_plot.ax_row_dendrogram.set_visible(False)
#sns_plot.cax.set_visible(False)
# moves the color bar and changes the font size
#[distance horizontal, distance vertical, width horizontal, width vertical]
sns_plot.cax.set_position([1.1, .645, .05, .08])
plt.setp(sns_plot.cax.yaxis.get_majorticklabels(), fontproperties=prop_l)
plt.setp(sns_plot.ax_heatmap.get_yticklabels(), fontproperties=prop_g) # For y axis
plt.setp(sns_plot.ax_heatmap.get_xticklabels(), fontproperties=prop_f) # For y axis
plt.show()
sns_plot.savefig("all_genes_long_part2_roboto.pdf", dpi=300)
###Output
_____no_output_____
###Markdown
4) Subset with selected gene list
###Code
select_genes = pd.read_csv('selected_genes_oct20.csv', index_col = 0)
avg_df_select = pd.merge(avg_df, select_genes, how='right', left_index= True, right_on='gene')
#delete index header
del avg_df_select.index.name
sns_plot = sns.clustermap(avg_df_select.iloc[:,0:8],
col_cluster=False,
row_cluster=False,
cmap = 'seismic',
figsize= (10, 8),
z_score=0,
center = 0,
#annot=False,
yticklabels=True)
sns_plot.ax_row_dendrogram.set_visible(False)
#sns_plot.cax.set_visible(False)
# moves the color bar and changes the font size
#[distance horizontal, distance vertical, width horizontal, width vertical]
sns_plot.cax.set_position([1.05, .63, .04, .1])
plt.setp(sns_plot.cax.yaxis.get_majorticklabels(), fontsize=14)
plt.setp(sns_plot.ax_heatmap.get_yticklabels(), fontsize = 14 , style ="italic") # For y axis
plt.setp(sns_plot.ax_heatmap.get_xticklabels(), fontsize = 20) # For y axis
plt.show()
sns_plot.savefig("selected_genes.pdf", dpi=300)
# font normal
github_url = 'https://github.com/google/fonts/blob/9409aff0417ff2e6e66c40c673339214185251d4/apache/roboto/Roboto%5Bwdth%2Cwght%5D.ttf'
url = github_url + '?raw=true' # You want the actual file, not some html
#response = urllib3.request.urlopen(url)
response = urllib.request.urlopen(url)
f = NamedTemporaryFile(delete=False, suffix='.ttf')
f.write(response.read())
f.close()
# font italic
github_url = 'https://github.com/google/fonts/blob/9409aff0417ff2e6e66c40c673339214185251d4/apache/roboto/Roboto-Italic%5Bwdth%2Cwght%5D.ttf'
url = github_url + '?raw=true' # You want the actual file, not some html
#response = urllib3.request.urlopen(url)
response = urllib.request.urlopen(url)
g = NamedTemporaryFile(delete=False, suffix='.ttf')
g.write(response.read())
g.close()
prop_g = fm.FontProperties(fname=g.name, size = 14)
prop_l = fm.FontProperties(fname=f.name, size = 14)
prop_f = fm.FontProperties(fname=f.name, size = 20)
sns_plot = sns.clustermap(avg_df_select.iloc[:,0:8],
col_cluster=False,
row_cluster=False,
cmap = 'seismic',
figsize= (10, 8),
z_score=0,
center = 0,
#annot=False,
yticklabels=True)
sns_plot.ax_row_dendrogram.set_visible(False)
#sns_plot.cax.set_visible(False)
# moves the color bar and changes the font size
#[distance horizontal, distance vertical, width horizontal, width vertical]
sns_plot.cax.set_position([1.05, .63, .04, .1])
plt.setp(sns_plot.cax.yaxis.get_majorticklabels(), fontsize=14, fontproperties=prop_l)
plt.setp(sns_plot.ax_heatmap.get_yticklabels(), fontsize = 14 , style ="italic", fontproperties=prop_g) # For y axis
plt.setp(sns_plot.ax_heatmap.get_xticklabels(), fontsize = 14, fontproperties=prop_f) # For y axis
plt.show()
sns_plot.savefig("selected_genes_roboto.pdf", dpi=300)
###Output
_____no_output_____
###Markdown
5) Calculate Pseudotime
###Code
# to annotate by week and day
def annotate_df(df):
# extract conditions
week_list = []
day_list = []
for index, row in df.iterrows():
week = 0
day = 0
if len(index.split('-'))> 1:
week = str(index.split('-')[1])
if len(index.split('-'))> 1:
day = str(index.split('-')[0])
week_list.append(week)
day_list.append(day)
df['week'] = week_list
df['day'] = day_list
return df
podo = pd.read_csv('pseudotime_podocytes.csv', index_col = 0)
podo['stage']=podo.index
podo = annotate_df(podo)
podo.head()
podo['stage'].value_counts()
podo.groupby(['stage']).mean()
podo.groupby(['stage']).std()
podo.groupby(['week']).mean()
podo.groupby(['week']).std()
podo.groupby(['day']).mean()
podo.groupby(['day']).std()
###Output
_____no_output_____ |
outline/.ipynb_checkpoints/Dynamic Programming-checkpoint.ipynb | ###Markdown
1. Longest Palindromic SubstringGiven a string s, find the longest palindromic substring in s. You may assume that the maximum length of s is 1000.
###Code
# one solution is to reverse the string and find the longest common substring
# whose indices map back to the same span of the original string
# this attempt instead starts from the left of the str and adds one character each time
# and checks whether it forms a new palindrome with 1 or 2 more characters
def longestPalindrome(s):
if len(s) == 0:
return ""
maxL = 0
start = 0
for i in range(len(s)):
if i - maxL >= 1 and s[i-maxL-1:i+1] == s[i-maxL-1:i+1][::-1]:
start = i-maxL-1
maxL += 2
continue
if i - maxL >= 0 and s[i-maxL:i+1] == s[i-maxL:i+1][::-1]:
start = i-maxL
maxL += 1
return s[start:start+maxL]
s = 'ababakk'
longestPalindrome(s)
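# (Sketch of the alternative mentioned in the comment above, not the accepted solution:
# reverse s, run a longest-common-substring DP, and keep a match only when its indices
# map back to the same span of s. O(n^2) time and space is assumed to be acceptable.)
def longestPalindrome_lcs(s):
    n = len(s)
    if n == 0:
        return ""
    r = s[::-1]
    dp = [[0] * (n + 1) for _ in range(n + 1)]
    best_len, best_end = 0, 0
    for i in range(1, n + 1):
        for j in range(1, n + 1):
            if s[i - 1] == r[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
                # the match in r starts at original index n - j; it must coincide with i - dp[i][j]
                if dp[i][j] > best_len and n - j == i - dp[i][j]:
                    best_len, best_end = dp[i][j], i
    return s[best_end - best_len:best_end]
longestPalindrome_lcs(s)  # should agree with the answer above for 'ababakk'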
###Output
_____no_output_____
###Markdown
2. Minimum Path SumGiven a m x n grid filled with non-negative numbers, find a path from top left to bottom right which minimizes the sum of all numbers along its path.
###Code
def minPathSum(grid):
m, n = len(grid), len(grid[0])
for i in range(1,n):
grid[0][i] += grid[0][i-1]
for i in range(1,m):
grid[i][0] += grid[i-1][0]
for i in range(1,m):
for j in range(1,n):
grid[i][j] += min(grid[i-1][j], grid[i][j-1])
return grid[-1][-1]
grid = [[1,3,1],
[1,1,1],
[4,2,1]]
minPathSum(grid)
###Output
_____no_output_____
###Markdown
3. Edit DistanceGiven two words word1 and word2, find the minimum number of operations required to convert word1 to word2.You have the following 3 operations permitted on a word:Insert a characterDelete a characterReplace a character
###Code
def minDistance(word1, word2):
m = len(word1)
n = len(word2)
table = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(m + 1):
table[i][0] = i
for j in range(n + 1):
table[0][j] = j
for i in range(1, m + 1):
for j in range(1, n + 1):
if word1[i - 1] == word2[j - 1]:
table[i][j] = table[i - 1][j - 1]
else:
table[i][j] = 1 + min(table[i - 1][j], table[i][j - 1], table[i - 1][j - 1])
return table[-1][-1]
word1 = 'horse'
word2 = 'rossdgfa'
minDistance(word1, word2)
###Output
_____no_output_____
###Markdown
Consider the DP and recursive solutions:https://leetcode.com/problems/edit-distance/discuss/159295/Python-solutions-and-intuition 4. Maximal RectangleGiven a 2D binary matrix filled with 0's and 1's, find the largest rectangle containing only 1's and return its area.
###Code
# failed attempt that computes areas of all rectangles
def maximalRectangle(matrix):
if not matrix:
return 0
m, n = len(matrix), len(matrix[0])
areaM = matrix
maxA = 0
if matrix[0][0] == 1:
areaM[0][0] = [1,1]
maxA = 0
elif matrix[0][0] == 0:
areaM[0][0] = [0,0]
for i in range(1,n):
if matrix[0][i] == 1:
areaM[0][i] == [1,1+areaM[0][i-1][0]]
maxA = max(maxA, 1+areaM[0][i-1][0])
elif matrix[0][i] == 0:
areaM[0][i] == [0,0]
for j in range(1,m):
if matrix[j][0] == 1:
areaM[j][0] == [1+areaM[j-1][0][1],1]
maxA = max(maxA, 1+areaM[j-1][0][1])
elif matrix[j][0] == 0:
areaM[j][0] == [0,0]
for j in range(1,m):
for i in range(1,n):
if matrix[j][i] == 1:
areaM[j][i] = [1+areaM[j][i-1][0], 1+areaM[j-1][i][1]]
maxA = max(maxA, areaM[j][i][0] * areaM[j][i][1])
return maxA
# largest rectangle in histogram
def maximalRectangle(matrix):
if not matrix or not matrix[0]:
return 0
n = len(matrix[0])
height = [0] * (n+1)
ans = 0
for row in matrix:
for i in range(n):
height[i] = height[i] + 1 if row[i] == '1' else 0
stack = [-1]
for i in range(n+1):
while height[i] < height[stack[-1]]:
h = height[stack.pop()]
w = i - 1 - stack[-1]
ans = max(ans, h * w)
stack.append(i)
return ans
matrix = [
["1","0","1","0","0"],
["1","0","1","1","1"],
["1","1","1","1","1"],
["1","0","0","1","0"]
]
maximalRectangle(matrix)
###Output
_____no_output_____
###Markdown
About stack https://www.cnblogs.com/2Bthebest1/category/1452000.html 5. Decode WaysA message containing letters from A-Z is being encoded to numbers using the following mapping:'A' -> 1'B' -> 2...'Z' -> 26Given a non-empty string containing only digits, determine the total number of ways to decode it.
###Code
# first attempt, fix 0
# fails because the string comparison ('0' < pre+num <= '26') is lexicographic, not numeric
def numDecodings(s):
if s[0] == '0':
s = s[1::]
if len(s) == 0 or (len(s)==1 and s[0] == '0'):
return 0
count = 1
pre = s[0]
for num in s[1::]:
if num == '0':
continue
if '0'< pre+num <= '26':
count += 1
pre = num
return count
s = '01'
numDecodings(s)
# second attempt accoding to test results
def numDecodings(s):
if not s:
return 0
dp = [0 for x in range(len(s)+1)]
dp[0] = 1
dp[1] = 0 if s[0] == '0' else 1
for i in range(2, len(s)+1):
if int(s[i-1]) != 0:
dp[i] += dp[i-1]
if 10 <= int(s[i-2:i]) <= 26:
dp[i] += dp[i-2]
return dp[len(s)]
s = '001'
numDecodings(s)
s = '1201'
numDecodings(s)
s = '12001'
numDecodings(s)
s = '1234'
s[0:2]
###Output
_____no_output_____
###Markdown
6. TriangleGiven a triangle, find the minimum path sum from top to bottom. Each step you may move to adjacent numbers on the row below.For example, given the following triangle[[2], [3,4], [6,5,7], [4,1,8,3]]The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).
###Code
def minimumTotal(triangle):
n = len(triangle)
if n==1:
return triangle[0][0]
for i in range(1,n):
triangle[i][0] += triangle[i-1][0]
triangle[i][-1] += triangle[i-1][-1]
for j in range(2,n):
row = triangle[j]
for i in range(1,len(row)-1):
triangle[j][i] += min(triangle[j-1][i-1], triangle[j-1][i])
return min(triangle[-1])
triangle = [
[2],
[3,4],
[6,5,7],
[4,1,8,3]
]
minimumTotal(triangle)
###Output
_____no_output_____
###Markdown
7. Maximum SubarrayGiven an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
###Code
def maxSubArray(nums):
for i in range(1,len(nums)):
if nums[i-1] > 0:
nums[i] += nums[i-1]
return max(nums)
nums = [-2,1,-3,4,-1,2,1,-5,4]
maxSubArray(nums)
###Output
_____no_output_____
###Markdown
8. Word BreakGiven a non-empty string s and a dictionary wordDict containing a list of non-empty words, determine if s can be segmented into a space-separated sequence of one or more dictionary words.
###Code
def wordBreak(s,wordDict):
inside = [True]
for i in range(1, len(s)+1):
inside += any(inside[j] and s[j:i] in wordDict for j in range(i)),
return inside[-1]
s = "applepenapple"
wordDict = ["apple", "pen"]
wordBreak(s,wordDict)
s = "catsandog"
wordDict = ["cats", "dog", "sand", "and", "cat"]
wordBreak(s,wordDict)
## optimize
def wordBreak(s,wordDict):
inside = [True]
maxL = max(map(len, wordDict+['']))
wordDict = set(wordDict)
for i in range(1, len(s)+1):
inside += any(inside[j] and s[j:i] in wordDict for j in range(max(0, i-maxL), i)),
return inside[-1]
s = "applepenapple"
wordDict = ["apple", "pen"]
wordBreak(s,wordDict)
s = "catsandog"
wordDict = ["cats", "dog", "sand", "and", "cat"]
wordBreak(s,wordDict)
###Output
_____no_output_____
###Markdown
9. Dungeon GameThe demons had captured the princess (P) and imprisoned her in the bottom-right corner of a dungeon. The dungeon consists of M x N rooms laid out in a 2D grid. Our valiant knight (K) was initially positioned in the top-left room and must fight his way through the dungeon to rescue the princess.The knight has an initial health point represented by a positive integer. If at any point his health point drops to 0 or below, he dies immediately.Some of the rooms are guarded by demons, so the knight loses health (negative integers) upon entering these rooms; other rooms are either empty (0's) or contain magic orbs that increase the knight's health (positive integers).In order to reach the princess as quickly as possible, the knight decides to move only rightward or downward in each step.
###Code
# first attempt
# let M[i][j] be the minimum health needed for the ij-th position
# C[i][j] is the current health at ii-th position with min health strategy
def calculateMinimumHP(dungeon):
m, n = len(dungeon), len(dungeon[0])
M = dungeon
C = dungeon
# compute min health needed on boundary
M[0][0] = -dungeon[0][0] if dungeon[0][0]<0 else 0
cur = dungeon[0][0] + M[0][0]
for i in range(1,n):
if dungeon[0][i]+C[0][i-1] >= 0:
M[0][i] = M[0][i-1]
C[0][i] = dungeon[0][i]+C[0][i-1]
else:
M[0][i] = M[0][i-1]-dungeon[0][i]-C[0][i-1]
C[0][i] = 0
for j in range(1,m):
if dungeon[j][0]+C[j-1][0] >= 0:
M[j][0] = M[j-1][0]
C[j][0] = dungeon[j][0]+C[j-1][0]
else:
M[j][0] = M[j-1][0]-dungeon[j][0]-C[j-1][0]
C[j][0] = 0
for j in range(1,m):
for i in range(1,n):
left = M[j][i-1] if dungeon[j][i]+C[j][i-1] >= 0 else M[j][i-1]-dungeon[j][i]-C[j][i-1]
up = M[j-1][i] if dungeon[j][i]+C[j-1][i] >= 0 else M[j-1][i]-dungeon[j][i]-C[j-1][i]
# could be right but it is overly complicated
# second attempt
# dp[i][j] is the min health needed when enter ij-th position
def calculateMinimumHP(dungeon):
m, n = len(dungeon), len(dungeon[0])
dp = [[float('inf') for _ in range(n+1)] for _ in range(m+1)]
dp[m-1][n] = dp[m][n-1] = 0
for i in range(m-1, -1, -1):
for j in range(n-1, -1, -1):
dp[i][j] = max(min(dp[i+1][j], dp[i][j+1])-dungeon[i][j], 0)
return dp[0][0]+1
dungeon = [[-2,-3,3],[-5,-10,1],[10,30,-5]]
calculateMinimumHP(dungeon)
###Output
_____no_output_____
###Markdown
10. Unique Binary Search TreesGiven n, how many structurally unique BST's (binary search trees) that store values 1 ... n?
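One way to see where the recurrence used below comes from (this is the standard Catalan-number argument, not something stated in the problem): choosing value $i$ as the root leaves the $i-1$ smaller keys for the left subtree and the $n-i$ larger keys for the right subtree, so $$G(0) = 1, \qquad G(n) = \sum_{i=1}^{n} G(i-1)\,G(n-i),$$ which gives $G(3) = G(0)G(2) + G(1)G(1) + G(2)G(0) = 2 + 1 + 2 = 5$ for the example below.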
###Code
# dp[i] is the number of unique BSTs built from i elements
def numTrees(n):
dp = [0] * (n+1)
dp[0] = 1
for i in range(1,n+1):
for j in range(i):
dp[i] += dp[j] * dp[i-j-1]
return dp[n]
n=3
numTrees(n)
###Output
_____no_output_____
###Markdown
11. Best Time to Buy and Sell StockSay you have an array for which the ith element is the price of a given stock on day i.If you were only permitted to complete at most one transaction (i.e., buy one and sell one share of the stock), design an algorithm to find the maximum profit.Note that you cannot sell a stock before you buy one.
###Code
# scan once, tracking the minimum price seen so far and the best profit
def maxProfit(prices):
mprof, min_price = 0, float('inf')
for price in prices:
min_price = min(min_price, price)
mprof = max(price - min_price, mprof)
return mprof
prices = [7,1,5,3,6,4]
maxProfit(prices)
###Output
_____no_output_____ |
notebooks/hmm_poisson_changepoint_tfp.ipynb | ###Markdown
HMM with Poisson observations for detecting changepoints in the rate of a signal. Code is from https://www.tensorflow.org/probability/examples/Multiple_changepoint_detection_and_Bayesian_model_selection
###Code
import numpy as np
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
from matplotlib import pylab as plt
%matplotlib inline
import scipy.stats
###Output
_____no_output_____
###Markdown
DataThe synthetic data corresponds to a single time series of counts, where the rate of the underlying generative process changes at certain points in time.
###Code
true_rates = [40, 3, 20, 50]
true_durations = [10, 20, 5, 35]
np.random.seed(42)
observed_counts = np.concatenate([
scipy.stats.poisson(rate).rvs(num_steps)
for (rate, num_steps) in zip(true_rates, true_durations)
]).astype(np.float32)
plt.plot(observed_counts)
plt.savefig('hmm_poisson_data.pdf')
###Output
_____no_output_____
###Markdown
Model with fixed $K$To model the changing Poisson rate, we use an HMM.We initially assume the number of states is known to be $K=4$.Later we will try comparing HMMs with different $K$.We fix the initial state distribution to be uniform,and fix the transition matrix to be the following, where we set $p=0.05$:$$\begin{align*}z_1 &\sim \text{Categorical}\left(\left\{\frac{1}{4}, \frac{1}{4}, \frac{1}{4}, \frac{1}{4}\right\}\right)\\z_t | z_{t-1} &\sim \text{Categorical}\left(\left\{\begin{array}{cc}p & \text{if } z_t = z_{t-1} \\ \frac{1-p}{4-1} & \text{otherwise}\end{array}\right\}\right)\end{align*}$$
###Code
num_states = 4
initial_state_logits = np.zeros([num_states], dtype=np.float32) # uniform distribution
daily_change_prob = 0.05
transition_probs = daily_change_prob / (num_states-1) * np.ones(
[num_states, num_states], dtype=np.float32)
np.fill_diagonal(transition_probs,
1-daily_change_prob)
print("Initial state logits:\n{}".format(initial_state_logits))
print("Transition matrix:\n{}".format(transition_probs))
###Output
Initial state logits:
[0. 0. 0. 0.]
Transition matrix:
[[0.95 0.01666667 0.01666667 0.01666667]
[0.01666667 0.95 0.01666667 0.01666667]
[0.01666667 0.01666667 0.95 0.01666667]
[0.01666667 0.01666667 0.01666667 0.95 ]]
###Markdown
Now we create an HMM where the observation distribution is a Poisson with learnable parameters. We specify the parameters in log space and initialize them to random values around the log of the overall mean count (to set the scale).
###Code
# Define variable to represent the unknown log rates.
np.random.seed(1)
trainable_log_rates = tf.Variable(
np.log(np.mean(observed_counts)) + tf.random.normal([num_states]),
name='log_rates')
hmm = tfd.HiddenMarkovModel(
initial_distribution=tfd.Categorical(
logits=initial_state_logits),
transition_distribution=tfd.Categorical(probs=transition_probs),
observation_distribution=tfd.Poisson(log_rate=trainable_log_rates),
num_steps=len(observed_counts))
###Output
_____no_output_____
###Markdown
Model fitting using gradient descent.We compute a MAP estimate of the Poisson rates $\lambda$ using batch gradient descent, using the Adam optimizer applied to the log likelihood (from the HMM) plus the log prior for $p(\lambda)$.
###Code
rate_prior = tfd.LogNormal(5, 5)
def log_prob():
return (tf.reduce_sum(rate_prior.log_prob(tf.math.exp(trainable_log_rates))) +
hmm.log_prob(observed_counts))
losses = tfp.math.minimize(
lambda: -log_prob(),
optimizer=tf.optimizers.Adam(learning_rate=0.1),
num_steps=100)
plt.plot(losses)
plt.ylabel('Negative log marginal likelihood')
rates = tf.exp(trainable_log_rates)
print("Inferred rates: {}".format(rates))
print("True rates: {}".format(true_rates))
###Output
Inferred rates: [ 2.6820226 18.98235 38.94888 50.867588 ]
True rates: [40, 3, 20, 50]
###Markdown
We see that the method learned a good approximation to the true (generating) parameters, up to a permutation of the states (since the labels are unidentifiable). However, results can vary with different random seeds. We may find that the rates are the same for some states, which means those states are being treated as identical, and are therefore redundant. Plotting the posterior over states
###Code
# Runs forward-backward algorithm to compute marginal posteriors.
posterior_dists = hmm.posterior_marginals(observed_counts)
posterior_probs = posterior_dists.probs_parameter().numpy()
def plot_state_posterior(ax, state_posterior_probs, title):
ln1 = ax.plot(state_posterior_probs, c='blue', lw=3, label='p(state | counts)')
ax.set_ylim(0., 1.1)
ax.set_ylabel('posterior probability')
ax2 = ax.twinx()
ln2 = ax2.plot(observed_counts, c='black', alpha=0.3, label='observed counts')
ax2.set_title(title)
ax2.set_xlabel("time")
lns = ln1+ln2
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, loc=4)
ax.grid(True, color='white')
ax2.grid(False)
fig = plt.figure(figsize=(10, 10))
plot_state_posterior(fig.add_subplot(2, 2, 1),
posterior_probs[:, 0],
title="state 0 (rate {:.2f})".format(rates[0]))
plot_state_posterior(fig.add_subplot(2, 2, 2),
posterior_probs[:, 1],
title="state 1 (rate {:.2f})".format(rates[1]))
plot_state_posterior(fig.add_subplot(2, 2, 3),
posterior_probs[:, 2],
title="state 2 (rate {:.2f})".format(rates[2]))
plot_state_posterior(fig.add_subplot(2, 2, 4),
posterior_probs[:, 3],
title="state 3 (rate {:.2f})".format(rates[3]))
plt.tight_layout()
print(rates)
rates_np = rates.numpy()
print(rates_np)
# max marginals
most_probable_states = np.argmax(posterior_probs, axis=1)
most_probable_rates = rates_np[most_probable_states]
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(1, 1, 1)
ax.plot(most_probable_rates, c='green', lw=3, label='inferred rate')
ax.plot(observed_counts, c='black', alpha=0.3, label='observed counts')
ax.set_ylabel("latent rate")
ax.set_xlabel("time")
ax.set_title("Inferred latent rate over time")
ax.legend(loc=4)
# max probaility trajectory (Viterbi)
most_probable_states = hmm.posterior_mode(observed_counts)
most_probable_rates = rates_np[most_probable_states]
fig = plt.figure(figsize=(10, 4))
ax = fig.add_subplot(1, 1, 1)
color_list = np.array(['r', 'g', 'b', 'k'])
colors = color_list[most_probable_states]
for i in range(len(colors)):
ax.plot(i, most_probable_rates[i], '-o', c=colors[i], lw=3)
ax.plot(observed_counts, c='black', alpha=0.3, label='observed counts')
ax.set_ylabel("latent rate")
ax.set_xlabel("time")
ax.set_title("Inferred latent rate over time")
plt.savefig('hmm_poisson_4states_segmentation.pdf')
###Output
_____no_output_____
###Markdown
Model with unknown $K$In general we don't know the true number of states. One way to select the 'best' model is to compute the one with the maximum marginal likelihood. Rather than summing over both discrete latent states and integrating over the unknown parameters $\lambda$, we just maximize over the parameters (empirical Bayes approximation).$$p(x_{1:T}|K) \approx \max_\lambda \int p(x_{1:T}, z_{1:T} | \lambda, K) dz$$We can do this by fitting a bank of separate HMMs in parallel, one for each value of $K$. We need to make them all the same size so we can batch them efficiently. To do this, we pad the transition matrices (and other parameter vectors) so they all have the same shape, and then use masking.
###Code
max_num_states = 6
def build_latent_state(num_states, max_num_states, daily_change_prob=0.05):
# Give probability exp(-100) ~= 0 to states outside of the current model.
initial_state_logits = -100. * np.ones([max_num_states], dtype=np.float32)
initial_state_logits[:num_states] = 0.
# Build a transition matrix that transitions only within the current
# `num_states` states.
transition_probs = np.eye(max_num_states, dtype=np.float32)
if num_states > 1:
transition_probs[:num_states, :num_states] = (
daily_change_prob / (num_states-1))
np.fill_diagonal(transition_probs[:num_states, :num_states],
1-daily_change_prob)
return initial_state_logits, transition_probs
# For each candidate model, build the initial state prior and transition matrix.
batch_initial_state_logits = []
batch_transition_probs = []
for num_states in range(1, max_num_states+1):
initial_state_logits, transition_probs = build_latent_state(
num_states=num_states,
max_num_states=max_num_states)
batch_initial_state_logits.append(initial_state_logits)
batch_transition_probs.append(transition_probs)
batch_initial_state_logits = np.array(batch_initial_state_logits)
batch_transition_probs = np.array(batch_transition_probs)
print("Shape of initial_state_logits: {}".format(batch_initial_state_logits.shape))
print("Shape of transition probs: {}".format(batch_transition_probs.shape))
print("Example initial state logits for num_states==3:\n{}".format(batch_initial_state_logits[2, :]))
print("Example transition_probs for num_states==3:\n{}".format(batch_transition_probs[2, :, :]))
trainable_log_rates = tf.Variable(
tf.fill([batch_initial_state_logits.shape[0], max_num_states],
tf.math.log(tf.reduce_mean(observed_counts))) +
tf.random.stateless_normal([1, max_num_states], seed=(42, 42)),
name='log_rates')
hmm = tfd.HiddenMarkovModel(
initial_distribution=tfd.Categorical(
logits=batch_initial_state_logits),
transition_distribution=tfd.Categorical(probs=batch_transition_probs),
observation_distribution=tfd.Poisson(log_rate=trainable_log_rates),
num_steps=len(observed_counts))
print("Defined HMM with batch shape: {}".format(hmm.batch_shape))
rate_prior = tfd.LogNormal(5, 5)
def log_prob():
prior_lps = rate_prior.log_prob(tf.math.exp(trainable_log_rates))
prior_lp = tf.stack(
[tf.reduce_sum(prior_lps[i, :i+1]) for i in range(max_num_states)])
return prior_lp + hmm.log_prob(observed_counts)
###Output
_____no_output_____
###Markdown
Model fitting with gradient descent
###Code
losses = tfp.math.minimize(
lambda: -log_prob(),
optimizer=tf.optimizers.Adam(0.1),
num_steps=100)
plt.plot(losses)
plt.ylabel('Negative log marginal likelihood')
###Output
_____no_output_____
###Markdown
Plot marginal likelihood of each model
###Code
num_states = np.arange(1, max_num_states+1)
plt.plot(num_states, -losses[-1])
plt.ylim([-400, -200])
plt.ylabel("marginal likelihood $\\tilde{p}(x)$")
plt.xlabel("number of latent states")
plt.title("Model selection on latent states")
plt.savefig('poisson_hmm_marglik_vs_k.pdf')
!ls
###Output
hmm_poisson_4states_segmentation.pdf hmm_poisson_segmentation_1to6.pdf
hmm_poisson_data.pdf sample_data
hmm_poisson_marglik_vs_k.pdf
###Markdown
Plot posteriors
###Code
rates = tf.exp(trainable_log_rates).numpy()
for i, learned_model_rates in enumerate(rates):
print("rates for {}-state model: {}".format(i+1, learned_model_rates[:i+1]))
posterior_probs = hmm.posterior_marginals(
observed_counts).probs_parameter().numpy()
most_probable_states = np.argmax(posterior_probs, axis=-1)
!ls
fig = plt.figure(figsize=(12, 6))
for i, learned_model_rates in enumerate(rates):
ax = fig.add_subplot(2, 3, i+1)
ax.plot(learned_model_rates[most_probable_states[i]], c='green', lw=3, label='inferred rate')
ax.plot(observed_counts, c='black', alpha=0.3, label='observed counts')
ax.set_ylabel("latent rate")
ax.set_xlabel("time")
ax.set_title("{}-state model".format(i+1))
ax.legend(loc=4)
plt.tight_layout()
plt.savefig('hmm_poisson_segmentation_1to6.pdf')
###Output
_____no_output_____ |
tf0/Demo_Tensorflow_Tensors.ipynb | ###Markdown
Python Doesn't Have Good Numeric Support* Python integers are actually an object with header and typing information* access to Python integers requires a level of indirection* In C, integers are directly accessible in memory without indirection The Problem is Even Worse for Python Lists * Python lists are immensely flexible * no fixed size * OK to have heterogeneous data* ...but as a result they are not likely to be contiguous in memory* and even if they are, there is still a lot of indirection required* so they aren't good for fast number crunching
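A rough illustration of that per-element overhead (a sketch; exact byte counts vary by Python version and platform):

```python
import sys

x = 1
print(sys.getsizeof(x))          # ~28 bytes on 64-bit CPython: header + type pointer + value
print(sys.getsizeof([1, 2, 3]))  # the list itself only stores pointers to boxed int objects
# A packed int64 element in a contiguous array needs just 8 bytes, with no pointer chasing.
```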
###Code
pylist = list(range(1_000_000))
%timeit [i + 1 for i in pylist]
###Output
_____no_output_____
###Markdown
One solution is to use Tensorflow tensors* written in C++* allows for vectorized operations
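As a sketch of the payoff (timings are machine-dependent), the same element-wise add done as a single vectorized TensorFlow op avoids the per-element Python loop entirely:

```python
import timeit
import tensorflow as tf

pylist = list(range(1_000_000))
t = tf.constant(pylist)

loop_time = timeit.timeit(lambda: [i + 1 for i in pylist], number=10)
vec_time = timeit.timeit(lambda: t + 1, number=10)  # one call into a C++ kernel
print(f"list comprehension: {loop_time:.3f}s   vectorized tf add: {vec_time:.3f}s")
```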
###Code
#!pip install --upgrade -q tensorflow==2.4.0
import tensorflow as tf
tf.__version__
###Output
_____no_output_____
###Markdown
TensorFlow Scalars
###Code
tf.constant(42)
tf.constant(42).dtype
tf.constant(42).shape
len(tf.constant(42).shape) == 0
tf.constant(3.14).dtype
float(tf.constant(3.14).numpy())
float(tf.constant(3.14).numpy()) == 3.14
###Output
_____no_output_____
###Markdown
IEEE Standard for Floating-Point Arithmetic (IEEE 754) * a refresher on floating point precision issues
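A small sketch of what goes wrong below: 0.1 has no exact binary representation, so the usual fix is to compare with a tolerance (for example `math.isclose`) rather than `==`:

```python
import math

print(f"{0.1:.20f}")                # 0.10000000000000000555... : not exactly 0.1
print(3 * 0.1 == 0.3)               # False: the rounding errors on each side differ
print(math.isclose(3 * 0.1, 0.3))   # True: compare within a relative tolerance instead
```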
###Code
x = 0.3
x
3 * 0.1 == x
3 * 0.1
x = tf.constant(3.14)
tf.cast(x, dtype=tf.uint8).numpy()
tf.cast(x, dtype=tf.int8)
tf.cast(tf.cast(x, dtype=tf.int8), dtype=tf.float32)
###Output
_____no_output_____
###Markdown
Numerical Truncation* nearest integer __`i`__ which is closer to zero than __`x`__ is
###Code
# remove fractional component
trunc_x = x-tf.truncatemod(x, tf.constant(1.))
trunc_x
trunc_x.dtype
###Output
_____no_output_____
###Markdown
`tf.math.floor()`* the largest integer __`i`__, such that __`i <= x`__
###Code
tf.math.floor(x)
tf.math.floor(tf.constant(2.01))
tf.math.floor(tf.constant(2.))
tf.math.floor(tf.constant(-3.14))
###Output
_____no_output_____
###Markdown
`tf.math.ceil()`* the smallest integer __`i`__, such that __`i >= x`__
###Code
tf.math.ceil(x)
tf.math.ceil(tf.constant(2.01))
tf.math.ceil(tf.constant(2.))
###Output
_____no_output_____
###Markdown
* can __tf.math.ceil()__ be used in place of __tf.math.floor()__ ?
###Code
tf.math.ceil(x) - 1
tf.math.ceil(tf.constant(2.01)) - 1
tf.math.ceil(tf.constant(2.)) - 1
###Output
_____no_output_____
###Markdown
TensorFlow tensors* data is stored contiguously in memory
###Code
# tensorflow will infer the data type
a = tf.constant([1, 4, 2, 5, 3])
a, a.dtype
a = tf.constant([3.14, 4, 2, 3])
a, a.dtype
# ...or you can be explicit
a = tf.constant([1, 2, 3, 4], dtype=tf.float32)
a
tf.constant([range(i, i + 3) for i in [2, 4, 6]])
tf.zeros(10, dtype=tf.int32)
tf.ones((3, 5), dtype=tf.float64)
tf.eye(5)
tf.fill((3, 5), 42)
tf.range(0, 20, 2)
tf.linspace(0, 1, 5)
###Output
_____no_output_____
###Markdown
Pseudo-Random Numbers
###Code
tf.random.set_seed(1)
tf.random.normal((3, 3))
tf.random.normal((3, 3), mean=0, stddev=1)
tf.random.uniform((3, 3), minval=0, maxval=10, dtype=tf.int32)
###Output
_____no_output_____
###Markdown
Converting array types
###Code
x = tf.linspace(0, 10, 50)
x
tf.cast(x,dtype=tf.int32)
###Output
_____no_output_____
###Markdown
Multi-dimensional Arrays
###Code
x2 = tf.random.uniform((3, 4), minval=0, maxval=10, dtype=tf.int32)
x2
###Output
_____no_output_____
###Markdown
True "matrix-style" indexing
###Code
x2[0, 0]
x2[2, 0]
x2[2, -1]
# remember, tensorflow tensors are not mutable; we need to create a variable from it
v2 = tf.Variable(x2)
v2[0,0].assign(0)
v2
tf.reshape(tf.range(0, 9), shape=(3,3))
###Output
_____no_output_____
###Markdown
Array Slicing
###Code
x = tf.range(0, 10)
x[:5]
x[5:]
x[4:7]
x[::2]
x[1::2]
x[::-1]
tf.reverse(x, axis=[0])
tf.reverse(x, axis=[0])[5::2]
###Output
_____no_output_____
###Markdown
Filtering 1-dimensional data
###Code
x = tf.constant([ 1, 0, 5, 2, 1, 0, 8, 0, 0 ])
tf.where(x != 0)
x != 0
x[ x != 0 ]
x[ x < 3 ]
###Output
_____no_output_____
###Markdown
Filtering 2-dimensional data
###Code
x = tf.constant([[1, 0, 0], [0, 5, 0], [7, 8, 0]])
x
# produces two arrays, one with x coords, one with y coords
nz_idx = tf.where( x != 0 )
nz_idx
x[x != 0]
y = tf.reshape(tf.range(1, 10), shape=(3,3))
y
tf.gather(y, axis=0, indices=tf.constant([0,2]))
tf.gather(y, axis=1, indices=tf.constant([0,2]))
tf.experimental.numpy.triu(y)
tf.experimental.numpy.tril(y)
tf.experimental.numpy.tril(y).T #transpose
###Output
_____no_output_____
###Markdown
Multi-dimensional subarrays
###Code
x2
x2[:2, :3]
x2[:3, ::2]
x2[::-1, ::-1]
# reverse on the row axis only
tf.reverse(x2, axis=[0])
indices = tf.range(tf.size(x2) - 1, limit=-1, delta=-1)
indices
tf.experimental.numpy.take(x2, indices).reshape(x2.shape) #x2[::-1, ::-1]
###Output
_____no_output_____
###Markdown
Subarray Views
###Code
x2, id(x2)
v2_sub = tf.Variable(x2[:2, :2])
v2_sub, id(v2_sub)
v2_sub[0,0].assign(99)
v2_sub
v2_sub  # note: unlike NumPy views, tf.Variable copies the sliced data, so this assignment does not change x2
###Output
_____no_output_____
###Markdown
TensorFlow C++ Functions* operate on tensors as on contiguous blobs of data in memory* _vectorized_ wrapper for a function that takes a fixed number of specific inputs and produces a fixed number of specific outputs | Operator | C++ | Description ||----------|-----------------------------|-------------------------------------|| + | tensorflow::ops::Add | Addition (e.g., 1 + 1 = 2) || - | tensorflow::ops::Subtract | Subtraction (e.g., 3 - 2 = 1) || - | tensorflow::ops::Negate | Unary negation (e.g., -2) || * | tensorflow::ops::Multiply | Multiplication (e.g., 2 * 3 = 6) || / | tensorflow::ops::Div | Division (e.g., 3 / 2 = 1.5) || // | tensorflow::ops::FloorDiv | Floor division (e.g., 3 // 2 = 1) || ** | tensorflow::ops::Exp | Exponentiation (e.g., 2 ** 3 = 8) || % | tensorflow::ops::Mod | Modulus/remainder (e.g., 9 % 4 = 1) | Vectorized Operations
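A quick (non-exhaustive) sanity check that the Python operators in the table dispatch to the same element-wise kernels as the explicit `tf.*` calls:

```python
import tensorflow as tf

a = tf.constant([1., 2., 3.])
b = tf.constant([4., 5., 6.])

print(tf.reduce_all(a + b == tf.add(a, b)).numpy())       # True
print(tf.reduce_all(a * b == tf.multiply(a, b)).numpy())  # True
print(tf.reduce_all(a ** 2 == tf.pow(a, 2.)).numpy())     # True
```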
###Code
tensorflo = tf.range(1, limit=1_000_000)
%timeit 1 / tensorflo
x = tf.reshape(tf.range(0, limit=9),(3, 3))
2 ** x
x = tf.range(0, limit=4,dtype=tf.float32)
-(0.5 * x + 1) ** 2
###Output
_____no_output_____
###Markdown
Exponents and Logarithms
###Code
x = tf.constant([1., 2., 3.])
tf.math.exp(x)
tf.pow(3, x)
tf.math.log(x)
tf.experimental.numpy.log2(tf.constant([1., 256., 65536.]))
tf.experimental.numpy.log10(tf.constant([1_000., 1_000_000., 10. ** 10]))
###Output
_____no_output_____
###Markdown
Aggregations
###Code
x = tf.reshape(tf.range(0, 15), (3, 5))
x
tf.math.reduce_sum(x)
tf.math.reduce_sum(x, axis=0)
tf.math.reduce_sum(x, axis=1, keepdims=True)
tf.math.reduce_sum(x, axis=1)
x = tf.cast(x, dtype=tf.float64)
tf.math.reduce_mean(x), tf.math.reduce_std(x)
###Output
_____no_output_____ |
9-symbolic-manipulation.ipynb | ###Markdown
Symbolic ManipulationMost computer mathematical work directly manipulates numbers. Symbolic manipulation or computer algebra is using a computer to manipulate algebraic equations.You can do many algebraic and calculus operations.You can learn more [here](http://docs.sympy.org/dev/tutorial/).
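For a taste of the calculus side (a self-contained sketch; the cells below set things up step by step), differentiation and equation solving look like this:

```python
import sympy

x = sympy.symbols('x')
expr = x**2 + 2*x + 1

print(expr.diff(x))          # 2*x + 2
print(sympy.solve(expr, x))  # [-1]  (the double root of (x + 1)**2)
```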
###Code
import sympy
# this makes for mathematical typesetting
sympy.init_printing()
# defines symbols
x = sympy.symbols('x')
y = sympy.symbols('y')
###Output
_____no_output_____
###Markdown
We can define a polynomial.
###Code
expr = x**2 + 2*x + 1
expr
###Output
_____no_output_____
###Markdown
We can do some algebraic manipulations.
###Code
expr.factor()
###Output
_____no_output_____
###Markdown
Here is an example of integration
###Code
expr.integrate(x)
###Output
_____no_output_____
###Markdown
Here is a substitution
###Code
expr.subs(x, y**2)
###Output
_____no_output_____
###Markdown
You can also make numerical substitutions for a variable.
###Code
expr.subs(x, 1)
###Output
_____no_output_____ |
wksp-notebooks/generic.ipynb | ###Markdown
**Generic**
###Code
# Imports
import sqlite3
from impulse_sqlite3 import create_tag_table
sqlitedb_path = "C:/Users/zhixian/Documents/PowerShell/impulse.sqlite3"
from impulse_sqlite3 import create_tag_table
create_tag_table(sqlitedb_path)
def create_table():
conn = sqlite3.connect(sqlitedb_path)
c = conn.cursor()
# Create table
c.execute('''CREATE TABLE stocks (date text, trans text, symbol text, qty real, price real)''')
# Save (commit) the changes
conn.commit()
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
def exec():
conn = sqlite3.connect(sqlitedb_path)
c = conn.cursor()
# Create table
c.execute('''CREATE TABLE stocks (date text, trans text, symbol text, qty real, price real)''')
# Insert a row of data
c.execute("INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14)")
# Save (commit) the changes
conn.commit()
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
exec()
###Output
_____no_output_____ |
old/Dipper detection integrated.ipynb | ###Markdown
Setup spark
###Code
def spark_start(local_dir):
from pyspark.sql import SparkSession
spark = (
SparkSession.builder
.appName("LSD2")
.config("spark.sql.warehouse.dir", local_dir)
#.config('spark.master', "local[6]")
.config("spark.master", "local[32]") # yolo mode
.config('spark.driver.memory', '8G') # 128
.config('spark.local.dir', local_dir)
.config('spark.memory.offHeap.enabled', 'true')
.config('spark.memory.offHeap.size', '4G') # 256
.config("spark.sql.execution.arrow.enabled", "true")
.config("spark.driver.maxResultSize", "6G")
.config("spark.driver.extraJavaOptions", f"-Dderby.system.home={local_dir}")
.enableHiveSupport()
.getOrCreate()
)
return spark
spark_session = spark_start("/epyc/users/kyboone/spark-tmp/")
catalog = axs.AxsCatalog(spark_session)
###Output
_____no_output_____
###Markdown
Load ZTF data
###Code
ztf = catalog.load('ztf_oct19')
###Output
_____no_output_____
###Markdown
Load the saved axs table
###Code
wtf = catalog.load('wtf_full_oct19_4')
wtf.count()
# Recalculate the scores
rescored_wtf = (
wtf.select(
'*',
dipper.detect_dippers_udf(
wtf['mjd_g'],
wtf['mag_g'],
wtf['magerr_g'],
wtf['xpos_g'],
wtf['ypos_g'],
wtf['catflags_g'],
wtf['mjd_r'],
wtf['mag_r'],
wtf['magerr_r'],
wtf['xpos_r'],
wtf['ypos_r'],
wtf['catflags_r']
).alias('rescored_dipper'),
)
)
wtf.where(sparkfunc.col("dipper.significance") > 20.).count()
ztf.count()
%%time
# Get the best entries
res = rescored_wtf.sort(rescored_wtf['rescored_dipper.intmag'].desc()).head(1000)
%matplotlib inline
from ipywidgets import interact, IntSlider
def interact_lightcurve(idx, zoom=False):
show_lightcurve(res[idx], zoom=zoom)
interact(interact_lightcurve, idx=IntSlider(0, 0, len(res) - 1))
for idx in range(10):
show_lightcurve(res[idx], verbose=False)
###Output
_____no_output_____
###Markdown
Label how the dip detection works
###Code
detect_dippers_row(res[0])
band_colors = {
'g': 'tab:green',
'r': 'tab:red',
'i': 'tab:purple'
}
def label_dip(row):
plt.figure(figsize=(8, 6), dpi=100)
for band in ['g', 'r', 'i']:
mjd, mag, magerr = parse_observations(
row[f'mjd_{band}'],
row[f'mag_{band}'],
row[f'magerr_{band}'],
row[f'xpos_{band}'],
row[f'ypos_{band}'],
row[f'catflags_{band}'],
)
plt.errorbar(mjd, mag, magerr, fmt='o', c=band_colors[band], label=f'ZTF-{band}')
plt.xlabel('MJD')
plt.ylabel('Magnitude')  # the original checked an undefined `parsed` flag here; magnitudes are plotted without offsets
plt.legend()
plt.title('objid %d' % row['objid'])
plt.gca().invert_yaxis()
###Output
_____no_output_____
###Markdown
Joining
###Code
gaia = catalog.load('gaia_dr2_1am_dup')
wtf_df = rescored_wtf.crossmatch(gaia.select('ra', 'dec', 'zone', 'dup', 'parallax', 'parallax_over_error', 'phot_g_mean_mag',
'bp_rp')).toPandas()
plt.figure()
abs_mag = wtf_df['phot_g_mean_mag'] - 5 * np.log10(1000 / wtf_df['parallax']) + 5
cut = (wtf_df['parallax_over_error'] > 5)
plt.scatter(wtf_df['bp_rp'][cut], abs_mag[cut], s=1, alpha=0.1)
cut = (wtf_df['parallax_over_error'] > 5) & (wtf_df['new_score'] > 2.5)
plt.scatter(wtf_df['bp_rp'][cut], abs_mag[cut], s=10)
plt.gca().invert_yaxis()
def print_links(row):
print("http://simbad.u-strasbg.fr/simbad/sim-coo?Coord=%.6f%+.6f&CooFrame=FK5&CooEpoch=2000&CooEqui=2000&CooDefinedFrames=none&Radius=20&Radius.unit=arcsec&submit=submit+query&CoordList=" % (row['ra'], row['dec']))
print("RA+Dec: %.6f%+.6f" % (row['ra'], row['dec']))
print("RA: %.6f" % row['ra'])
print("Dec: %.6f" % row['dec'])
def show_lightcurve(idx):
row = wtf_df.iloc[idx]
#print_links(row)
plot_lightcurve(row)
plt.title(idx)
print("Score: %.3f" % detect_dippers_row(row))
a = np.where(cut & (wtf_df['bp_rp'] > 0.5) & (wtf_df['bp_rp'] < 2) & (abs_mag < 1000) & (abs_mag > 5.))
for i in a[0]:
show_lightcurve(i)
###Output
Score: 2.566
Score: 2.795
Score: 2.650
Score: 2.909
Score: 2.530
Score: 2.687
Score: 2.845
Score: 2.584
Score: 2.648
Score: 3.264
Score: 3.082
###Markdown
Run the spark query Run and save the query
###Code
%%time
# Run on spark
res = (
ztf
.exclude_duplicates()
#.region(ra1=295, ra2=296, dec1=20, dec2=21)
.where(
(sparkfunc.col("nobs_g") >= 10)
| (sparkfunc.col("nobs_r") >= 10)
| (sparkfunc.col("nobs_i") >= 10)
)
.select(
'*',
detect_dippers_udf(
ztf['mjd_g'],
ztf['mag_g'],
ztf['magerr_g'],
ztf['xpos_g'],
ztf['ypos_g'],
ztf['catflags_g'],
ztf['mjd_r'],
ztf['mag_r'],
ztf['magerr_r'],
ztf['xpos_r'],
ztf['ypos_r'],
ztf['catflags_r']
).alias('dipper'),
)
.where(
(sparkfunc.col("dipper.significance") > 10.)
)
#.write.parquet('./query_test_23.parquet')
.write.parquet('./query_full_4.parquet')
)
###Output
_____no_output_____
###Markdown
Convert to an axs table
###Code
wtf = spark_session.read.parquet('./query_full_4.parquet')
%matplotlib inline
from ipywidgets import interact, IntSlider
def interact_lightcurve(idx, zoom=False):
show_lightcurve(res[idx], zoom=zoom)
interact(interact_lightcurve, idx=IntSlider(0, 0, len(res) - 1))
catalog.save_axs_table(wtf, 'wtf_full_oct19_4', repartition=True)
###Output
_____no_output_____ |
notebooks/gold standard/3. recon price.ipynb | ###Markdown
Read IRSM FORM
###Code
irsmform = xml_parser.get_files('irsmform xml', folder = 'linear TSR logs')
irsmout = xml_parser.get_files('out xml', folder = 'linear TSR logs')
csv = xml_parser.get_files('CMS 10y csv', folder = 'linear TSR logs')
replic_basket = csv_parser.parse_csv(csv)
cal_basket = list(xml_parser.get_calib_basket(irsmform))
settings = xml_parser.get_model_settings(irsmform)
main_curve, sprds = xml_parser.get_rate_curves(irsmform)
dsc_curve = main_curve
try:
estim_curve = sprds[0]
except TypeError:
estim_curve = main_curve
n = settings.SpotIterations
dsc_adj_cms_flows = []
mean_rev = xml_parser.get_tsr_params(irsmform).meanRevTSRSwapRate
for swo in cal_basket:
pmnt_date = swo.payment_dates[0]
mr = mean_rev(swo.start_date)
adj_cms_flow = tsr.cmsflow(swo, dsc_curve, estim_curve, n, mr, pmnt_date).adjCMSrate
dsc_adj_cms_flows.append(adj_cms_flow * dsc_curve(pmnt_date))
dsc_adj_cms_flows = array(dsc_adj_cms_flows)
xml_parser.get_tsr_params(irsmform).meanRevTSRSwapRate
###Output
_____no_output_____ |
Big-Data-Clusters/CU8/Public/content/cert-management/cer025-upload-management-service-proxy-cert.ipynb | ###Markdown
CER025 - Upload existing Management Proxy certificate=====================================================Use this notebook to upload an externally generated Management Proxycertificate to a cluster.Steps----- Parameters
###Code
local_certificate_dir = "mssql-cluster-certificates"
certificate_file_name = "service-proxy-certificate.pem"
private_key_file_name = "service-proxy-privatekey.pem"
test_cert_store_root = "/var/opt/secrets/test-certificates"
app_name = "mgmtproxy"
prefix_keyfile_name = "service-proxy"
###Output
_____no_output_____
###Markdown
Common functionsDefine helper functions used in this notebook.
###Code
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
first_run = True
rules = None
debug_logging = False
def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
global first_run
global rules
if first_run:
first_run = False
rules = load_rules()
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
# To aid supportability, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
# Display an install HINT, so the user can click on a SOP to install the missing binary
#
if which_binary == None:
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
# Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if rules is not None:
apply_expert_rules(line)
if wait:
p.wait()
except FileNotFoundError as e:
if install_hint is not None:
display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# apply expert rules (to run follow-on notebooks), based on output
#
if rules is not None:
apply_expert_rules(line_decoded)
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
def load_json(filename):
"""Load a json file from disk and return the contents"""
with open(filename, encoding="utf8") as json_file:
return json.load(json_file)
def load_rules():
"""Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable"""
# Load this notebook as json to get access to the expert rules in the notebook metadata.
#
try:
j = load_json("cer025-upload-management-service-proxy-cert.ipynb")
except:
pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?
else:
if "metadata" in j and \
"azdata" in j["metadata"] and \
"expert" in j["metadata"]["azdata"] and \
"expanded_rules" in j["metadata"]["azdata"]["expert"]:
rules = j["metadata"]["azdata"]["expert"]["expanded_rules"]
rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
# print (f"EXPERT: There are {len(rules)} rules to evaluate.")
return rules
def apply_expert_rules(line):
"""Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so
inject a 'HINT' to the follow-on SOP/TSG to run"""
global rules
for rule in rules:
notebook = rule[1]
cell_type = rule[2]
output_type = rule[3] # i.e. stream or error
output_type_name = rule[4] # i.e. ename or name
output_type_value = rule[5] # i.e. SystemExit or stdout
details_name = rule[6] # i.e. evalue or text
expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!
if debug_logging:
print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")
if re.match(expression, line, re.DOTALL):
if debug_logging:
print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))
match_found = True
display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']}
error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]}
install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']}
###Output
_____no_output_____
###Markdown
Get the Kubernetes namespace for the big data clusterGet the namespace of the Big Data Cluster use the kubectl command lineinterface .**NOTE:**If there is more than one Big Data Cluster in the target Kubernetescluster, then either:- set \[0\] to the correct value for the big data cluster.- set the environment variable AZDATA\_NAMESPACE, before starting Azure Data Studio.
###Code
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True)
except:
from IPython.display import Markdown
print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.")
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}')
###Output
_____no_output_____
###Markdown
Get name of the ‘Running’ `controller` `pod`
###Code
# Place the name of the 'Running' controller pod in variable `controller`
controller = run(f'kubectl get pod --selector=app=controller -n {namespace} -o jsonpath={{.items[0].metadata.name}} --field-selector=status.phase=Running', return_output=True)
print(f"Controller pod name: {controller}")
###Output
_____no_output_____
###Markdown
Create folder on `controller` to hold the certificate
###Code
run(f'kubectl exec {controller} -n {namespace} -c controller -- bash -c "mkdir -p {test_cert_store_root}/" ')
###Output
_____no_output_____
###Markdown
Get name of the ‘Running’ `controller` `pod`
###Code
# Place the name of the 'Running' controller pod in variable `controller`
controller = run(f'kubectl get pod --selector=app=controller -n {namespace} -o jsonpath={{.items[0].metadata.name}} --field-selector=status.phase=Running', return_output=True)
print(f"Controller pod name: {controller}")
###Output
_____no_output_____
###Markdown
Copy certificates to `controller` `pod`
###Code
import os
import tempfile
path = os.path.join(tempfile.gettempdir(), local_certificate_dir)
os.chdir(path)
run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "mkdir -p {test_cert_store_root}/{app_name}"')
run(f'kubectl cp {certificate_file_name} {controller}:{test_cert_store_root}/{app_name}/{prefix_keyfile_name}-certificate.pem -c controller -n {namespace}')
run(f'kubectl cp {private_key_file_name} {controller}:{test_cert_store_root}/{app_name}/{prefix_keyfile_name}-privatekey.pem -c controller -n {namespace}')
print('Notebook execution complete.')
###Output
_____no_output_____ |
AutoEncoder_Reconstruction.ipynb | ###Markdown
Importing Datasets
###Code
path = 'drive/My Drive//Colab Notebooks//datasets//jets//'
df_train = pd.read_csv(path+'train_export_jets.csv',delimiter=',')
df_test = pd.read_csv(path+'test_export_jets.csv',delimiter=',')
df_validation = pd.read_csv(path+'val_export_jets.csv',delimiter=',')
df_train.drop(labels=['Unnamed: 0'],axis=1,inplace=True)
df_test.drop(labels=['Unnamed: 0'],axis=1,inplace=True)
df_validation.drop(labels=['Unnamed: 0'],axis=1,inplace=True)
def remove_zero_padded_features(df,padding = 5):
df_feature_description = df.describe()
df_filtered = pd.DataFrame()
indexes = np.array(range(0,df_feature_description.shape[1],padding))
for ii in indexes:
cols = df.columns[ii:ii+padding]
if np.all(df_feature_description.loc['max',cols] == 0)\
and np.all(df_feature_description.loc['min',cols] == 0):
continue
df_filtered[cols] = df[cols]
return df_filtered
X_train = remove_zero_padded_features(df_train.drop(labels=['class','njets'],axis=1))
selected_columns = X_train.columns
X_test = df_test[selected_columns]
X_val = df_validation[selected_columns]
y_train = df_train['class'].astype(int).values
y_test = df_test['class'].astype(int).values
y_val = df_validation['class'].astype(int).values
X_train.shape
###Output
_____no_output_____
###Markdown
Separating sets and Pre-processing
###Code
X_train_no_anomaly = df_train[df_train['class'] < 1.0][selected_columns].copy()
X_train_anomaly = df_train[df_train['class'] == 1.0][selected_columns].copy()
X_val_no_anomaly = df_validation[df_validation['class'] < 1.0][selected_columns].copy()
X_val_anomaly = df_validation[df_validation['class'] == 1.0][selected_columns].copy()
X_test_no_anomaly = df_test[df_test['class'] < 1.0][selected_columns].copy()
X_test_anomaly = df_test[df_test['class'] == 1.0][selected_columns].copy()
scaler1 = MinMaxScaler(feature_range=(-1,1)).fit(X_train)
X_train_norm_no_anomaly = scaler1.transform(X_train_no_anomaly)
X_val_norm_no_anomaly = scaler1.transform(X_val_no_anomaly)
X_test_norm_no_anomaly = scaler1.transform(X_test_no_anomaly)
# scaler2 = StandardScaler().fit(X_train_anomaly)
# X_train_norm_anomaly = scaler2.transform(X_train_anomaly)
# X_val_norm_anomaly = scaler2.transform(X_val_anomaly)
# X_test_norm_anomaly = scaler2.transform(X_test_anomaly)
X_train_norm = scaler1.transform(X_train)
X_val_norm = scaler1.transform(X_val)
X_test_norm = scaler1.transform(X_test)
###Output
_____no_output_____
###Markdown
Configuring AE
###Code
def build_simple_autoencoder():
nb_epoch = 500
batch_size = 32
input_dim = X_train_norm_no_anomaly.shape[1]
encoding_dim = 55
hidden_dim = int(encoding_dim / 2)
learning_rate = 1e-1
input_layer = Input(shape=(input_dim, ))
#encoder
encoder = Dense(encoding_dim)(input_layer)
encoder = BatchNormalization()(encoder)
encoder = Activation(activation='relu')(encoder)
encoder = Dense(hidden_dim)(encoder)
encoder = BatchNormalization()(encoder)
encoder = Activation(activation='relu')(encoder)
encoder = Dense(int(hidden_dim/2))(encoder)
encoder = BatchNormalization()(encoder)
encoder = Activation(activation='relu')(encoder)
#decoder
decoder = Dense(hidden_dim)(encoder)
decoder = BatchNormalization()(decoder)
decoder = Activation(activation='relu')(decoder)
decoder = Dense(encoding_dim)(decoder)
decoder = BatchNormalization()(decoder)
decoder = Activation(activation='relu')(decoder)
decoder = Dense(input_dim)(decoder)
decoder = BatchNormalization()(decoder)
decoder = Activation(activation='tanh')(decoder)
autoencoder = Model(inputs=input_layer, outputs=decoder)
autoencoder.summary()
return autoencoder
autoencoder = build_simple_autoencoder()
autoencoder.compile(metrics=['accuracy'],
loss='mean_squared_error',
optimizer='adam')
cp = ModelCheckpoint(filepath="autoencoder_classifier_reconstruction.h5",
save_best_only=True,
verbose=0)
earlyStopping = EarlyStopping(monitor = 'val_loss', patience = 30, mode = 'auto')
nb_epoch = 500   # nb_epoch and batch_size inside build_simple_autoencoder are local to that function, so define them here
batch_size = 32
start_time = time.time()
print('Starting Training for AutoEncoder...')
history = autoencoder.fit(X_train_norm_no_anomaly, X_train_norm_no_anomaly,
epochs=nb_epoch,
batch_size=batch_size,
shuffle=True,
validation_data=(X_val_norm_no_anomaly, X_val_norm_no_anomaly),
verbose=1,
callbacks=[cp,earlyStopping]).history
print('Total elapsed time:' ,(time.time() - start_time), 'seconds')
plt.style.use('seaborn-whitegrid')
plt.figure(figsize=(12, 8))
plt.plot(history['loss'], linewidth=2, label='Train')
plt.plot(history['val_loss'], linewidth=2, label='Valid')
plt.legend(loc='upper right')
plt.title('Model loss - AutoEncoder for Reconstruction')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.grid(True)
plt.savefig('Model_loss_AutoEncoder_reconstruction.png')
plt.show()
def mean_squared_error(real,prediction):
return np.mean(((real - prediction)**2), axis=1)
def rmse(real,prediction):
return np.mean(((real - prediction)**2)**0.5, axis=1)
X_train_no_anomaly_pred = autoencoder.predict(X_train_norm_no_anomaly)
X_train_rmse_array = rmse(X_train_no_anomaly_pred,X_train_norm_no_anomaly)
X_train_rmse = np.mean(X_train_rmse_array)
print('rmse = ',X_train_rmse)
df_train_no_anomaly = pd.DataFrame(X_train_norm_no_anomaly,columns=selected_columns)
display(pd.DataFrame(X_train_norm_no_anomaly,columns=selected_columns).head())
display(pd.DataFrame(X_train_no_anomaly_pred,columns=selected_columns).head())
X_train_rmse_array.min()
X_predictions = autoencoder.predict(X_val_norm)
mse = mean_squared_error(X_val_norm, X_predictions)
error_df = pd.DataFrame({'Reconstruction_error': mse,
'True_class': y_val})
# 'True_class': y_train[y_train < 1.0]})
precision_rt, recall_rt, threshold_rt = precision_recall_curve(error_df.True_class, error_df.Reconstruction_error)
plt.style.use('seaborn-whitegrid')
plt.figure(figsize=(14,8))
plt.plot(threshold_rt[:-1], precision_rt[1:-1], label="Precision",linewidth=5)
plt.plot(threshold_rt[:-1], recall_rt[1:-1], label="Recall",linewidth=5)
plt.title('Precision and recall for different MSE threshold values')
plt.xlabel('MSE Threshold')
plt.ylabel('Precision/Recall')
plt.legend()
plt.xlim(0,0.05)
plt.grid(True)
# plt.savefig('Precision_Recall_AutoEncoder_reconstruction.png')
X_test_pred = autoencoder.predict(X_test_norm)
y_pred_class = mean_squared_error(X_test_pred, X_test_norm).ravel()
threshold = 0.005
y_pred_class[y_pred_class > threshold] = 1
y_pred_class[y_pred_class <= threshold] = 0
###Output
_____no_output_____
###Markdown
Results
###Code
df_prediction = pd.DataFrame({'Predicted': y_pred_class.ravel(),
'True_class': y_test.ravel()})
LABELS = ["Background","Signal"]
conf_matrix = confusion_matrix(df_prediction.True_class, df_prediction.Predicted)
plt.figure(figsize=(8, 8))
sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt="d");
plt.title("Confusion matrix")
plt.xlabel('True class')
plt.ylabel('Predicted class')
plt.show()
target_names = ["Background","Signal"]
c = classification_report(df_prediction.True_class, df_prediction.Predicted,target_names = target_names)
print(c)
###Output
_____no_output_____ |
Notebooks/pyro/2-stochastic-variational-inference.ipynb | ###Markdown
Introduction to stochastic variational inference in pyro Inferring coin bias with the beta-binomial model
###Code
import numpy as np
import pyro.distributions as dist
import pyro
from torch.distributions import constraints
from pyro.optim import Adam
from pyro.infer import SVI, Trace_ELBO
import torch
from tqdm import tqdm
import arviz as az
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Specify the joint density
###Code
def model(data):
alpha0 = torch.tensor(10.0)
beta0 = torch.tensor(10.0)
f = pyro.sample("latent_fairness", dist.Beta(alpha0, beta0))
for i in range(len(data)):
pyro.sample("obs_{}".format(i), dist.Bernoulli(f), obs=data[i]) # Don't worry, we will vectorize this later
###Output
_____no_output_____
###Markdown
Specify the variational family
###Code
def guide(data): # guide and model must have the same signature, despite us not actually using the data in this case
alpha_q = pyro.param("alpha_q", torch.tensor(15.0), # optimization initialization
constraint=constraints.positive, # constrained optimization
) # requires_grad is automatically set to True
beta_q = pyro.param("beta_q", torch.tensor(15.0),
constraint=constraints.positive)
pyro.sample("latent_fairness", dist.Beta(alpha_q, beta_q))
###Output
_____no_output_____
###Markdown
Specify optimizer
###Code
adam_params = {"lr": 0.0005, "betas": (0.90, 0.999)}
optimizer = Adam(adam_params)
svi = SVI(model, guide, optimizer, loss=Trace_ELBO())
d = torch.tensor(np.random.binomial(n=1, p=0.55, size=10), dtype=float)
n_steps = 1000
for step in tqdm(range(n_steps)):
svi.step(d)
###Output
100%|██████████| 1000/1000 [00:04<00:00, 222.70it/s]
###Markdown
Grab the learned variational parameters
###Code
alpha_q = pyro.param("alpha_q").item()
beta_q = pyro.param("beta_q").item()
def summarize_beta(alpha, beta):
inferred_mean = alpha / (alpha + beta)
factor = beta / (alpha * (1.0 + alpha + beta))
inferred_std = inferred_mean * np.sqrt(factor)
return inferred_mean, inferred_std
summarize_beta(alpha_q, beta_q)
###Output
_____no_output_____
###Markdown
Conditional independence and subsampling The objective is to not have to touch every data point during inference, but rather approximate the log likelihood with mini-batches. Let $\boldsymbol{x}$ denote a data vector of observations, and $\boldsymbol{z}$ denote a vector of latent random variables $$\sum_{i=1}^N \log p(\boldsymbol{x}_i | \boldsymbol{z}) \approx \frac{N}{M} \sum_{i \in \mathcal{I}_M}^N \log p(\boldsymbol{x}_i | \boldsymbol{z})$$ where $\mathcal{I}_M$ is a mini-batch of indices of size $M$. To do this, we require the variational family to be a **conditionally conjugate model**, see [Blei's review](https://arxiv.org/pdf/1601.00670.pdf). The `pyro.plate` allows us to encode conditional independence in the model. Let's do that:
###Code
def model_vec(data):
alpha0 = torch.tensor(10.0)
beta0 = torch.tensor(10.0)
f = pyro.sample("latent_fairness", dist.Beta(alpha0, beta0))
for i in pyro.plate("data_loop", len(data)):
pyro.sample("obs_{}".format(i), dist.Bernoulli(f), obs=data[i]) # this allows us to leverage conditional independence of the observations given the latent variables
###Output
_____no_output_____
###Markdown
Let's make this more efficient by:- Vectorizing - Subsampling, so we can mini-batch
###Code
def model_vec_subsampled(data):
alpha0 = torch.tensor(10.0)
beta0 = torch.tensor(10.0)
f = pyro.sample("latent_fairness", dist.Beta(alpha0, beta0))
with pyro.plate("observe_data",
# Size is required so that the correct scaling factor can be computed
size=len(data),
# We only evaluate the log likelihood for 5 randomly chosen datapoints in the data,
# and the log likelihood will automatically get scaled by N/M
subsample_size=5,
# set the device to use a GPU
# device =
# A stateful subsampling scheme may be necessary -- it is possible to never touch
# some data points if the dataset is sufficiently large
# subsample =
) as ind:
pyro.sample("obs", dist.Bernoulli(f), obs=data.index_select(0, ind)) # this will be a tensor of length 5
svi = SVI(model_vec_subsampled, guide, optimizer, loss=Trace_ELBO())
n_steps = 1000
for step in tqdm(range(n_steps)):
svi.step(d)
alpha_q = pyro.param("alpha_q").item()
beta_q = pyro.param("beta_q").item()
summarize_beta(alpha_q, beta_q)
###Output
_____no_output_____
###Markdown
Conditionally conjugate models We may also have conditional independence in the variational distribution (the `guide`) too. Let $\beta$ be a vector of global latent variables, which potentially govern any of the data. Let $z$ be a vector of local latent variables, whose $i$th component only governs data in the $i$th "context". The joint density of a conditionally conjugate model is:$$p(\beta, \boldsymbol{z}, \boldsymbol{x}) = p(\beta) \prod_{i=1}^n p(z_i, x_i | \beta)$$ The variational family (according to the pyro docs, though I don't yet get how this gels with the review) should factorize like$$q(\beta, \boldsymbol{z}) = q(\beta) \prod_{i=1}^n q(z_i | \beta, \lambda_i)$$where $\lambda_i$ are local variational parameters (other variational parameters are left implicit). To achieve this in `pyro`, a `plate` should be used in both the model and the guide, ensuring that the guide's conditional independence structure respects that of the model. MAP estimation Consider a mixture of Gaussians model$$\mu_k \sim N(0, \sigma^2), k=1,...,K$$$$c_i \sim \text{Cat}(1/K, ..., 1/K), i=1,...,n$$$$x_i|c_i,\mu \sim \mathcal{N}(c_i^T \mu, 1), i=1,...,n$$ We will train a MAP estimator of $\mu_k$ by constructing a Dirac-distribution guide using `AutoDelta`. See the example in [the docs](https://pyro.ai/examples/gmm.html) also [these notes](https://bookdown.org/robertness/causalml/docs/tutorial-on-deep-probabilitic-modeling-with-pyro.html#gaussian-mixture-model-1).
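Before the mixture-of-Gaussians example, here is a minimal hypothetical sketch of that pattern: a global latent `beta`, local latents `z_i`, and matching `pyro.plate`s in both the model and the guide so the guide respects the model's conditional independence (the names, priors, and fixed scales are illustrative assumptions, not taken from the docs):

```python
import torch
import pyro
import pyro.distributions as dist

def cc_model(data):
    beta = pyro.sample("beta", dist.Normal(0., 1.))       # global latent variable
    with pyro.plate("data", len(data)):
        z = pyro.sample("z", dist.Normal(beta, 1.))       # local latents z_i
        pyro.sample("obs", dist.Normal(z, 1.), obs=data)

def cc_guide(data):
    beta_loc = pyro.param("beta_loc", torch.tensor(0.))
    pyro.sample("beta", dist.Normal(beta_loc, 1.))
    z_loc = pyro.param("z_loc", torch.zeros(len(data)))   # lambda_i: one local parameter per datum
    with pyro.plate("data", len(data)):                   # same plate name and size as in the model
        pyro.sample("z", dist.Normal(z_loc, 1.))
```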
###Code
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
from pyro.infer import config_enumerate
from pyro import poutine
from pyro.contrib.autoguide import AutoDelta
from pyro.infer import TraceEnum_ELBO, config_enumerate
np.random.seed(42)
K = 2
n = 100
sigma_mu = 10.
mu_k = np.random.normal(0, sigma_mu, size=K)
ci = np.random.choice(K, size=n)
sigma = 1
d = []
for i, c in enumerate(ci):
d.append(np.random.normal(mu_k[c], sigma))
d = np.hstack(d)
mu_k
plt.hist(d, bins=int(sigma_mu)*2);
pyro.enable_validation(True)
data = torch.tensor(d, dtype=torch.float)
@config_enumerate
def model_mixture_of_gaussians(data):
weights = torch.ones(K, dtype=torch.float)/K
with pyro.plate("mu", K):
locs = pyro.sample("locs", dist.Normal(0., sigma_mu))
with pyro.plate("data", len(data)):
assignment = pyro.sample('assignment', dist.Categorical(weights))
pyro.sample('obs', dist.Normal(locs[assignment], 1.), obs=data)
# Let pyro make a guide automatically for us, using Delta distributions to find the parameters of interest (the component locations)
auto_guide = AutoDelta(poutine.block(model_mixture_of_gaussians, expose=['locs'])) # automatically makes a variational distribution from the model for MAP estimation
optim = pyro.optim.Adam({'lr': 0.1, 'betas': [0.8, 0.99]})
elbo = TraceEnum_ELBO(max_plate_nesting=1)
svi = SVI(model_mixture_of_gaussians, auto_guide, optim, loss=elbo)
def initialize(seed, guide):
pyro.set_rng_seed(seed)
pyro.clear_param_store()
# Initialize means from a subsample of data.
pyro.param('auto_locs', data[torch.multinomial(torch.ones(len(data)) / len(data), K)]);
loss = svi.loss(model_mixture_of_gaussians, guide, data)
return loss
# Choose the best among 100 random initializations.
loss, seed = min((initialize(seed, auto_guide), seed) for seed in range(100))
initialize(seed, auto_guide)
print('seed = {}, initial_loss = {}'.format(seed, loss))
losses = []
for i in tqdm(range(200)):
loss = svi.step(data)
losses.append(loss)
plt.figure(figsize=(10,3), dpi=100).set_facecolor('white')
plt.plot(losses)
plt.xlabel('iters')
plt.ylabel('loss')
plt.yscale('log')
plt.title('Convergence of SVI');
map_estimates = auto_guide(data)
locs = map_estimates['locs']
locs
mu_k
###Output
_____no_output_____ |
notebooks/PopulationGrowth.ipynb | ###Markdown
We use a population growth model $$Pa = P * (1+r)^{-T}$$ where P = initial population value, T = time span in years (population data year minus event year), r = growth rate, and Pa = adjusted population. Our growth rates come from a UN spreadsheet, found here: http://esa.un.org/unpd/wpp/Download/Standard/Population/ The spreadsheet has growth rates per country, in half-decade increments. The basic function *adjust_pop* is available through the public interface.
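A minimal sketch of that formula (assuming T is measured as the population-data year minus the event year, the sign convention the examples below imply; the real `adjust_pop` may differ in details):

```python
def adjust_pop_sketch(pop, tpop, tevent, rate):
    # Pa = P * (1 + r)**(-T), with T = tpop - tevent (assumption inferred from the examples)
    T = tpop - tevent
    return pop * (1.0 + rate) ** (-T)

print(adjust_pop_sketch(1e6, 2015, 2016, 0.01))  # ~1,010,000: one year of 1% growth
print(adjust_pop_sketch(1e6, 2016, 2015, 0.01))  # ~990,099: adjusting one year backwards
```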
###Code
tpop = 2015
tevent = 2016
ccode = 841 #US
pop = 1e6
rate = 0.01 #1% growth rate
newpop = adjust_pop(pop,tpop,tevent,rate)
print('Adjusted population is: %s' % (format(int(newpop),",d")))
###Output
Adjusted population is: 1,010,000
###Markdown
We can have negative population growth...
###Code
tpop = 2016
tevent = 2015
pop = 1e6
rate = 0.01 #1% growth rate
newpop = adjust_pop(pop,tpop,tevent,rate)
print('Adjusted population is: %s' % (format(int(newpop),",d")))
###Output
Adjusted population is: 990,099
###Markdown
Normally we will use the PopulationGrowth class, created from the UN spreadsheet.
###Code
excelfile = os.path.join(os.getcwd(),'..','test','data','WPP2015_POP_F02_POPULATION_GROWTH_RATE.xls')
pg = PopulationGrowth.loadFromUNSpreadsheet(excelfile)
###Output
_____no_output_____
###Markdown
We can get all the rates for a given (ISO numeric) country code.
###Code
years,rates = pg.getRates(840)
for year,rate in zip(years,rates):
print('%i: %.5f' % (year,rate))
plt.plot(years,rates);
plt.xlabel('Year');
plt.ylabel('Population Growth Rate');
plt.title('United States Growth Rates');
###Output
1950: 0.01581
1955: 0.01724
1960: 0.01373
1965: 0.00987
1970: 0.00885
1975: 0.00948
1980: 0.00945
1985: 0.00985
1990: 0.01035
1995: 0.01211
2000: 0.00915
2005: 0.00907
2010: 0.00754
###Markdown
We can also just get a single year.
###Code
rate = pg.getRate(840,1963)
print(rate)
###Output
0.01373
###Markdown
Finally, and perhaps most usefully, we can use this class to apply the population growth rates for a country to a population data set (scalar or array).
###Code
population = 1e6
startyear = 1993
endyear = 2016
newpop = pg.adjustPopulation(population,'US',startyear,endyear)
print(format(int(newpop),",d"))
###Output
1,306,742
|
mavenn/development/20.08.26_load_models_for_JBK/.ipynb_checkpoints/mavenn_load_model_MPSA-checkpoint.ipynb | ###Markdown
Imports and set path to local mavenn
###Code
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from numpy.random import default_rng
import warnings
warnings.filterwarnings('ignore')
import sys
path_to_mavenn_local = '/Users/tareen/Desktop/Research_Projects/2020_mavenn_github/mavenn'
sys.path.insert(0,path_to_mavenn_local)
import mavenn
from mavenn.src.utils import get_example_dataset
from mavenn.src.utils import ge_plots_for_mavenn_demo
from mavenn.src.utils import onehot_encode_array, _generate_nbr_features_from_sequences, _generate_all_pair_features_from_sequences
from mavenn.src.utils import _center_matrix
import tensorflow as tf
import tensorflow.keras.backend as K
# Path being used:
mavenn.__path__
###Output
_____no_output_____
###Markdown
Load data, and estimate intrinsic information
###Code
# Load these data so that we may use test data for predictions.
MPSA_data = pd.read_csv('20.08.16_mpsa_data/brca2_lib1_rep1.csv')
MPSA_data.head()
X = MPSA_data['ss'].values
y = MPSA_data['log_psi'].values
dy = MPSA_data['dlog_psi'].values
ix = (y > 0) & (dy > 0)
mavenn.estimate_instrinsic_information(y[ix],dy[ix],True)
# split data into test, we only need test data here, the loaded
# model will already contain the training data it was trained on
x_train, x_test, y_train, y_test = train_test_split(X, y, random_state = 0)
###Output
_____no_output_____
###Markdown
Load model from file
###Code
# GER_additive = mavenn.load('model_files/gaussian_mpsa_model_additive')
# GER_pairwise = mavenn.load('model_files/gaussian_mpsa_model')
GER_additive = mavenn.load('model_files/skewT_mpsa_model_additive')
GER_pairwise = mavenn.load('model_files/skewT_mpsa_model_pairwise')
# load_config = pd.read_csv('model_files/skewT_mpsa_model_pairwise.csv', index_col=[0])
# #len(load_config)
#type(load_config['ge_nonlinearity_monotonic'].loc[0])
GER_additive.gpmap_type, GER_pairwise.gpmap_type
###Output
_____no_output_____
###Markdown
Make predictions and compute latent phenotype values
###Code
# predictions
yhat_additive = GER_additive.x_to_yhat(x_test)
# evaluate phi for sequences
phi_additive = GER_additive.x_to_phi(x_test)
# evaluate g(phi) for continuous phi
phi_range_additive = np.linspace(min(phi_additive),max(phi_additive),1000)
y_hat_GE_additive = GER_additive.phi_to_yhat(phi_range_additive)
# noise model that is used to get eta parameters
qs_additive = GER_additive.yhat_to_yq(y_hat_GE_additive,q=np.array([0.16,0.84]))
# predictions
yhat_pairwise = GER_pairwise.x_to_yhat(x_test)
# evaluate phi for sequences
phi_pairwise = GER_pairwise.x_to_phi(x_test)
# evaluate g(phi) for continuous phi
phi_range_pairwise = np.linspace(min(phi_pairwise),max(phi_pairwise),1000)
y_hat_GE_pairwise = GER_pairwise.phi_to_yhat(phi_range_pairwise)
# noise model that is used to get eta parameters
qs_pairwise = GER_pairwise.yhat_to_yq(y_hat_GE_pairwise,q=np.array([0.16,0.84]))
SH_train = pd.read_excel('Sailer_Harms_Spline_fit_MPSA_data.xlsx',sheet_name='train')
SH_test = pd.read_excel('Sailer_Harms_Spline_fit_MPSA_data.xlsx',sheet_name='test')
SH_line = pd.read_excel('Sailer_Harms_Spline_fit_MPSA_data.xlsx',sheet_name='line')
qs_pairwise.shape
###Output
_____no_output_____
###Markdown
Show plots
###Code
fig, ax = plt.subplots(3,2,figsize=(8,12))
Rsq = np.corrcoef(yhat_additive.ravel(),y_test)[0][1]**2
ax[0,0].scatter(yhat_additive,y_test,s=5,alpha=0.4)
ax[0,0].set_xlabel('Predictions (test)')
ax[0,0].set_ylabel('Observations (test)')
ax[0,0].set_title('$R^2$: '+str(Rsq)[0:5]+' (additive)')
ax[0,1].plot(phi_range_additive,GER_additive.phi_to_yhat(phi_range_additive))
ax[0,1].scatter(phi_additive,y_test,s=0.25, alpha=0.4, label='Observations')
ax[0,1].plot(phi_range_additive,GER_additive.phi_to_yhat(phi_range_additive),lw=2,label='$\hat{y}$',alpha=1.0,color='black')
for q_index in range(qs_additive.shape[1]):
ax[0,1].plot(phi_range_additive,qs_additive[:,q_index].ravel(),color='orange',lw=2,alpha=0.85,label='$\hat{y} \pm \sigma(\hat{y})$')
ax[0,1].set_ylabel('Observations')
ax[0,1].set_xlabel('Latent phenotype ($\phi$)')
ax[0,1].set_title(GER_additive.ge_noise_model_type+' Likelihood, (additive)')
Rsq = np.corrcoef(yhat_pairwise.ravel(),y_test)[0][1]**2
ax[1,0].scatter(yhat_pairwise,y_test,s=5,alpha=0.4)
ax[1,0].set_xlabel('Predictions (test)')
ax[1,0].set_ylabel('Observations (test)')
ax[1,0].set_title('$R^2$: '+str(Rsq)[0:5]+' (pairwise)')
ax[1,1].plot(phi_range_pairwise,GER_pairwise.phi_to_yhat(phi_range_pairwise))
ax[1,1].scatter(phi_pairwise,y_test,s=0.25, alpha=0.4, label='Observations')
ax[1,1].plot(phi_range_pairwise,GER_pairwise.phi_to_yhat(phi_range_pairwise),lw=2,label='$\hat{y}$',alpha=1.0,color='black')
for q_index in range(qs_pairwise.shape[1]):
ax[1,1].plot(phi_range_pairwise,qs_pairwise[:,q_index].ravel(),color='orange',lw=2,alpha=0.85,label='$\hat{y} \pm \sigma(\hat{y})$')
ax[1,1].set_ylabel('Observations')
ax[1,1].set_xlabel('Latent phenotype ($\phi$)')
ax[1,1].set_title(GER_pairwise.ge_noise_model_type+' Likelihood, (pairwise)')
ax[2,1].scatter(SH_train['y_add'].values,SH_train['y_obs'].values,s=0.25,alpha=0.25)
ax[2,1].plot(SH_line['y_add_line'].values,SH_line['y_obs_line'].values,color='black',lw=2)
ax[2,1].set_xlabel('Latent phenotype ($\phi$)')
ax[2,1].set_ylabel('Observations')
ax[2,1].set_title('Sailer-Harms Spline Epistasis (additive)')
Rsq = np.corrcoef(SH_test['yhat_test'].values,SH_test['y_test'].values)[0][1]**2
ax[2,0].scatter(SH_test['yhat_test'].values,SH_test['y_test'].values,s=5,alpha=0.4)
ax[2,0].set_xlabel('Predictions (test)')
ax[2,0].set_ylabel('Observations (test)')
ax[2,0].set_title('$R^2$: '+str(Rsq)[0:5]+' (Spline Epistasis 5th Order)')
plt.tight_layout()
plt.show()
GER_additive.x_to_phi('TAGGCTTCA'),GER_pairwise.x_to_phi('TAGGCTTCA')
###Output
_____no_output_____ |
1.Study/2. with computer/4.Programming/2.Python/9. Numpy/ch1/summary_ch1.ipynb | ###Markdown
- Making ndarray
- Information About ndarray: shape, size/itemsize, dtype
- ndarray Indexing and Slicing
- Reshape, Resize and Vectorization

Quiz
```
[ch1]
1. Name five ways of making an ndarray (regardless of what the entries are).
2. Which piece of ndarray information returns the memory size consumed per entry?
3. Which piece of ndarray information returns the number of entries?
4. If a is a 3x4 list, how do you index the (1,1) entry? And if a is an ndarray?
5. What is the difference between ndarray.resize((3,4)) and ndarray.reshape((3,4))?
```
**Chapter1 ndarray_Notebook1 Making ndarray1-2**
```
1. np.array( [ ] )
2. np.zeros( shape )
3. np.ones( shape )
4. np.empty( shape )
5. np.full( shape, fill_value )
```
```
1. np.zeros_like( ndarray )
2. np.ones_like( ndarray )
3. np.empty_like( ndarray )
4. np.full_like( ndarray, fill_value )
```
**Chapter1 ndarray: Notebook3 Information About ndarray**
```
1. shape
   np.array.shape => returns a tuple ( , )
2. size / itemsize
   np.array.size => returns the number of entries
   ex) np.ones((2,3)).size => 6
   np.array.itemsize => returns the memory consumed per entry (in bytes)
   ex) np.ones((2,3), dtype=np.int).itemsize => 8 bytes (64 bits) per entry
       np.ones((2,3), dtype=np.int8).itemsize => 1 byte (8 bits) per entry
   * NumPy allocates 64 bits per entry by default. Image data only needs 8 bits per entry,
     so keeping the default 64-bit dtype wastes (64-8) bits of memory per entry.
     Therefore, choose dtype carefully.
3. dtype
   np.array.dtype
```
**Chapter1 ndarray: Notebook4-5 ndarray Indexing and Slicing**
```
1. Indexing / slicing a list-type matrix
   a = list([[1,2],[2,3],[3,4]])
   a[1][1] => 3
2. Indexing / slicing an ndarray-type matrix
   b = np.array(a)
   b[1,1] => 3
3. Difference between list and ndarray indexing/slicing
   list    => slicing returns a copy.
   ndarray => indexing/slicing returns a view that still references the original data
              (so if the original values change, the sliced array changes too).
   * For that reason, np.array[1:,].copy() is sometimes used to store a copy, the way a list slice would.
   * ndarrays usually hold very large data, so this design philosophy minimizes memory consumption.
```
**Chapter1 ndarray: Notebook6 Reshape Resize and Vectorization**
**reshape and resize play the same role**
```
1. numpy.reshape
2. numpy.resize
3. ndarray.reshape
4. ndarray.resize
```
[Differences]
1. resize => operates in place.
2. reshape => returns a view referencing the original data, so if the original changes, the reshaped data changes too.
   * If you do not want the reshaped data to be affected when the original changes, use '.copy()': np.array.reshape( () ).copy()
   ex) a = np.full((2,6), fill_value=2)
       b = a.reshape((4,3))
       a[1] = 10 => b (the reshaped array) changes too
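A small runnable illustration of the points above (shapes and fill values are chosen arbitrarily for demonstration):
###Code
import numpy as np
# Five basic ways to create an ndarray
a1 = np.array([[1, 2], [3, 4]])
a2 = np.zeros((2, 3))
a3 = np.ones((2, 3))
a4 = np.empty((2, 3))
a5 = np.full((2, 3), fill_value=7)
# Information about an ndarray
print(a5.shape)      # (2, 3)
print(a5.size)       # 6 entries
print(a5.dtype)      # int64 on most platforms
print(np.ones((2, 3), dtype=np.int64).itemsize)  # 8 bytes (64 bits) per entry
print(np.ones((2, 3), dtype=np.int8).itemsize)   # 1 byte (8 bits) per entry
###Output
_____no_output_____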
###Code
a = np.full((3,4), fill_value=3)
a
b = a.reshape((2,6))
c = a.reshape((2,6)).copy()
b
a[0] = 10
b
c
###Output
_____no_output_____ |
.ipynb_checkpoints/Pharma Procurement System-checkpoint.ipynb | ###Markdown
Input universe of discourse for input variables
###Code
x_stock = x_budget = x_demand = np.arange(0, 11)
#x_budget = np.arange(0, 1.1, 0.1)
###Output
_____no_output_____
###Markdown
membership functions Stock
###Code
low_stock = fuzz.trimf(x_stock, [0, 0, 3])
medium_stock = fuzz.trimf(x_stock, [2, 5, 8])
high_stock = fuzz.trimf(x_stock, [7, 10, 10])
###Output
_____no_output_____
###Markdown
Budget
###Code
low_budget = fuzz.sigmf(x_budget, 2, -4)
medium_budget = fuzz.gaussmf(x_budget, 5, 1)
high_budget = fuzz.sigmf(x_budget, 8, 4)
###Output
_____no_output_____
###Markdown
Demand
###Code
low_demand = fuzz.trimf(x_demand, [0, 0, 3])
medium_demand = fuzz.trimf(x_demand, [2, 5, 8])
high_demand = fuzz.trimf(x_demand, [7, 10, 10])
###Output
_____no_output_____
###Markdown
Creating figures to show membership functions
###Code
_, (stock_axis, budget_axis, demand_axis) = plt.subplots(nrows=3, figsize=(8, 9))
# adding title to axes
stock_axis.set_title('Stock')
budget_axis.set_title('Budget')
demand_axis.set_title('Demand')
# adding membership functions to their respective axes
## stock axis
stock_axis.plot(x_stock, low_stock, 'g', label='Low')
stock_axis.plot(x_stock, medium_stock, 'r', label='Medium')
stock_axis.plot(x_stock, high_stock, 'b', label='High')
## budget axis
budget_axis.plot(x_budget, low_budget, 'g', label='Low')
budget_axis.plot(x_budget, medium_budget, 'r', label='Medium')
budget_axis.plot(x_budget, high_budget, 'b', label='High')
## demand axis
demand_axis.plot(x_demand, low_demand, 'g', label='Low')
demand_axis.plot(x_demand, medium_demand, 'r', label='Medium')
demand_axis.plot(x_demand, high_demand, 'b', label='High')
# adding legends to axes
stock_axis.legend()
budget_axis.legend()
demand_axis.legend()
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Output universe of discourse for output
###Code
x_order = np.arange(0, 11)
###Output
_____no_output_____
###Markdown
membership function
###Code
#order_none = fuzz.trapmf(x_order, [0, 0, 10, 10])
order_none = fuzz.trimf(x_stock, [0, 0, 2])
order_some = fuzz.trimf(x_stock, [1, 3, 5])
order_half = fuzz.trimf(x_stock, [4, 5, 6])
order_more_than_half = fuzz.trimf(x_stock, [5, 7, 9])
order_all = fuzz.trimf(x_stock, [8, 10, 10])
#order_all = fuzz.trimf(x_order, [0, 0.1, 0.2])
_, (order_axis) = plt.subplots(nrows=1, figsize=(8, 3))
order_axis.plot(x_order, order_none, 'r', label='None')
order_axis.plot(x_order, order_some, 'b', label='Some')
order_axis.plot(x_order, order_half, 'g', label='Half')
order_axis.plot(x_order, order_more_than_half, 'y', label='> half')
order_axis.plot(x_order, order_all, color='pink', label='All')
order_axis.legend()
plt.tight_layout()
###Output
_____no_output_____
###Markdown
User inputs for inferencing
###Code
stock_input = 1
budget_input = 9
demand_input = 8
stock_level_low = fuzz.interp_membership(x_stock, low_stock, stock_input)
stock_level_medium = fuzz.interp_membership(x_stock, medium_stock, stock_input)
stock_level_high = fuzz.interp_membership(x_stock, high_stock, stock_input)
print(stock_level_low, stock_level_medium, stock_level_high)
budget_level_low = fuzz.interp_membership(x_budget, low_budget, budget_input)
budget_level_medium = fuzz.interp_membership(x_budget, medium_budget, budget_input)
budget_level_high = fuzz.interp_membership(x_budget, high_budget, budget_input)
print(budget_level_low, budget_level_medium, budget_level_high)
demand_level_low = fuzz.interp_membership(x_demand, low_demand, demand_input)
demand_level_medium = fuzz.interp_membership(x_demand, medium_demand, demand_input)
demand_level_high = fuzz.interp_membership(x_demand, high_demand, demand_input)
print(demand_level_low, demand_level_medium, demand_level_high)
###Output
0.6666666666666666 0.0 0.0
6.914400106935423e-13 0.00033546262790251185 0.9820137900379085
0.0 0.0 0.3333333333333333
###Markdown
Rule base
###Code
rule_1 = np.fmin(stock_level_low, np.fmin(demand_level_low, budget_level_low))
rule_2 = np.fmin(stock_level_low, np.fmin(demand_level_low, budget_level_medium))
rule_3 = np.fmin(stock_level_low, np.fmin(demand_level_low, budget_level_high))
rule_4 = np.fmin(stock_level_low, np.fmin(demand_level_medium, budget_level_low))
rule_5 = np.fmin(stock_level_low, np.fmin(demand_level_medium, budget_level_medium))
rule_6 = np.fmin(stock_level_low, np.fmin(demand_level_medium, budget_level_high))
rule_7 = np.fmin(stock_level_low, np.fmin(demand_level_high, budget_level_low))
rule_8 = np.fmin(stock_level_low, np.fmin(demand_level_high, budget_level_medium))
rule_9 = np.fmin(stock_level_low, np.fmin(demand_level_high, budget_level_high))
rule_10 = np.fmin(stock_level_medium, np.fmin(demand_level_low, budget_level_low))
rule_11 = np.fmin(stock_level_medium, np.fmin(demand_level_low, budget_level_medium))
rule_12 = np.fmin(stock_level_medium, np.fmin(demand_level_low, budget_level_high))
rule_13 = np.fmin(stock_level_medium, np.fmin(demand_level_medium, budget_level_low))
rule_14 = np.fmin(stock_level_medium, np.fmin(demand_level_medium, budget_level_medium))
rule_15 = np.fmin(stock_level_medium, np.fmin(demand_level_medium, budget_level_high))
rule_16 = np.fmin(stock_level_medium, np.fmin(demand_level_high, budget_level_low))
rule_17 = np.fmin(stock_level_medium, np.fmin(demand_level_high, budget_level_medium))
rule_18 = np.fmin(stock_level_medium, np.fmin(demand_level_high, budget_level_high))
rule_19 = stock_level_high
# none as output
activation_none = np.fmin(rule_1, np.fmin(rule_10, np.fmin(rule_11, np.fmin(rule_13, np.fmin(rule_19, order_none)))))
# some as output
activation_some = np.fmin(rule_2, np.fmin(rule_4, np.fmin(rule_7,np.fmin(rule_12,np.fmin(rule_14,np.fmin(rule_16, order_some))))))
# half as output
activation_half = np.fmin(rule_3, np.fmin(rule_5, np.fmin(rule_6, np.fmin(rule_8, order_half))))
# more_than_half as output
activation_more_than_half = np.fmin(rule_6, np.fmin(rule_16, np.fmin(rule_18, order_more_than_half)))
# all as output
activation_all = np.fmin(rule_9, np.fmin(rule_18, order_all))
print(activation_none, activation_some, activation_half, activation_more_than_half, activation_all)
order0 = np.zeros_like(x_order)
fig, ax0 = plt.subplots(figsize=(8, 3))
ax0.fill_between(x_order, order0, activation_none, facecolor='b', alpha=0.7)
ax0.plot(x_order, order_none, 'b', linewidth=0.5, linestyle='--', )
ax0.fill_between(x_order, order0, activation_some, facecolor='g', alpha=0.7)
ax0.plot(x_order, order_some, 'g', linewidth=0.5, linestyle='--')
ax0.fill_between(x_order, order0, activation_half, facecolor='r', alpha=0.7)
ax0.plot(x_order, order_half, 'r', linewidth=0.5, linestyle='--')
ax0.fill_between(x_order, order0, activation_more_than_half, facecolor='Orange', alpha=0.7)
ax0.plot(x_order, order_more_than_half, color='orange', linewidth=0.5, linestyle='--')
ax0.fill_between(x_order, order0, activation_all, facecolor='Yellow', alpha=0.7)
ax0.plot(x_order, order_all, 'y', linewidth=0.5, linestyle='--')
ax0.set_title('Output membership activity')
plt.tight_layout()
aggregated = np.fmax(activation_none, np.fmax(activation_some, np.fmax(activation_half, np.fmax(activation_more_than_half, activation_all))))
print(aggregated)
# Calculate defuzzified result
output_order = fuzz.defuzz(x_order, aggregated, 'centroid')
output_order_activation = fuzz.interp_membership(x_order, aggregated, output_order)
# Visualize this
fig, ax0 = plt.subplots(figsize=(8, 3))
ax0.plot(x_order, activation_none, 'b', linewidth=0.5, linestyle='--', )
ax0.plot(x_order, activation_some, 'b', linewidth=0.5, linestyle='--', )
ax0.plot(x_order, activation_half, 'b', linewidth=0.5, linestyle='--', )
ax0.plot(x_order, activation_more_than_half, 'b', linewidth=0.5, linestyle='--', )
ax0.plot(x_order, activation_all, 'b', linewidth=0.5, linestyle='--', )
ax0.fill_between(x_order, order0, aggregated, facecolor='Purple', alpha=0.7)
ax0.plot([output_order, output_order], [0, output_order_activation], 'k', linewidth=1.5, alpha=0.9)
ax0.set_title('Aggregated membership and result (line)')
###Output
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
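###Markdown
As a final step (an added sketch, not part of the original notebook), we can report the crisp `output_order` value produced by the centroid defuzzification above in plain terms.
###Code
# Added illustration: report the defuzzified recommendation from the cell above.
# `output_order` was computed with fuzz.defuzz(...) and lives on the 0-10 output
# universe of discourse defined earlier in this notebook.
print(f"Defuzzified order level: {output_order:.2f} on the 0-10 scale")
print(f"As a fraction of the maximum order quantity: {output_order / 10:.0%}")
###Output
_____no_output_____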
|
src/triage/component/audition/example_audition_notebook.ipynb | ###Markdown
Setting up the Auditioner instanceCurrently you need to specify the set of `model_group_id`s and `train_end_time`s you want to use manually, so here we're reading a few sets out of the database.Additionally, you need to specify a name for the best distance table when creating the `Auditioner` and should ensure it doesn't already exist.For simplicity, we'll just look at precision@300_abs here.
###Code
conn = catwalk.db.connect()
sel = """
SELECT model_group_id
FROM results.model_groups
WHERE model_config->>'label_definition' = 'any_serious_violation'
ORDER BY RANDOM()
;
"""
model_groups = list(pd.read_sql(sel,conn)['model_group_id'])
sel = """
SELECT DISTINCT train_end_time
FROM results.models
WHERE model_group_id IN ({})
AND EXTRACT(MONTH FROM train_end_time) IN (1,4,7,10)
AND train_end_time >= '2012-01-01'
ORDER BY train_end_time
;
""".format(', '.join(map(str, model_groups)))
end_times = list(pd.read_sql(sel, conn)['train_end_time'])
aud = Auditioner(
db_engine = conn,
model_group_ids = model_groups,
train_end_times = end_times,
initial_metric_filters = [{'metric': 'precision@', 'parameter': '300_abs', 'max_from_best': 1.0, 'threshold_value': 0.0}],
models_table = 'models',
distance_table = 'kr_test_dist'
)
###Output
_____no_output_____
###Markdown
Plotting the best distance metric and groups over timeThis is done with the `plot_model_groups` method and may take a minute to generate.
###Code
aud.plot_model_groups()
###Output
_____no_output_____
###Markdown
Applying thresholds to weed out bad models. Here we use the `update_metric_filters` method to apply a set of filters to the model groups we're considering in order to eliminate poorly performing ones. The model groups will be plotted again after updating the filters.
###Code
aud.update_metric_filters(
[{
'metric': 'precision@',
'parameter': '300_abs',
'max_from_best': 0.2,
'threshold_value': 0.0
}]
)
###Output
_____no_output_____
###Markdown
Apply a round of filtering, starting with no threshold_value and a fairly wide margin on max_from_best
###Code
# how many model groups are left after the first round of filtering?
len(aud.thresholded_model_group_ids)
###Output
_____no_output_____
###Markdown
That didn't thin things out too much, so let's get a bit more aggressive with both parameters:
###Code
aud.update_metric_filters([{
'metric': 'precision@',
'parameter': '300_abs',
'max_from_best': 0.1,
'threshold_value': 0.5
}])
len(aud.thresholded_model_group_ids)
###Output
_____no_output_____
###Markdown
That's starting to look better, but we can probably narrow things down even a bit more...
###Code
aud.update_metric_filters([{
'metric': 'precision@',
'parameter': '300_abs',
'max_from_best': 0.05,
'threshold_value': 0.65
}])
len(aud.thresholded_model_group_ids)
###Output
_____no_output_____
###Markdown
This looks like a better set of prospective models to consider. We could potentially even back off a little bit, but it certainly seems like we've cleared out most of the worst models. Applying selection rules and calculating regrets for the narrowed set of models. The goal of audition is to narrow a very large number of model groups down to a small number of best candidates, ideally making use of the full time series of information. There are several ways one could consider doing so: using over-time averages of the metrics of interest, weighted averages to balance between metrics, the distance from the best metrics, or balancing metric average values and stability. Audition formalizes this idea through "selection rules" that take in the data up to a given point in time, apply some rule to choose a model group, and then evaluate how far the chosen model falls short of the best-performing model in the subsequent time window; this shortfall is called the `regret`. You can register, evaluate, and update selection rules associated with the `Auditioner` object as shown below.
###Code
seln_rules = [{
'shared_parameters': [
{'metric': 'precision@', 'parameter': '300_abs'}
],
'selection_rules': [
{'name': 'best_current_value'},
{'name': 'best_average_value'},
{'name': 'most_frequent_best_dist', 'dist_from_best_case': [0.01, 0.05, 0.1, 0.15]}
]
},
{
'shared_parameters': [
{'metric': 'precision@', 'parameter': '300_abs'}
],
'selection_rules': [
{'name': 'best_avg_recency_weight', 'curr_weight': [1.5, 2.0, 5.0], 'decay_type': ['linear']}
]
},
{
'shared_parameters': [{}],
'selection_rules': [{'name': 'random_model_group'}]
}]
aud.register_selection_rule_grid(seln_rules)
###Output
_____no_output_____
###Markdown
Finally, when you have a selection rule grid you're happy with, the `selection_rule_model_group_ids` attribute of the `Auditioner` will give you the model groups chosen by the selection rules in the grid when applied to the most recent end time, for use in application:
###Code
aud.selection_rule_model_group_ids
###Output
_____no_output_____ |
A3 - Bird Classification Challenge/Mask R-CNN/demo/Mask_R-CNN_demo.ipynb | ###Markdown
Mask R-CNN demoThis notebook illustrates one possible way of using `maskrcnn_benchmark` for computing predictions on images from an arbitrary URL.Let's start with a few standard imports
###Code
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import requests
from io import BytesIO
from PIL import Image
import numpy as np
import os, sys
import cv2
sys.path.append(os.path.dirname(os.getcwd()))
# this makes our figures bigger
pylab.rcParams['figure.figsize'] = 20, 12
import torch
print(torch.__version__)
###Output
_____no_output_____
###Markdown
Those are the relevant imports for the detection model
###Code
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
###Output
_____no_output_____
###Markdown
We provide a helper class `COCODemo`, which loads a model from the config file and performs pre-processing, model prediction and post-processing for us. We can configure several model options by overriding the config options. Here, we make the model run on the CPU.
###Code
config_file = "../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"
# update the config options with the config file
cfg.merge_from_file(config_file)
# manual override some options
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
###Output
_____no_output_____
###Markdown
Now we create the `COCODemo` object. It contains a few extra options for convenience, such as the confidence threshold for detections to be shown.
###Code
coco_demo = COCODemo(
cfg,
min_image_size=800,
confidence_threshold=0.6,
)
###Output
_____no_output_____
###Markdown
Let's define a few helper functions for loading images from a URL
###Code
def load(url):
"""
Given an url of an image, downloads the image and
returns a PIL image
"""
response = requests.get(url)
pil_image = Image.open(BytesIO(response.content)).convert("RGB")
# convert to BGR format
image = np.array(pil_image)[:, :, [2, 1, 0]]
return image
def imshow(img):
plt.imshow(img[:, :, [2, 1, 0]])
plt.axis("off")
###Output
_____no_output_____
###Markdown
Let's now load an image from the COCO dataset. Its reference is in the comment.
###Code
# from http://cocodataset.org/#explore?id=345434
path = "/home/amine/Documents/3A MVA/Semestre 1/Object Recognition and Computer Vision/HW3/bird_dataset/train_images/021.Eastern_Towhee"
# image = load("http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg")
image = cv2.imread(path+"/Eastern_Towhee_0117_22741.jpg")
# cv2.imshow('Bird example',image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
###Output
_____no_output_____
###Markdown
Computing the predictionsWe provide a `run_on_opencv_image` function, which takes an image as it was loaded by OpenCV (in `BGR` format), and computes the predictions on them, returning an image with the predictions overlayed on the image.
###Code
# compute predictions
coco_demo = COCODemo(
cfg,
min_image_size=800,
confidence_threshold=0.7,
)
# predictions = coco_demo.run_on_opencv_image(image)
# imshow(predictions)
# plt.show()
cropImage(image, coco_demo)
def cropImage(image, cocomodel):
predictions = cocomodel.compute_prediction(image)
top_predictions = cocomodel.select_top_predictions(predictions)
result = image.copy()
masks = top_predictions.get_field("mask").numpy()
labels = top_predictions.get_field("labels")
colors = cocomodel.compute_colors_for_labels(labels).tolist()
contours = None
for mask, color in zip(masks, colors):
thresh = mask[0, :, :, None]
_, contours, hierarchy = cv2.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
image = cv2.drawContours(result, contours, -1, color, 3)
idx=0
new_img = None  # stays None if no sufficiently large crop is found
if contours is None:
return None
for i,c in enumerate(contours):
x,y,w,h = cv2.boundingRect(c)
if w>50 and h>50:
idx+=1
new_img=image[y:y+h,x:x+w]
cv2.imshow('Mask R-CNN example',new_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
return new_img
# cv2.imshow('image', new_img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# from http://cocodataset.org/#explore?id=345434
# path = "/home/amine/Documents/3A MVA/Semestre 1/Object Recognition and Computer Vision/HW3/bird_dataset/train_images/021.Eastern_Towhee"
# # image = load("http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg")
# image = cv2.imread(path+"/Eastern_Towhee_0117_22741.jpg")
# # imshow(image)
# # plt.show()
# cropImage(image, coco_demo)
import os
rootDir = "../../../bird_dataset/"
def list_files(dir):
r = []
for root, dirs, files in os.walk(dir):
for name in files:
r.append(os.path.join(root, name))
return r
directories = list_files(rootDir)
# Generating cropped images for each image in directories
for i,p in enumerate(directories):
if i%100==0:
print(i*100/len(directories))
image = cv2.imread(p)
new_img = cropImage(image, coco_demo)
if new_img is not None:
cv2.imwrite(p[:len(p)-4]+'_cropped.jpg',new_img)
# print(p[:len(p)-4]+'Z.jpg')
# imshow(image)
# plt.show()
# predictions = coco_demo.compute_prediction(image)
# top_predictions = coco_demo.select_top_predictions(predictions)
# result = image.copy()
# # if self.show_mask_heatmaps:
# # return self.create_mask_montage(result, top_predictions)
# # result = self.overlay_boxes(result, top_predictions)
# if coco_demo.cfg.MODEL.MASK_ON:
# image = result
# predictions = top_predictions
# masks = predictions.get_field("mask").numpy()
# labels = predictions.get_field("labels")
# colors = coco_demo.compute_colors_for_labels(labels).tolist()
# for mask, color in zip(masks, colors):
# thresh = mask[0, :, :, None]
# _, contours, hierarchy = cv2.findContours(
# thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
# )
# # image = cv2.drawContours(image, contours, -1, color, 3)
# composite = image
# plt.imshow(composite)
# plt.show()
# cv2.imwrite(p[:len(p)-4]+'Z.jpg',composite)
# print(p[:len(p)-4]+'Z.jpg')
# result = self.overlay_class_names(result, top_predictions)
###Output
_____no_output_____ |
Step 2 - Data Preprocessing/5. RSG_Oversample with DCGAN.ipynb | ###Markdown
Ready, Steady, Go AI (*Tutorial*) This tutorial is a supplement to the paper, **Ready, Steady, Go AI: A Practical Tutorial on Fundamentals of Artificial Intelligence and Its Applications in Phenomics Image Analysis** (*Patterns, 2021*) by Farid Nakhle and Antoine Harfouche. Read the accompanying paper [here](https://doi.org/10.1016/j.patter.2021.100323). Table of contents * **1. Background*** **2. Downloading Segmented Images*** **3. Balancing the Healthy Class with DCGAN** 1. Background **Why do we need to balance a dataset?**Data imbalance refers to an unequal distribution of classes within a dataset. In such a scenario, a classification model could become biased and inaccurate and might produce unsatisfactory results. Therefore, we balance the dataset either by oversampling the minority class or undersampling the majority classes. To demonstrate the two scenarios, both oversampling and undersampling will be applied. Here, we will oversample the healthy class in the training set using the deep convolutional generative adversarial network (DCGAN) algorithm.**What is DCGAN?**The generative adversarial network (GAN) algorithm was designed to generate new data instances that resemble the training data. The idea is to pair two learning models, typically two ANNs, named the generator and the discriminator, where the former learns to produce synthetic data while the latter learns to distinguish true data from the output of the generator. During training, the generator tries to deceive the discriminator by synthesizing better data, while the discriminator becomes a better classifier. The equilibrium of this zero-sum game is reached when the discriminator can no longer distinguish real images from fakes.DCGAN is very similar to GAN, except that it uses convolutional and convolutional-transpose layers in the discriminator and generator, respectively, making it more suitable for synthesizing imaging data.Here, we provide the DCGAN algorithm with the healthy class of the training set (1,272 images) to train it to generate synthetic leaves and thus oversample the set to 1,500 images. 2. Downloading Segmented Images As a reminder, we are working with the PlantVillage dataset, originally obtained from [here](http://dx.doi.org/10.17632/tywbtsjrjv.1). For this tutorial, we will be working with a subset of PlantVillage, where we will choose the tomato classes only. We have made the subset available [here](http://dx.doi.org/10.17632/4g7k9wptyd.1). The next code will automatically download the dataset segmented with SegNet.**It is important to note that Colab deletes all unsaved data once the instance is recycled. Therefore, remember to download your results once you run the code.**
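A quick count for the oversampling target stated above: going from 1,272 healthy training images to 1,500 means synthesizing 1,500 - 1,272 = 228 new images, which is exactly the number requested from the trained generator later in this notebook.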
###Code
import requests
import os
import zipfile
## FEEL FREE TO CHANGE THESE PARAMETERS
dataset_url = "http://faridnakhle.com/pv/tomato-split-cropped-segmented.zip"
save_data_to = "/content/dataset/tomato-segmented/"
dataset_file_name = "tomato-segmented.zip"
#######################################
if not os.path.exists(save_data_to):
os.makedirs(save_data_to)
r = requests.get(dataset_url, stream = True, headers={"User-Agent": "Ready, Steady, Go AI"})
print("Downloading dataset...")
with open(save_data_to + dataset_file_name, "wb") as file:
for block in r.iter_content(chunk_size = 1024):
if block:
file.write(block)
## Extract downloaded zip dataset file
print("Dataset downloaded")
print("Extracting files...")
with zipfile.ZipFile(save_data_to + dataset_file_name, 'r') as zip_dataset:
zip_dataset.extractall(save_data_to)
## Delete the zip file as we no longer need it
os.remove(save_data_to + dataset_file_name)
print("All done!")
###Output
Downloading dataset...
Dataset downloaded
Extracting files...
All done!
###Markdown
3. Balancing the Healthy Class with DCGAN We will import PyTorch as we will use it for the implementation of DCGAN
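For reference, what the two networks below optimize is the standard GAN minimax objective from the literature (the notation here is ours, not taken from this notebook): the generator $G$ and the discriminator $D$ play the zero-sum game $$\min_G \max_D \; \mathbb{E}_{x \sim p_{data}(x)}\big[\log D(x)\big] + \mathbb{E}_{z \sim p(z)}\big[\log\big(1 - D(G(z))\big)\big],$$ which corresponds to the binary cross-entropy adversarial loss on real/fake labels used in the training loop below.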
###Code
import argparse
import os
import numpy as np
import math
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
## YOU CAN CHANGE THESE VARIABLES
n_epochs = 500
batch_size = 50
lr = 0.0002
b1 = 0.7 #adam: decay of first order momentum of gradient
b2 = 0.999 #adam: decay of second order momentum of gradient
n_cpu = 1
latent_dim = 100 #dimensionality of the latent space
img_size = 224
channels = 3 #R, G, and B
sample_interval = 400 #interval between image sampling
######################################################
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.init_size = img_size // 4
self.l1 = nn.Sequential(nn.Linear(latent_dim, 128 * self.init_size ** 2))
self.conv_blocks = nn.Sequential(
nn.BatchNorm2d(128),
nn.Upsample(scale_factor=2),
nn.Conv2d(128, 128, 3, stride=1, padding=1),
nn.BatchNorm2d(128, 0.8),
nn.LeakyReLU(0.2, inplace=True),
nn.Upsample(scale_factor=2),
nn.Conv2d(128, 64, 3, stride=1, padding=1),
nn.BatchNorm2d(64, 0.8),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(64, channels, 3, stride=1, padding=1),
nn.Tanh(),
)
def forward(self, z):
out = self.l1(z)
out = out.view(out.shape[0], 128, self.init_size, self.init_size)
img = self.conv_blocks(out)
return img
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
def discriminator_block(in_filters, out_filters, bn=True):
block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
if bn:
block.append(nn.BatchNorm2d(out_filters, 0.8))
return block
self.model = nn.Sequential(
*discriminator_block(channels, 16, bn=False),
*discriminator_block(16, 32),
*discriminator_block(32, 64),
*discriminator_block(64, 128),
)
# The height and width of downsampled image
ds_size = img_size // 2 ** 4
self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid())
def forward(self, img):
out = self.model(img)
out = out.view(out.shape[0], -1)
validity = self.adv_layer(out)
return validity
def trainDCGAN():
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find("BatchNorm2d") != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
os.makedirs("images", exist_ok=True)
cuda = True if torch.cuda.is_available() else False
load_from_checkpoint = False
# Loss function
adversarial_loss = torch.nn.BCELoss()
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
if cuda:
generator.cuda()
discriminator.cuda()
adversarial_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# train set
data_path = '/content/dataset/tomato-segmented/'
train_dir = data_path + 'train/'
train_dataset = datasets.ImageFolder(
train_dir,
transforms.Compose([
transforms.Resize(size=(img_size, img_size)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]))
dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True,
num_workers=n_cpu, pin_memory=True)
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=lr, betas=(b1, b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=lr, betas=(b1, b2))
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# ----------
# Load from Checkpoint
# ----------
if (load_from_checkpoint):
checkpointName = "./images/checkpoint_epoch_1000.pth"
checkpoint = torch.load(checkpointName)
generator.load_state_dict(checkpoint['G_state_dict'])
discriminator.load_state_dict(checkpoint['D_state_dict'])
optimizer_G.load_state_dict(checkpoint['G_optimizer'])
optimizer_D.load_state_dict(checkpoint['D_optimizer'])
print("Loaded CheckPoint: " + checkpointName)
if cuda:
generator.cuda()
discriminator.cuda()
# ----------
# Training
# ----------
for epoch in range(n_epochs):
for i, (imgs, _) in enumerate(dataloader):
# Adversarial ground truths
valid = Variable(Tensor(imgs.shape[0], 1).fill_(1.0), requires_grad=False)
fake = Variable(Tensor(imgs.shape[0], 1).fill_(0.0), requires_grad=False)
# Configure input
real_imgs = Variable(imgs.type(Tensor))
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Sample noise as generator input
z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], latent_dim))))
# Generate a batch of images
gen_imgs = generator(z)
# Loss measures generator's ability to fool the discriminator
g_loss = adversarial_loss(discriminator(gen_imgs), valid)
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# Measure discriminator's ability to classify real from generated samples
real_loss = adversarial_loss(discriminator(real_imgs), valid)
fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake)
d_loss = (real_loss + fake_loss) / 2
d_loss.backward()
optimizer_D.step()
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% ((epoch + 1), n_epochs, (i + 1), len(dataloader), d_loss.item(), g_loss.item())
)
batches_done = epoch * len(dataloader) + i
if batches_done % sample_interval == 0:
save_image(gen_imgs.data[:25], "images/%d.png" % batches_done, nrow=5, normalize=True)
if epoch % 500 == 0:
torch.save({
'G_state_dict': generator.state_dict(),
'D_state_dict': discriminator.state_dict(),
'G_optimizer': optimizer_G.state_dict(),
'D_optimizer': optimizer_D.state_dict(),
}, "images/checkpoint_epoch_" + str(epoch) +".pth")
torch.save({
'G_state_dict': generator.state_dict(),
'D_state_dict': discriminator.state_dict(),
'G_optimizer': optimizer_G.state_dict(),
'D_optimizer': optimizer_D.state_dict(),
}, "images/checkpoint_final.pth")
print("Training complete")
###Output
_____no_output_____
###Markdown
**NB: To make running this notebook faster and our results easily reproducible, we made our trained model available and we will load it after this section. Thus, you might skip this next code block**
###Code
trainDCGAN()
###Output
_____no_output_____
###Markdown
**In the next section, we will load our trained model to make our results reproducible. You can change the loading path to use your own instead**
###Code
##########################
### DOWNLOAD THE MODEL ###
##########################
## FEEL FREE TO CHANGE THESE PARAMETERS
model_URL = "http://faridnakhle.com/pv/models/RSGAI_DCGAN.zip"
save_data_to = "/content/models/"
model_file_name = "dcgan.zip"
#######################################
if not os.path.exists(save_data_to):
os.makedirs(save_data_to)
print("Downloading model...")
r = requests.get(model_URL, stream = True, headers={"User-Agent": "Ready, Steady, Go AI"})
with open(save_data_to + model_file_name, "wb") as file:
for block in r.iter_content(chunk_size = 1024):
if block:
file.write(block)
## Extract downloaded zip dataset file
print("Model downloaded")
print("Extracting files...")
with zipfile.ZipFile(save_data_to + model_file_name, 'r') as zip_dataset:
zip_dataset.extractall(save_data_to)
print("All done!")
###Output
Downloading model...
Model downloaded
Extracting files...
All done!
###Markdown
Now that we have a trained DCGAN model, we can use it to generate healthy tomato leaf images
###Code
def GenerateImages(modelPath, outPutFolder, IMGS2GENERATE):
if not os.path.exists(outPutFolder):
os.makedirs(outPutFolder)
## YOU CAN CHANGE THESE VARIABLES
n_epochs = 1
batch_size = 50
lr = 0.0002
b1 = 0.7 #adam: decay of first order momentum of gradient
b2 = 0.999 #adam: decay of second order momentum of gradient
n_cpu = 1
latent_dim = 100 #dimensionality of the latent space
img_size = 224
channels = 3 #R, G, and B
sample_interval = 400 #interval between image sampling
######################################################
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find("BatchNorm2d") != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
cuda = True if torch.cuda.is_available() else False
load_from_checkpoint = True
# Loss function
adversarial_loss = torch.nn.BCELoss()
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
if cuda:
generator.cuda()
discriminator.cuda()
adversarial_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=lr, betas=(b1, b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=lr, betas=(b1, b2))
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# ----------
# Load from Checkpoint
# ----------
if (load_from_checkpoint):
checkpointName = modelPath
checkpoint = torch.load(checkpointName)
generator.load_state_dict(checkpoint['G_state_dict'])
discriminator.load_state_dict(checkpoint['D_state_dict'])
optimizer_G.load_state_dict(checkpoint['G_optimizer'])
optimizer_D.load_state_dict(checkpoint['D_optimizer'])
print("Loaded CheckPoint: " + checkpointName)
if cuda:
generator.cuda()
discriminator.cuda()
# ----------
# Generating images
# ----------
for i in range (0, IMGS2GENERATE):
z = Variable(Tensor(np.random.normal(0, 1, (1, latent_dim))))
# Generate a batch of images
gen_imgs = generator(z)
save_image(gen_imgs.data, outPutFolder + "/DCGAN_%d.png" % (i + 1), nrow=0, normalize=True)
GenerateImages('/content/models/RSGAI_DCGAN.pth', '/content/output/', IMGS2GENERATE = 228)
print("Data Generated")
###Output
Loaded CheckPoint: /content/models/RSGAI_DCGAN.pth
Data Generated
###Markdown
Let's preview some of the generated data
###Code
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
imgPath = '/content/output/DCGAN_'
imageOne = mpimg.imread(imgPath + "1.png")
imageTen = mpimg.imread(imgPath + "10.png")
plt.axis('off')
plt.imshow(imageOne)
plt.show()
plt.axis('off')
plt.imshow(imageTen)
plt.show()
###Output
_____no_output_____ |
Dance_Form.ipynb | ###Markdown
###Code
import os
import zipfile
local_zip='/content/0664343c9a8f11ea.zip'
zip_ref=zipfile.ZipFile(local_zip,'r')
zip_ref.extractall("/content")
zip_ref.close()
os.mkdir("/content/train")
#!cp normal ~/.train
#!chmod 600 ~/.train/normal
import pandas as pd
train=pd.read_csv('/content/dataset/train.csv')
train.head()
test=pd.read_csv('/content/dataset/test.csv')
test.head()
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from tqdm import tqdm
from keras.preprocessing import image
train_images=os.listdir('/content/dataset/train')
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
###Output
_____no_output_____
###Markdown
Pipelining the images
###Code
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='most_frequent')),
('onehot', OneHotEncoder(handle_unknown='ignore'))
])
preprocessor = ColumnTransformer(
transformers=[
('cat', categorical_transformer, ['target'])  # ColumnTransformer expects column names, not the column values
])
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Building the model
###Code
model=tf.keras.models.Sequential([
tf.keras.layers.Conv2D(16,(3,3),activation='relu',input_shape=(150,150,3)),
tf.keras.layers.MaxPool2D(2,2),
tf.keras.layers.Conv2D(32,(3,3),activation='relu'),
tf.keras.layers.MaxPool2D(2,2),
tf.keras.layers.Conv2D(64,(3,3),activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512,activation='relu'),
tf.keras.layers.Dense(train['target'].nunique(), activation='softmax')  # one output unit per dance class, matching categorical_crossentropy
])
model.summary()
import numpy as np
y = np.array(train['target'])
y.shape
X_train, X_test, y_train, y_test = train_test_split(train_images, y, random_state=42, test_size=0.1)
from tensorflow.keras.optimizers import RMSprop
model.compile(optimizer=RMSprop(lr=0.001),
loss='categorical_crossentropy',
metrics=['acc'])
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen=ImageDataGenerator(
rescale=1./255,
width_shift_range=0.2,
height_shift_range=0.2,
rotation_range=40,
zoom_range=0.2,
horizontal_flip=True,
shear_range=0.2,
fill_mode='nearest'
)
train_generator=train_datagen.flow_from_dataframe(train_images,
target_size=(150,150),
class_mode='categorical',
batch_size=20,
)
model.fit(train_generator, steps_per_epoch=50, epochs=25)  # fit on the generator built above
###Output
_____no_output_____ |
python/zlib-binascii/py-zlib-binascii.ipynb | ###Markdown
Py-file-zlib-binascii. Python can compress and decompress file data; this note uses a .bin file as the example. Opening a .bin file in 'rb' mode yields data of type bytes; for details see: https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files. Brief notes on the Python libraries: 1. The zlib library is used to compress and decompress file data. 2. The binascii library converts between binary data and ASCII representations.
###Code
import zlib
help(zlib)
import binascii
help(binascii)
import zlib
import binascii
with open('hello_world.bin', 'rb') as f:
# Step1: compress file data
tmp = zlib.compress(f.read())
data = binascii.hexlify(tmp)
# Step2: decompress file data
data = binascii.unhexlify(data)
zlib.decompress(data)
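# --- Added illustration (not from the original notebook): a self-contained round trip on an
# --- in-memory bytes object, so it also works without hello_world.bin on disk.
sample = b'hello world! ' * 10
hex_compressed = binascii.hexlify(zlib.compress(sample))
print(hex_compressed)
print(zlib.decompress(binascii.unhexlify(hex_compressed)) == sample)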
###Output
_____no_output_____ |
7-3-exercise-build-regression-model.ipynb | ###Markdown
Exercise: Build a simple logistic regression model. In this exercise, we'll fit a simple logistic regression model that will try to predict the chance of an avalanche. Recall that logistic regression fits an s-shaped curve to data, rather than a straight line, and we can use this to calculate a probability of a binary outcome. Data visualization: Let's start this exercise by loading in and having a look at our data:
###Code
import pandas
!pip install statsmodels
!wget https://raw.githubusercontent.com/MicrosoftDocs/mslearn-introduction-to-machine-learning/main/graphing.py
!wget https://raw.githubusercontent.com/MicrosoftDocs/mslearn-introduction-to-machine-learning/main/Data/avalanche.csv
#Import the data from the .csv file
dataset = pandas.read_csv('avalanche.csv', delimiter="\t")
#Let's have a look at the data
dataset
###Output
Requirement already satisfied: statsmodels in /anaconda/envs/py37_default/lib/python3.7/site-packages (0.12.2)
Requirement already satisfied: scipy>=1.1 in /anaconda/envs/py37_default/lib/python3.7/site-packages (from statsmodels) (1.5.2)
Requirement already satisfied: numpy>=1.15 in /anaconda/envs/py37_default/lib/python3.7/site-packages (from statsmodels) (1.19.2)
Requirement already satisfied: patsy>=0.5 in /anaconda/envs/py37_default/lib/python3.7/site-packages (from statsmodels) (0.5.1)
Requirement already satisfied: pandas>=0.21 in /anaconda/envs/py37_default/lib/python3.7/site-packages (from statsmodels) (1.2.0)
Requirement already satisfied: python-dateutil>=2.7.3 in /anaconda/envs/py37_default/lib/python3.7/site-packages (from pandas>=0.21->statsmodels) (2.8.1)
Requirement already satisfied: pytz>=2017.3 in /anaconda/envs/py37_default/lib/python3.7/site-packages (from pandas>=0.21->statsmodels) (2020.5)
Requirement already satisfied: six in /anaconda/envs/py37_default/lib/python3.7/site-packages (from patsy>=0.5->statsmodels) (1.15.0)
--2021-08-27 02:15:31-- https://raw.githubusercontent.com/MicrosoftDocs/mslearn-introduction-to-machine-learning/main/graphing.py
Resolving raw.githubusercontent.com... 185.199.108.133, 185.199.110.133, 185.199.109.133, ...
Connecting to raw.githubusercontent.com|185.199.108.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 21511 (21K) [text/plain]
Saving to: ‘graphing.py.1’
graphing.py.1 100%[===================>] 21.01K --.-KB/s in 0s
2021-08-27 02:15:31 (110 MB/s) - ‘graphing.py.1’ saved [21511/21511]
--2021-08-27 02:15:32-- https://raw.githubusercontent.com/MicrosoftDocs/mslearn-introduction-to-machine-learning/main/Data/avalanche.csv
Resolving raw.githubusercontent.com... 185.199.108.133, 185.199.110.133, 185.199.109.133, ...
Connecting to raw.githubusercontent.com|185.199.108.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 56578 (55K) [text/plain]
Saving to: ‘avalanche.csv.1’
avalanche.csv.1 100%[===================>] 55.25K --.-KB/s in 0.001s
2021-08-27 02:15:32 (84.1 MB/s) - ‘avalanche.csv.1’ saved [56578/56578]
###Markdown
Data Exploration. The `avalanche` field is our target. A value of `1` means that an avalanche did occur under the conditions described by the features, whereas a value of `0` means no avalanche happened. Since our targets can only be `0` or `1`, we call this a *binary classification* model. Now let's plot the relationships between each feature and the target values. That helps us understand which features are more likely to influence the results:
###Code
import graphing # custom graphing code. See our GitHub repo for details
graphing.box_and_whisker(dataset, label_x="avalanche", label_y="surface_hoar", show=True)
graphing.box_and_whisker(dataset, label_x="avalanche", label_y="fresh_thickness", show=True)
graphing.box_and_whisker(dataset, label_x="avalanche", label_y="weak_layers", show=True)
graphing.box_and_whisker(dataset, label_x="avalanche", label_y="no_visitors")
###Output
_____no_output_____
###Markdown
We can notice that:- For `fresh_thickness` the outcomes are very similar. This means that variations in its values aren't strongly correlated with the results.- Variations in the values of `weak_layers` and `no_visitors` seem to correlate with a larger number of `avalanche` results, so we should assign more importance to these features.The differences between avalanche and non-avalanche days are small and there isn't one clear driver of issues. Weak layers looks like a good starting point as it is related to the widest variation in results. Building a simple logistic regression model. We will now build and train a model to predict the chance of an avalanche happening based __solely__ on the number of weak layers of snow:
###Code
# Here we import a function that splits datasets according to a given ratio
from sklearn.model_selection import train_test_split
# Split the dataset in an 70/30 train/test ratio.
train, test = train_test_split(dataset, test_size=0.3, random_state=2)
print(train.shape)
print(test.shape)
###Output
(766, 8)
(329, 8)
###Markdown
OK, let's train our model using the `train` dataset we've just created (notice that `weak_layers` will be the only feature used to determine the outcome):
###Code
import statsmodels.formula.api as smf
import graphing # custom graphing code. See our GitHub repo for details
# Perform logistic regression.
model = smf.logit("avalanche ~ weak_layers", train).fit()
print("Model trained")
###Output
Optimization terminated successfully.
Current function value: 0.631451
Iterations 5
Model trained
###Markdown
After training, we can print a model summary with very detailed information:
###Code
print(model.summary())
###Output
Logit Regression Results
==============================================================================
Dep. Variable: avalanche No. Observations: 766
Model: Logit Df Residuals: 764
Method: MLE Df Model: 1
Date: Fri, 27 Aug 2021 Pseudo R-squ.: 0.07898
Time: 02:15:33 Log-Likelihood: -483.69
converged: True LL-Null: -525.17
Covariance Type: nonrobust LLR p-value: 8.395e-20
===============================================================================
coef std err z P>|z| [0.025 0.975]
-------------------------------------------------------------------------------
Intercept -0.8586 0.147 -5.856 0.000 -1.146 -0.571
weak_layers 0.2241 0.026 8.648 0.000 0.173 0.275
===============================================================================
###Markdown
Notice that the positive coefficient for `weak_layers` means that a higher value means a higher likelihood for an avalanche. Using our model. We can now use our trained model to make predictions and estimate probabilities. Let's pick the first four occurrences in our `test` set and print the probability of an avalanche for each one of them:
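(For reference, the fitted model corresponds to $p(\text{avalanche}) = \dfrac{1}{1 + e^{-(-0.8586 + 0.2241\,x)}}$, where $x$ is the number of weak layers and the two numbers are the intercept and coefficient from the summary above; plugging in $x = 5$ gives roughly $0.565$, matching the 56.51% shown in the output below.)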
###Code
# predict to get a probability
# get first 3 samples from dataset
samples = test["weak_layers"][:4]
# use the model to get predictions as possibilities
estimated_probabilities = model.predict(samples)
# Print results for each sample
for sample, pred in zip(samples,estimated_probabilities):
print(f"A weak_layer with value {sample} yields a {pred * 100:.2f}% chance of an avalanche.")
###Output
A weak_layer with value 5 yields a 56.51% chance of an avalanche.
A weak_layer with value 4 yields a 50.95% chance of an avalanche.
A weak_layer with value 7 yields a 67.05% chance of an avalanche.
A weak_layer with value 0 yields a 29.76% chance of an avalanche.
###Markdown
Let's plot our model to understand this:
###Code
# plot the model
# Show a graph of the result
predict = lambda x: model.predict(pandas.DataFrame({"weak_layers": x}))
graphing.line_2D([("Model", predict)],
x_range=[-20,40],
label_x="weak_layers",
label_y="estimated probability of an avalanche")
###Output
_____no_output_____
###Markdown
The line plots the function of the __probability__ of an avalanche over the number of weak layers; Notice that the more weak layers, the more likely an avalanche will happen. This plot can look a bit confusing for two reasons.Firstly, the curve can make predictions from negative to positive infinity, but we only have data for 0 - 10 layers:
###Code
print("Minimum number of weak layers:", min(train.weak_layers))
print("Maximum number of weak layers:", max(train.weak_layers))
###Output
Minimum number of weak layers: 0
Maximum number of weak layers: 10
###Markdown
This is because logistic regression models allow predictions outside the range of values they have seen, and sometimes do so quite well.The second reason the plot is confusing is that at 0 layers, there's still _some_ risk of an avalanche. Similarly, at 10 layers, there isn't a 100% risk of an avalanche. This is actually in line with the data:
###Code
import numpy as np
# Get actual rates of avalanches at 0 years
avalanche_outcomes_for_0_layers = train[train.weak_layers == 0].avalanche
print("Average rate of avalanches for 0 weak layers of snow", np.average(avalanche_outcomes_for_0_layers))
# Get actual rates of avalanches at 10 years
avalanche_outcomes_for_10_layers = train[train.weak_layers == 10].avalanche
print("Average rate of avalanches for 10 weak layers of snow", np.average(avalanche_outcomes_for_10_layers))
###Output
Average rate of avalanches for 0 weak layers of snow 0.3880597014925373
Average rate of avalanches for 10 weak layers of snow 0.7761194029850746
###Markdown
Our model is actually doing a good job! It's just that avalanches aren't _only_ caused by weak layers of snow. If we want to do better, we probably need to think about including other information in the model. Classification or decision thresholds. To return a binary category (`True` = "avalanche", `False` = "no avalanche") we need to define a *Classification Threshold* value. Any probability above that threshold is returned as the positive category, whereas values below it will be returned as the negative category. Let's see what happens if we set our threshold to `0.5` (meaning that our model will return `True` whenever it calculates a chance above 50% of an avalanche happening):
###Code
# threshold to get an absolute value
threshold = 0.5
# Add classification to the samples we used before
for sample, pred in list(zip(samples,estimated_probabilities)):
print(f"A weak_layer with value {sample} yields a chance of {pred * 100:.2f}% of an avalanche. Classification = {pred > threshold}")
###Output
A weak_layer with value 5 yields a chance of 56.51% of an avalanche. Classification = True
A weak_layer with value 4 yields a chance of 50.95% of an avalanche. Classification = True
A weak_layer with value 7 yields a chance of 67.05% of an avalanche. Classification = True
A weak_layer with value 0 yields a chance of 29.76% of an avalanche. Classification = False
###Markdown
Note that a `0.5` threshold is just a starting point that needs to be tuned depending on the data we're trying to classify. Performance on test setNow let's use our `test` dataset to perform a quick evaluation on how the model did. For now, we'll just look at how often we correctly predicted if there would be an avalanche or not
###Code
# Classify the model predictions using the threshold
predictions = model.predict(test) > threshold
# Compare the predictions to the actual outcomes in the dataset
accuracy = np.average(predictions == test.avalanche)
# Print the evaluation
print(f"The model correctly predicted outcomes {accuracy * 100:.2f}% of time.")
###Output
The model correctly predicted outcomes 65.05% of time.
|
Modulo3/Tarea6_DuartePablo.ipynb | ###Markdown
Assignment 6. Optimal capital allocation and portfolio selection.**Summary.**> In this assignment you will have the opportunity to apply the concepts and tools we learned in Module 3. Specifically, you will use mean-variance optimization techniques to build the minimum-variance frontier, find the best portfolio on the minimum-variance frontier, and finally identify the optimal capital allocation for an investor given their level of risk aversion.**Grading criterion.**> You will be graded according to the final results you report, based on your analysis.**Before starting.**> Please copy and paste this file to another location. Before starting, name it *Tarea6_LastNameFirstName*, without accents and without spaces; for example, in my case the file would be called *Tarea6_JimenezEsteban*. Solve all the items in that file and upload it in this space. 1. Data (10 points)Consider the following data on bonds, a stock index, developed markets, emerging markets, private funds, real assets, and a risk-free asset:
###Code
# Importamos pandas y numpy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Resumen en base anual de rendimientos esperados y volatilidades
annual_ret_summ = pd.DataFrame(columns=['Bonos', 'Acciones', 'Desarrollado', 'Emergente', 'Privados', 'Real', 'Libre_riesgo'], index=['Media', 'Volatilidad'])
annual_ret_summ.loc['Media'] = np.array([0.0400, 0.1060, 0.0830, 0.1190, 0.1280, 0.0620, 0.0300])
annual_ret_summ.loc['Volatilidad'] = np.array([0.0680, 0.2240, 0.2210, 0.3000, 0.2310, 0.0680, None])
annual_ret_summ.round(4)
# Matriz de correlación
corr = pd.DataFrame(data= np.array([[1.0000, 0.4000, 0.2500, 0.2000, 0.1500, 0.2000],
[0.4000, 1.0000, 0.7000, 0.6000, 0.7000, 0.2000],
[0.2500, 0.7000, 1.0000, 0.7500, 0.6000, 0.1000],
[0.2000, 0.6000, 0.7500, 1.0000, 0.2500, 0.1500],
[0.1500, 0.7000, 0.6000, 0.2500, 1.0000, 0.3000],
[0.2000, 0.2000, 0.1000, 0.1500, 0.3000, 1.0000]]),
columns=annual_ret_summ.columns[:-1], index=annual_ret_summ.columns[:-1])
corr.round(4)
###Output
_____no_output_____
###Markdown
1. Plot each of the assets in expected return vs. volatility space (10 points).
###Code
# Activos
E1 = annual_ret_summ.loc['Media','Bonos']
E2 = annual_ret_summ.loc['Media','Acciones']
E3 = annual_ret_summ.loc['Media','Desarrollado']
E4 = annual_ret_summ.loc['Media','Emergente']
E5 = annual_ret_summ.loc['Media','Privados']
E6 = annual_ret_summ.loc['Media','Real']
E7 = annual_ret_summ.iloc[0,6]
# Volatilidades individuales
s1 = annual_ret_summ.loc['Volatilidad','Bonos']
s2 = annual_ret_summ.loc['Volatilidad','Acciones']
s3 = annual_ret_summ.loc['Volatilidad','Desarrollado']
s4 = annual_ret_summ.loc['Volatilidad','Emergente']
s5 = annual_ret_summ.loc['Volatilidad','Privados']
s6 = annual_ret_summ.loc['Volatilidad','Real']
s7 = 0
plt.plot(s1, E1, 'ob', ms=5, label='Índice Bonos')
plt.plot(s2, E2, 'or', ms=5, label='Índice Acciones')
plt.plot(s3, E3, 'oc', ms=5, label='Índice Desarrollado')
plt.plot(s4, E4, 'ok', ms=5, label='Índice Emergente')
plt.plot(s5, E5, 'om', ms=5, label='Índice Privados')
plt.plot(s6, E6, 'oy', ms=5, label='Índice Real')
plt.plot(s7, E7, 'og', ms=5, label='Índice Libre riesgo')
plt.grid()
plt.legend(loc='best')
plt.title('Rendimiento contra Volatilidad')
plt.xlabel('Volatilidad $\sigma')
plt.ylabel('Rendimiento Esperado $\E[r]')
###Output
_____no_output_____
###Markdown
2. Finding portfolios on the minimum-variance frontier (35 points)Using the data from the previous section:1. Find the weights of the minimum-variance portfolio considering all risky assets. Also clearly report the expected return, volatility and Sharpe ratio of that portfolio (15 points).2. Find the weights of the EMV (maximum Sharpe ratio) portfolio considering all risky assets. Also clearly report the expected return, volatility and Sharpe ratio of that portfolio (15 points).3. Find the covariance and the correlation between the two portfolios found (5 points) 1. Find the weights of the minimum-variance portfolio considering all risky assets. Also clearly report the expected return, volatility and Sharpe ratio of that portfolio (15 points).
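For reference, the two optimizations coded below use the portfolio variance $\sigma_p^2 = w^\top \Sigma w$ (minimized for the minimum-variance portfolio) and the Sharpe ratio $RS = \dfrac{E[r_p] - r_f}{\sigma_p}$ (maximized for the EMV portfolio), in both cases subject to $\sum_i w_i = 1$ and $0 \le w_i \le 1$.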
###Code
# Importamos funcion minimize del modulo optimize de scipy
from scipy.optimize import minimize
rf = 0.03
annual_ret_summ = annual_ret_summ.drop(columns=['Libre_riesgo'])
## Construcción de parámetros
# 1. Sigma: matriz de varianza-covarianza Sigma = S.dot(corr).dot(S)
S = np.diag(annual_ret_summ.loc['Volatilidad'].values)
Sigma = S.dot(corr).dot(S)
# 2. Eind: rendimientos esperados activos individuales
Eind = annual_ret_summ.loc['Media'].values
# Función objetivo
def var(w, Sigma):
return w.T.dot(Sigma).dot(w)
# Número de activos
N = len(Eind)
# Dato inicial
w0 = np.ones(N)/N
# Cotas de las variables
bnds = ((0, 1), ) * N
# Restricciones
cons = {'type': 'eq', 'fun': lambda w: w.sum() - 1}
# Portafolio de mínima varianza
minvar = minimize(fun=var,
x0=w0,
args=(Sigma,),
bounds=bnds,
constraints=cons)
minvar
# Pesos, rendimiento y riesgo del portafolio de mínima varianza
w_minvar = minvar.x
E_minvar = Eind.T.dot(w_minvar)
s_minvar = var(w_minvar, Sigma)**0.5
RS_minvar = (E_minvar - rf) / s_minvar
w_minvar, E_minvar, s_minvar, RS_minvar
###Output
_____no_output_____
###Markdown
2. Find the weights of the EMV portfolio considering all risky assets. Also clearly report the expected return, volatility and Sharpe ratio of that portfolio (15 points).
###Code
# Función objetivo
def menos_RS(w, Eind, rf, Sigma):
E_port = Eind.T.dot(w)
s_port = var(w, Sigma)**0.5
RS = (E_port - rf) / s_port
return - RS
# Número de activos
N = len(Eind)
# Dato inicial
w0 = np.ones(N)/N
# Cotas de las variables
bnds = ((0, 1), ) * N
# Restricciones
cons = {'type': 'eq', 'fun': lambda w: w.sum() - 1}
# Portafolio EMV
emv = minimize(fun=menos_RS,
x0=w0,
args=(Eind, rf, Sigma),
bounds=bnds,
constraints=cons)
emv
# Pesos, rendimiento y riesgo del portafolio EMV
w_emv = emv.x
E_emv = Eind.T.dot(w_emv)
s_emv = var(w_emv, Sigma)**0.5
RS_emv = (E_emv - rf) / s_emv
w_emv, E_emv, s_emv, RS_emv
w_minvar, E_minvar, s_minvar, RS_minvar
annual_ret_summ.columns
###Output
_____no_output_____
###Markdown
3. Find the covariance and the correlation between the two portfolios found (5 points)
###Code
# Covarianza entre los portafolios
cov_emv_minvar = w_emv.T.dot(Sigma).dot(w_minvar)
cov_emv_minvar
# Correlación entre los portafolios
corr_emv_minvar = cov_emv_minvar / (s_emv * s_minvar)
corr_emv_minvar
###Output
_____no_output_____
###Markdown
3. Minimum-variance frontier and CAL (30 points). With the portfolios found in the previous part (minimum variance and EMV): 1. Build the minimum-variance frontier by computing the expected return and volatility for several combinations of the previous portfolios. Report those combinations in a DataFrame including weights, return, volatility and Sharpe ratio (15 points). 2. Also build the capital allocation line between the risk-free asset and the EMV portfolio. Report the combinations of these assets in a DataFrame including weights, return, volatility and Sharpe ratio (15 points).
###Code
# Vector of weights w
w_p = np.linspace(0, 4)
# DataFrame of portfolios:
# 1. Index: i
# 2. Columns 1-2: w, 1-w
# 3. Columns 3-4: E[r], sigma
# 4. Column 5: Sharpe ratio
frontera = pd.DataFrame(data={'W': w_p,
'Media': w_p * E_emv + (1 - w_p) * E_minvar,
'Vol': ((w_p * s_emv)**2 + ((1 - w_p) * s_minvar)**2 + 2 * w_p * (1 - w_p) * cov_emv_minvar)**0.5})
frontera['RS'] = (frontera['Media'] - rf) /frontera['Vol']
frontera.head()
sp = np.linspace(0, 0.2)
LAC = pd.DataFrame(data={'Vol': sp,
'Media': RS_emv * sp + rf})
LAC['RS'] = (LAC['Media'] - rf) /LAC['Vol']
LAC.head()
###Output
_____no_output_____
###Markdown
4. Plots and conclusions (25 points). 1. Using all the data obtained, plot: - the individual assets, - the minimum-variance portfolio, - the mean-variance efficient (EMV) portfolio, - the minimum-variance frontier, and - the capital allocation line, in the return (y-axis) vs. volatility (x-axis) space. Make sure to label everything and use different colors to distinguish the different elements in your plot (15 points). 2. Suppose you are advising a client whose risk-aversion coefficient turned out to be 4. What capital allocation would you suggest, and what does your result mean? (10 points)
###Code
from matplotlib import pyplot as plt
%matplotlib inline
# Scatter plot of the frontier, coloring the points
# according to the Sharpe ratio, together with the individual assets
# and the portfolios found
plt.figure(figsize=(10, 6))
# Frontier
ax = plt.subplot(121) #
plt.scatter(frontera['Vol'], frontera['Media'], c = frontera['RS'], cmap='RdYlBu', label = 'Frontera de minima varianza')
plt.colorbar()
# Optimal portfolios
plt.plot(s_minvar, E_minvar, '*g', ms=10, label='Portafolio de mínima varianza')
plt.plot(s_emv, E_emv, '*r', ms=10, label='Portafolio eficiente en media varianza')
plt.plot(LAC['Vol'], LAC['Media'], label = 'LAC')
plt.plot(s1, E1, 'ob', ms=5, label='Índice Bonos')
plt.plot(s2, E2, 'or', ms=5, label='Índice Acciones')
plt.plot(s3, E3, 'oc', ms=5, label='Índice Desarrollado')
plt.plot(s4, E4, 'ok', ms=5, label='Índice Emergente')
plt.plot(s5, E5, 'om', ms=5, label='Índice Privados')
plt.plot(s6, E6, 'oy', ms=5, label='Índice Real')
plt.plot(0, rf, 'og', ms=5, label='Índice Libre riesgo')
plt.xlabel('Volatilidad $\sigma$')
plt.ylabel('Rendimiento esperado $E[r]$')
plt.grid()
ax.legend(bbox_to_anchor=(1.5, 1), loc=2, borderaxespad=0.)
# For gamma = 4 (the client's risk-aversion coefficient)
g = 4
w_opt = (E_emv - rf) / (g * s_emv**2)
w_opt
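# A minimal interpretation sketch (not part of the original exercise): the optimal allocation on the
# CAL puts the fraction w_opt in the EMV portfolio and 1 - w_opt in the risk-free asset. If w_opt > 1
# the client would borrow at the risk-free rate (a leveraged position). Names reuse variables above.
E_opt = w_opt * E_emv + (1 - w_opt) * rf # expected return of the suggested allocation
s_opt = w_opt * s_emv # volatility of the suggested allocation
E_opt, s_opt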
###Output
_____no_output_____ |
RYRZ/RYRZ_VQE_LiH.ipynb | ###Markdown
Variational Quantum Eigensolver - Ground State Energy for the LiH Molecule using the RYRZ ansatz
###Code
import numpy as np
import matplotlib.pyplot as plt
# Importing standard Qiskit libraries
from qiskit import QuantumCircuit, transpile, IBMQ
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from ibm_quantum_widgets import *
from qiskit.providers.aer import QasmSimulator, StatevectorSimulator
from qiskit.utils import QuantumInstance
# Loading your IBM Quantum account(s)
provider = IBMQ.load_account()
# Chemistry Drivers
from qiskit_nature.drivers.second_quantization.pyscfd import PySCFDriver
from qiskit_nature.transformers.second_quantization.electronic import FreezeCoreTransformer
from qiskit.opflow.primitive_ops import Z2Symmetries
# Electroinic structure problem
from qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem
# Qubit converter
from qiskit_nature.converters.second_quantization.qubit_converter import QubitConverter
# Mappers
from qiskit_nature.mappers.second_quantization import ParityMapper, BravyiKitaevMapper, JordanWignerMapper
# Initial state
from qiskit_nature.circuit.library import HartreeFock
# Variational form - circuit
from qiskit.circuit.library import TwoLocal
# Optimizer
from qiskit.algorithms.optimizers import COBYLA, SLSQP, SPSA
# Algorithms and Factories
from qiskit_nature.algorithms import ExcitedStatesEigensolver, NumPyEigensolverFactory
# Eigen Solvers
# NumPy Minimum Eigen Solver
from qiskit_nature.algorithms.ground_state_solvers.minimum_eigensolver_factories import NumPyMinimumEigensolverFactory
# ground state
from qiskit_nature.algorithms.ground_state_solvers import GroundStateEigensolver
# VQE Solver
from qiskit.algorithms import VQE
###Output
_____no_output_____
###Markdown
Backend
###Code
qasm_sim = QasmSimulator()
state_sim = StatevectorSimulator()
###Output
_____no_output_____
###Markdown
DriversBelow we set up a PySCF driver for the LiH molecule at a Li-H bond length of 1.5474 Angstrom
###Code
def exact_diagonalizer(es_problem, qubit_converter):
solver = NumPyMinimumEigensolverFactory()
calc = GroundStateEigensolver(qubit_converter, solver)
result = calc.solve(es_problem)
return result
def get_mapper(mapper_str: str):
if mapper_str == "jw":
mapper = JordanWignerMapper()
elif mapper_str == "pa":
mapper = ParityMapper()
elif mapper_str == "bk":
mapper = BravyiKitaevMapper()
return mapper
def initial_state_preparation(mapper_str: str = "jw"):
molecule = "Li 0.0 0.0 0.0; H 0.0 0.0 1.5474"
driver = PySCFDriver(atom=molecule)
transformer = FreezeCoreTransformer()
# pass the freeze-core transformer to the problem so the reduction is actually applied
es_problem = ElectronicStructureProblem(driver, [transformer])
# generating second-quantized operators
second_q_ops = es_problem.second_q_ops()
# Hamiltonian
main_op = second_q_ops[0]
# return tuple of number of particles if available
num_particles = es_problem.num_particles
# return the number of spin orbitals
num_spin_orbitals = es_problem.num_spin_orbitals
mapper = get_mapper(mapper_str)
qubit_converter = QubitConverter(mapper=mapper, two_qubit_reduction=True)#, z2symmetry_reduction=[1, 1])
# Qubit Hamiltonian
qubit_op = qubit_converter.convert(main_op, num_particles=num_particles)
return (qubit_op, num_particles, num_spin_orbitals, qubit_converter, es_problem)
qubit_op, num_particles, num_spin_orbitals, qubit_converter, es_problem = initial_state_preparation("pa")
init_state = HartreeFock(num_spin_orbitals, num_particles, qubit_converter)
init_state.barrier()
init_state.draw("mpl", initial_state=True).savefig("ryrz_vqe_lih_init_state.png", dpi=300)
init_state.draw("mpl", initial_state=True)
# Setting up TwoLocal for our ansatz
ansatz_type = "RY"
# Single qubit rotations that are placed on all qubits with independent parameters
rotation_blocks = ["ry", "rz"]
# Entangling gates
entanglement_blocks = "cx"
# How the qubits are entangled?
entanglement = 'linear'
# Repetitions of rotation_blocks + entanglement_blocks with independent parameters
repetitions = 1
# Skipping the final rotation_blocks layer
skip_final_rotation_layer = False
ansatz = TwoLocal(
qubit_op.num_qubits,
rotation_blocks,
entanglement_blocks,
reps=repetitions,
entanglement=entanglement,
skip_final_rotation_layer=skip_final_rotation_layer,
# insert_barriers=True
)
# Add the initial state
ansatz.compose(init_state, front=True, inplace=True)
ansatz.draw(output="mpl", initial_state=True).savefig("ryrz_vqe_lih_ansatz.png", dpi=300)
ansatz.draw(output="mpl", initial_state=True)
ansatz.decompose().draw(output="mpl", initial_state=True).savefig("ryrz_vqe_lih_ansatz_decomposed.png", dpi=300)
ansatz.decompose().draw(output="mpl", initial_state=True)
optimizer = COBYLA(maxiter=10000)
###Output
_____no_output_____
###Markdown
SolverExact Eigensolver using NumPyMinimumEigensolver
###Code
result_exact = exact_diagonalizer(es_problem, qubit_converter)
exact_energy = np.real(result_exact.eigenenergies[0])
print("Exact Electronic Energy: {:.4f} Eh\n\n".format(exact_energy))
print("Results:\n\n", result_exact)
###Output
Exact Electronic Energy: -8.9087 Eh
Results:
=== GROUND STATE ENERGY ===
* Electronic ground state energy (Hartree): -8.908697116424
- computed part: -8.908697116424
~ Nuclear repulsion energy (Hartree): 1.025934879643
> Total ground state energy (Hartree): -7.882762236781
=== MEASURED OBSERVABLES ===
0: # Particles: 4.000 S: 0.000 S^2: 0.000 M: 0.000
=== DIPOLE MOMENTS ===
~ Nuclear dipole moment (a.u.): [0.0 0.0 2.92416221]
0:
* Electronic dipole moment (a.u.): [0.0 0.0 4.74455828]
- computed part: [0.0 0.0 4.74455828]
> Dipole moment (a.u.): [0.0 0.0 -1.82039607] Total: 1.82039607
(debye): [0.0 0.0 -4.62698485] Total: 4.62698485
###Markdown
VQE Solver
###Code
from IPython.display import display, clear_output
def callback(eval_count, parameters, mean, std):
# overwrites same line when printing
display("Evaluation: {},\tEnergy: {},\tStd: {}".format(eval_count, mean, std))
clear_output(wait=True)
counts.append(eval_count)
values.append(mean)
params.append(parameters)
deviation.append(std)
counts = []
values = []
params = []
deviation = []
# Set initial parameters of the ansatz
# we choose a fixed small displacement
try:
initial_point = [0.01] * len(ansatz.ordered_parameters)
except:
initial_point = [0.01] * ansatz.num_parameters
algorithm = VQE(
ansatz,
optimizer=optimizer,
quantum_instance=state_sim,
callback=callback,
initial_point=initial_point
)
result = algorithm.compute_minimum_eigenvalue(qubit_op)
print(result)
# Storing results in a dictionary
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Unroller
# Unroller transpile our circuit into CNOTs and U gates
pass_ = Unroller(['u', 'cx'])
pm = PassManager(pass_)
ansatz_tp = pm.run(ansatz)
cnots = ansatz_tp.count_ops()['cx']
score = cnots
accuracy_threshold = 4.0 # in mHa
energy = result.optimal_value
# if ansatz_type == "TwoLocal":
result_dict = {
'optimizer': optimizer.__class__.__name__,
'mapping': qubit_converter.mapper.__class__.__name__,
'ansatz': ansatz.__class__.__name__,
'rotation blocks': rotation_blocks,
'entanglement_blocks': entanglement_blocks,
'entanglement': entanglement,
'repetitions': repetitions,
'skip_final_rotation_layer': skip_final_rotation_layer,
'energy (Ha)': energy,
'error (mHa)': (energy-exact_energy)*1000,
'pass': (energy-exact_energy)*1000 <= accuracy_threshold,
'# of parameters': len(result.optimal_point),
'final parameters': result.optimal_point,
'# of evaluations': result.optimizer_evals,
'optimizer time': result.optimizer_time,
'# of qubits': int(qubit_op.num_qubits),
'# of CNOTs': cnots,
'score': score}
# Plotting the results
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1, figsize=(19.20, 10.80))
plt.rc('font', size=14)
plt.rc('axes', labelsize=14)
plt.rc('xtick', labelsize=14)
plt.rc('ytick', labelsize=14)
plt.rc('legend', fontsize=14)
# ax.set_facecolor("#293952")
ax.set_xlabel('Iterations')
ax.set_ylabel('Energy (Eh)')
ax.grid()
fig.text(0.7, 0.75, f'VQE Energy: {result.optimal_value:.4f} Eh\nExact Energy: {exact_energy:.4f} Eh\nScore: {score:.0f}')
plt.title(f"Ground State Energy of LiH using RYRZ VQE Ansatz\nOptimizer: {result_dict['optimizer']} \n Mapper: {result_dict['mapping']}\nVariational Form: {result_dict['ansatz']} - RY")
ax.plot(counts, values)
ax.axhline(exact_energy, linestyle='--')
# fig_title = f"\
# {result_dict['optimizer']}-\
# {result_dict['mapping']}-\
# {result_dict['ansatz']}-\
# Energy({result_dict['energy (Ha)']:.3f})-\
# Score({result_dict['score']:.0f})\
# .png"
fig.savefig("ryrz_vqe_lih_fig.png", dpi=300)
# Displaying and saving the data
import pandas as pd
result_df = pd.DataFrame.from_dict([result_dict])
result_df[['optimizer','ansatz', '# of qubits', 'error (mHa)', 'pass', 'score','# of parameters','rotation blocks', 'entanglement_blocks',
'entanglement', 'repetitions']]
###Output
_____no_output_____ |
CourseContent/12-Computer.Vision/Week 1/Practice Exercise/CV- practice_exercise_answers.ipynb | ###Markdown
**Import necessary Libraries**
###Code
import numpy as np
from skimage import data, io
import matplotlib.pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import warnings
# Suppress warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
**Load any image**
###Code
# skimage.data has a lot of sample images to play around with
image = data.coins()
plt.imshow(image, cmap='gray')
###Output
_____no_output_____
###Markdown
**Import utilities needed for convolution**
###Code
from scipy.signal import convolve2d
###Output
_____no_output_____
###Markdown
**Find the 5X5 Gaussian Blur kernel with sigma = 1.0 and convolve the above image with that kernel** *Hint: You can create various Gaussian kernels at http://dev.theomader.com/gaussian-kernel-calculator/*
###Code
kernel = np.array([[0.003765,0.015019,0.023792,0.015019,0.003765],
[0.015019,0.059912,0.094907,0.059912,0.015019],
[0.023792,0.094907,0.150342,0.094907,0.023792],
[0.015019,0.059912,0.094907,0.059912,0.015019],
[0.003765,0.015019,0.023792,0.015019,0.003765]])
###Output
_____no_output_____
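If you would rather not copy the kernel from the online calculator, a small helper like the sketch below can generate it. This is a plain NumPy sampled Gaussian (the calculator integrates over each cell), so the values may differ slightly in the last decimals from the hard-coded kernel above.
```python
import numpy as np

def gaussian_kernel(size=5, sigma=1.0):
    # sample a centred 1-D Gaussian and take its outer product
    ax = np.arange(size) - (size - 1) / 2.0
    g1d = np.exp(-0.5 * (ax / sigma) ** 2)
    kernel = np.outer(g1d, g1d)
    return kernel / kernel.sum()  # normalise so the weights sum to 1

print(np.round(gaussian_kernel(5, 1.0), 6))
```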
###Markdown
**Convolve the Gaussian kernel with the image using 'valid' convolution and show the results side by side**
###Code
blurred_image = convolve2d(image, kernel, mode = 'valid')
plt.imshow(blurred_image,cmap='gray')
plt.show()
plt.imshow(image,cmap='gray')
###Output
_____no_output_____
###Markdown
Build a CNN to classify 10 monkey species **Mounting Google Drive on to the Google Colab instance**
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
_____no_output_____
###Markdown
**Set the appropriate path for the dataset zip provided**
###Code
images_path = "/content/drive/My Drive/Colab Notebooks/tutorials/zips/monkeys_dataset.zip"
###Output
_____no_output_____
###Markdown
**Extracting the dataset.zip to the present working directory**
###Code
from zipfile import ZipFile
with ZipFile(images_path, 'r') as zip:
zip.extractall()
###Output
_____no_output_____
###Markdown
*Check the list of files in the pwd(present working directory) by running command 'ls' and ensure 'dataset' folder has been generated*
###Code
!ls
###Output
_____no_output_____
###Markdown
**Importing required Keras modules**
###Code
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense, Dropout
###Output
_____no_output_____
###Markdown
**Build a Sequential CNN classifier with input shape 64X64 using three sets of Convolutional + Pooling layers. You can additionally use Dropout in the fully connected layers. Make sure the final layer shape matches the number of classes**
###Code
# Initialising the CNN classifier
classifier = Sequential()
# Add a Convolution layer with 32 kernels of 3X3 shape with activation function ReLU
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu', padding = 'same'))
# Add a Max Pooling layer of size 2X2
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Add another Convolution layer with 32 kernels of 3X3 shape with activation function ReLU
classifier.add(Conv2D(32, (3, 3), activation = 'relu', padding = 'same'))
# Adding another pooling layer
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Add another Convolution layer with 32 kernels of 3X3 shape with activation function ReLU
classifier.add(Conv2D(32, (3, 3), activation = 'relu', padding = 'valid'))
# Adding another pooling layer
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Flattening the layer before fully connected layers
classifier.add(Flatten())
# Adding a fully connected layer with 512 neurons
classifier.add(Dense(units = 512, activation = 'relu'))
# Adding dropout with probability 0.5
classifier.add(Dropout(0.5))
# Adding a fully connected layer with 128 neurons
classifier.add(Dense(units = 128, activation = 'relu'))
# The final output layer with 10 neurons to predict the categorical classifcation
classifier.add(Dense(units = 10, activation = 'softmax'))
###Output
_____no_output_____
###Markdown
**Compile the CNN classifier with the Adam optimizer (default learning rate and other parameters), Categorical Crossentropy as the loss function and Accuracy as the metric to monitor** *Optionally you can use an optimizer with a custom learning rate and pass it to the optimizer parameter of compile* *Eg: keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)*
###Code
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
###Output
_____no_output_____
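If you want to try the optional custom learning rate mentioned above, a minimal sketch (reusing the `classifier` built above; the 1e-4 value is only illustrative) would be:
```python
from keras.optimizers import Adam

classifier.compile(optimizer = Adam(lr=1e-4),
                   loss = 'categorical_crossentropy',
                   metrics = ['accuracy'])
```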
###Markdown
**Use ImageDataGenerator to create a test and training set data generators and use fit_generator() function to train the model** *ImageDataGenerator is a powerful preprocessing utility to generate training and testing data with common data augmentation techniques. It can also be used to generate training data from Images stored in hierarchical directory structuresFor more options of ImageDataGenerator go to https://keras.io/preprocessing/image/*
###Code
from keras.preprocessing.image import ImageDataGenerator
# Create data generator for training data with data augmentation and normalizing all
# values by 255
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
# Setting training data generator's source directory
# Setting the target size to resize all the images to (64,64) as the model input layer expects 64X64 images
training_set = train_datagen.flow_from_directory('./dataset/train',
target_size = (64, 64),
batch_size = 32,
class_mode = 'categorical')
# Setting testing data generator's source directory
test_set = test_datagen.flow_from_directory('./dataset/test',
target_size = (64, 64),
batch_size = 32,
class_mode = 'categorical')
# There are 1098 training images and 272 test images in total
classifier.fit_generator(training_set,
steps_per_epoch = int(1098/32),
epochs = 10,
validation_data = test_set,
validation_steps = int(272/32))
###Output
_____no_output_____
###Markdown
**save the model and its weights**
###Code
classifier.save('./classifier.h5')
classifier.save_weights('./classifier_weights.h5')
###Output
_____no_output_____
###Markdown
*Check the current directory if the weights have been saved*
###Code
!ls
###Output
_____no_output_____
###Markdown
Testing the model **Load the pre-trained saved model and load the weights**
###Code
from keras.models import load_model
import numpy as np
from keras.preprocessing import image
# Load the pre trained model from the HDF5 file saved previously
pretrained_model = load_model('./classifier.h5')
pretrained_model.load_weights('./classifier_weights.h5')
###Output
_____no_output_____
###Markdown
**Test the model on one single image from the test folders**
###Code
import cv2
# Point this at any image inside one of the class folders under ./dataset/test
# (the original path referenced cifar10, which is not part of this dataset)
test_image = cv2.imread('./dataset/test/<class_folder>/<image_file>.jpg')
# Check if the size of the Image array is compatible with the Keras model
print(test_image.shape)
# Resize to the 64X64 input the model expects, then add the batch dimension
test_image = cv2.resize(test_image, (64, 64))
test_image = np.expand_dims(test_image, axis = 0)
test_image = test_image*1/255.0
# Check the size of the Image array again
print('After expand_dims: '+ str(test_image.shape))
# Predict the result of the test image with the reloaded model
result = pretrained_model.predict(test_image)
# Check the indices Image Data Generator has allotted to each folder
classes_dict = training_set.class_indices
print(classes_dict)
# Creating a list of classes in test set for showing the result as the folder name
prediction_class = []
for class_name,index in classes_dict.items():
prediction_class.append(class_name)
print(result[0])
# Index of the class with maximum probability
predicted_index = np.argmax(result[0])
# Print the name of the class
print(prediction_class[predicted_index])
###Output
_____no_output_____
###Markdown
**Generating a report on the test data**
###Code
# Re-initializing the test data generator with shuffle=False to create the confusion matrix
test_set = test_datagen.flow_from_directory('./dataset/test',
target_size = (64, 64),
batch_size = 32,
shuffle=False,
class_mode = 'categorical')
# Predict the whole generator to get predictions (272 test images in total)
Y_pred = classifier.predict_generator(test_set, int(272/32+1))
# Find out the predictions classes with maximum probability
y_pred = np.argmax(Y_pred, axis=1)
# Utilities for confusion matrix
from sklearn.metrics import classification_report, confusion_matrix
# Printing the confusion matrix based on the actual data vs predicted data.
print(confusion_matrix(test_set.classes, y_pred))
# Printing the classification report
print(classification_report(test_set.classes, y_pred, target_names=prediction_class))
###Output
_____no_output_____ |
src/datacleaning/Chapter 8/1_combine_vertically.ipynb | ###Markdown
Table of Contents1 Import pandas and NumPy, as well as the os module2 Load the data from Cameroon and Poland3 Concatenate the Cameroon and Poland data4 Concatenate all the country data files5 Show some of the combined data6 Check the values in the concatenated data7 Fix the missing values Import pandas and NumPy, as well as the os module
###Code
import pandas as pd
import numpy as np
import os
# pd.set_option('display.width', 200)
# pd.set_option('display.max_columns', 35)
# pd.set_option('display.max_rows', 50)
# pd.options.display.float_format = '{:,.0f}'.format
import watermark
%load_ext watermark
%watermark -n -i -iv
###Output
pandas : 1.2.1
json : 2.0.9
numpy : 1.19.2
watermark: 2.1.0
###Markdown
Load the data from Cameroon and Poland
###Code
ltcameroon = pd.read_csv('data/ltcountry/ltcameroon.csv')
ltpoland = pd.read_csv('data/ltcountry/ltpoland.csv')
###Output
_____no_output_____
###Markdown
Concatenate the Cameroon and Poland data
###Code
ltcameroon.shape
ltpoland.shape
ltpoland.columns
ltcameroon.columns
ltall = pd.concat([ltcameroon, ltpoland])
ltall['country'].value_counts()
###Output
_____no_output_____
###Markdown
Concatenate all the country data files
###Code
directory = 'data/ltcountry'
ltall = pd.DataFrame()
for filename in os.listdir(directory):
if filename.endswith('.csv'):
fileloc = os.path.join(directory, filename)
# open the next file
with open(fileloc) as file:
ltnew = pd.read_csv(fileloc)
print(filename + ' has ' + str(ltnew.shape[0]) + ' rows.')
ltall = pd.concat([ltall, ltnew])
# check for differences in columns
columndiff = ltall.columns.symmetric_difference(ltnew.columns)
if (not columndiff.empty):
print('',
'Different column names for: ',
filename,
columndiff,
sep='\n')
###Output
ltbrazil.csv has 1104 rows.
ltcameroon.csv has 48 rows.
ltindia.csv has 1056 rows.
ltjapan.csv has 1800 rows.
ltmexico.csv has 852 rows.
ltoman.csv has 288 rows.
Different column names for:
ltoman.csv
Index(['latabs'], dtype='object')
ltpoland.csv has 120 rows.
###Markdown
Show some of the combined data
###Code
ltall[['country', 'station', 'month', 'temperature',
'latitude']].sample(5, random_state=1)
###Output
_____no_output_____
###Markdown
Check the values in the concatenated data
###Code
ltall['country'].value_counts().sort_index()
ltall.groupby(['country']).agg({
'temperature': ['min', 'mean', 'max', 'count'],
'latabs': ['min', 'mean', 'max', 'count']
})
###Output
_____no_output_____
###Markdown
Fix the missing values
###Code
ltall['latabs'] = np.where(ltall['country'] == 'Oman', ltall['latitude'],
ltall['latabs'])
ltall.groupby(['country']).agg({
'temperature': ['min', 'mean', 'max', 'count'],
'latabs': ['min', 'mean', 'max', 'count']
})
###Output
_____no_output_____ |
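An alternative sketch for the same fix: if `latabs` is simply the absolute value of `latitude` (an assumption that holds here, since Oman lies in the northern hemisphere), the gap can be filled for any country in one step with `fillna`:
```python
ltall['latabs'] = ltall['latabs'].fillna(ltall['latitude'].abs())
```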
pandaScript/.ipynb_checkpoints/Untitled-checkpoint.ipynb | ###Markdown
Welcome to our test notebook. We can use markdown.
###Code
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size':20, 'figure.figsize':(10,8)}) # set font and plot size to be larger
print("this is a test")
x=5
x
data = {
'apples': [3, 2, 0, 1],
'oranges': [0, 3, 7, 2],
'five':5,
'six':'seven'
}
purchases = pd.DataFrame(data, index=['June','July','August','September'])
import pandas as pd
import matplotlib.pyplot as plt
df = pd.DataFrame({
'name':['john','mary','peter','jeff','bill','lisa','jose'],
'age':[23,78,22,19,45,33,20],
'gender':['M','F','M','M','M','F','M'],
'state':['california','dc','california','dc','california','texas','texas'],
'num_children':[2,0,0,3,2,1,4],
'num_pets':[5,1,0,5,2,2,3]
})
print (df)
df
df.plot(kind='scatter', x='num_children', y='num_pets', color='red')
###Output
_____no_output_____ |
algoExpert/find_three_largest_numbers/solution.ipynb | ###Markdown
Find Three Largest Number[link](https://www.algoexpert.io/questions/Find%20Three%20Largest%20Numbers) My Solution
###Code
def findThreeLargestNumbers(array):
# Write your code here.
maxThree = [None, None, None] # maxThree[2] is the largest
for x in array:
if maxThree[0] == None:
maxThree[0] = x
continue
if maxThree[1] == None:
if x >= maxThree[0]:
maxThree[1] = x
else:
maxThree[1], maxThree[0] = maxThree[0], x
continue
if maxThree[2] == None:
if x >= maxThree[1]:
maxThree[2] = x
elif x < maxThree[1] and x >= maxThree[0]:
maxThree[2], maxThree[1]= maxThree[1], x
else:
maxThree = [maxThree[1], maxThree[0], x]
continue
if x > maxThree[0]:
maxThree[0] = x
if maxThree[0] > maxThree[1]:
maxThree[1], maxThree[0] = maxThree[0], maxThree[1]
if maxThree[1] > maxThree[2]:
maxThree[2], maxThree[1] = maxThree[1], maxThree[2]
return maxThree
def findThreeLargestNumbers(array):
# Write your code here.
maxThree = sorted(array[:3]) # sort the seed values so that maxThree[2] is the largest
for i in range(len(array)):
if i >=3 and array[i] > maxThree[0]:
maxThree[0] = array[i]
if maxThree[0] > maxThree[1]:
maxThree[1], maxThree[0] = maxThree[0], maxThree[1]
if maxThree[1] > maxThree[2]:
maxThree[2], maxThree[1] = maxThree[1], maxThree[2]
return maxThree
###Output
_____no_output_____
###Markdown
Expert Solution
###Code
def findThreeLargestNumbers(array):
threeLargest = [None, None, None]
for num in array:
updateLargest(threeLargest, num)
return threeLargest
def updateLargest(threeLargest, num):
if threeLargest[2] is None or num > threeLargest[2]:
shiftAndUpdate(threeLargest, num, 2)
elif threeLargest[1] is None or num > threeLargest[1]:
shiftAndUpdate(threeLargest, num, 1)
elif threeLargest[0] is None or num > threeLargest[0]:
shiftAndUpdate(threeLargest, num, 0)
def shiftAndUpdate(array, num, idx):
for i in range(idx + 1):
if i == idx:
array[i] = num
else:
array[i] = array[i + 1]
###Output
_____no_output_____ |
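A quick usage check on a hypothetical input array (any list with at least three numbers works); the expected output here is [18, 141, 541]:
```python
print(findThreeLargestNumbers([141, 1, 17, -7, -17, -27, 18, 541, 8, 7, 7]))
```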
.ipynb_checkpoints/Logistic Regression-Copy2-checkpoint.ipynb | ###Markdown
Logistic regression In this notebook we will study **Logistic Regression**. We will make some interactive graphs that let us see how it works. We will use interactive Jupyter widgets and the libraries **matplotlib** and **bqplot** for visualizations. To obtain more info you can read these posts [SPANISH]: **Author**: Pablo González Carrizo ([unmonoqueteclea](https://twitter.com/unmonoqueteclea)) **Web**: https://unmonoqueteclea.github.io Importing dependencies
###Code
import math
import numpy as np
from bqplot import ( LinearScale, Axis, Scatter, Lines, Label, Figure)
from ipywidgets import HBox, VBox, Layout
import pandas as pd
from scipy import special
from sklearn import preprocessing
###Output
_____no_output_____
###Markdown
Defining sigmoid function
###Code
def sigmoid(z):
return(1 / (1 + np.exp(-z)))
x_values = np.arange(-8,8)
test_sigmoid=[sigmoid(z) for z in x_values ]
###Output
_____no_output_____
###Markdown
Plotting the sigmoid function with bqplot
###Code
#Scalers
sc_x = LinearScale()
sc_y = LinearScale()
#Axis
ax_x = Axis(scale=sc_x, label='')
ax_y = Axis(scale=sc_y, orientation='vertical', tick_format='0.2f', label='')
#Creating the graph
line = Lines(x=x_values,y=test_sigmoid,scales={'x': sc_x, 'y': sc_y},colors=['blue'])
fig = Figure(marks=[line], axes=[ax_x, ax_y],layout=Layout(width='100%'), title="Sigmoid function")
#Displaying the graph
VBox([fig])
###Output
_____no_output_____
###Markdown
Creating points
###Code
#(CLASS Y = 1) Positions of points with y = 1
posX1 = np.array([10,45,23,12,3 ,18,30,35, 5,32])
posY1 = np.array([12,16,20,60,80,99,54, 9,40,65])
#Creating matrix from positions
X1 = np.c_[np.ones(posX1.shape[0]),posX1,posY1]
#(CLASS Y = 0) Positions of points with y = 0
posX2 = np.array([67,53,90,87,71,59,95,80,65,80])
posY2 = np.array([34,67,54,8, 78,87,80,50,60,90])
#Creating matrix from positions
X2 = np.c_[np.ones(posX2.shape[0]),posX2,posY2]
X=np.concatenate([X1,X2])
#Classes (1 or 0)
y=np.concatenate([np.ones(posX1.shape[0]),np.zeros(posX2.shape[0])])
m = y.size # Number of training examples
###Output
_____no_output_____
###Markdown
Plot function
###Code
def plot_points(x1,x2,y1,y2,title="",boundary=None):
#Scalers
sc_x = LinearScale(min=0,max=100)
sc_y = LinearScale(min=0,max=100)
#Axis
ax_x = Axis(scale=sc_x, label='')
ax_y = Axis(scale=sc_y, orientation='vertical', tick_format='0.2f', label='')
#Creating plot
scatt = Scatter(x=x1, y=y1, scales={'x': sc_x, 'y': sc_y}, colors=['red'])
scatt2 = Scatter(x=x2, y=y2, scales={'x': sc_x, 'y': sc_y}, colors=['blue'])
if(boundary is None):
fig = Figure(marks=[scatt,scatt2], axes=[ax_x, ax_y],layout=Layout(width='100%'), title=title)
else:
lines = Lines(x=boundary[0],y=boundary[1],scales={'x': sc_x, 'y': sc_y},colors=['green'])
fig = Figure(marks=[scatt,scatt2,lines], axes=[ax_x, ax_y],layout=Layout(width='100%'), title=title)
return fig
###Output
_____no_output_____
###Markdown
Displying plot with all the points
###Code
fig = plot_points(posX1,posX2,posY1,posY2,title="")
HBox([fig])
###Output
_____no_output_____
###Markdown
Hypothesis and Cost function
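For reference, the quantities implemented below are (standard logistic-regression notation; $\lambda$ is the `regularization` argument and $\theta_0$ is not regularized):
$$h_\theta(x) = \sigma(\theta^T x) = \frac{1}{1 + e^{-\theta^T x}}$$
$$J(\theta) = \frac{1}{m}\left[\sum_{i=1}^{m}\left(-y^{(i)}\log h_\theta(x^{(i)}) - (1-y^{(i)})\log\left(1-h_\theta(x^{(i)})\right)\right) + \frac{\lambda}{2}\sum_{j=1}^{n}\theta_j^2\right]$$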
###Code
def h(mytheta,myX):
#The expit function, also known as the logistic function,
#is defined as expit(x) = 1/(1+exp(-x)).
#It is the inverse of the logit function.
return special.expit(np.dot(myX,mytheta))
#Cost function
def computeCost(mytheta,myX,myy,regularization = 0):
term1 = np.dot( -np.array(myy).T , np.log(h(mytheta,myX)) )
term2 = np.dot( (1-np.array(myy)).T , np.log(1-h(mytheta,myX)) )
regterm = (regularization/2) * np.sum(np.dot(mytheta[1:].T,mytheta[1:])) #Skip theta0
return float( (1./m) * ( np.sum(term1 - term2) + regterm ) )
initial_theta = np.zeros((X.shape[1],1))
computeCost(initial_theta,X,y) #Computing initial cost
###Output
_____no_output_____
###Markdown
Representing cost function
###Code
h_vals = np.arange(0.0001,1,0.001)
# Computing cost (use a new name so we do not overwrite the hypothesis function h or the labels y)
cost1 = [-math.log(h_value,10) for h_value in h_vals]
cost2 = [-math.log(1-h_value,10) for h_value in h_vals]
#Scalers
sc_x = LinearScale()
sc_y = LinearScale()
#Axis
ax_x = Axis(scale=sc_x, label='h(x)')
ax_y = Axis(scale=sc_y, orientation='vertical', tick_format='0.2f', label='Cost')
#Creating the graph
line1 = Lines(x=h_vals,y=cost1,scales={'x': sc_x, 'y': sc_y},colors=['blue'])
fig1 = Figure(marks=[line1], axes=[ax_x, ax_y],layout=Layout(width='100%'), title="y=1")
line2 = Lines(x=h_vals,y=cost2,scales={'x': sc_x, 'y': sc_y},colors=['blue'])
fig2 = Figure(marks=[line2], axes=[ax_x, ax_y],layout=Layout(width='100%'), title="y=0")
#Displaying the graph
HBox([fig1,fig2])
###Output
_____no_output_____
###Markdown
Obtaining theta
###Code
#This function minimizes our cost function using the "downhill simplex algorithm."
#http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.optimize.fmin.html
from scipy import optimize
def optimizeTheta(mytheta,myX,myy,mylambda=0.):
result = optimize.fmin(computeCost, x0=mytheta, args=(myX, myy, mylambda), maxiter=400, full_output=True)
return result[0], result[1]
theta, mincost = optimizeTheta(initial_theta,X,y)
def makePrediction(mytheta, myx):
return h(mytheta,myx) >= 0.5
#Compute the percentage of samples I got correct:
pos_correct = float(np.sum(makePrediction(theta,X1)))
neg_correct = float(np.sum(np.invert(makePrediction(theta,X2))))
tot = len(X1)+len(X2)
prcnt_correct = float(pos_correct+neg_correct)/tot
print("Fraction of training samples correctly predicted: %f." % prcnt_correct)
def decission_boundary(mytheta):
boundary_xs = np.array([np.min(X[:,1]), np.max(X[:,1])])
boundary_ys = (-1./theta[2])*(theta[0] + theta[1]*boundary_xs)
return (boundary_xs,boundary_ys)
fig = plot_points(posX1,posX2,posY1,posY2,title="",boundary=decission_boundary(theta))
HBox([fig])
###Output
_____no_output_____ |
Train Word Embedding.ipynb | ###Markdown
Pre-processing Functions
###Code
def remove_stop_words(contents):
stop_words = list(stopwords.words('english'))
for w in stop_words:
contents = contents.replace(w, '')
return contents
###Output
_____no_output_____
###Markdown
Tokenize and remove unnecessary characters
###Code
def remove_unnecessary_characters(contents):
contents = contents.replace('\n', ' ')
contents = contents.replace('..', '')
contents = contents.replace('--', '')
contents = contents.replace('==', '')
contents = contents.replace('///', '')
contents = contents.replace('\\\\', '')
contents = ' '.join(contents.split())
contents = contents.strip().lower()
# contents = remove_stop_words(contents)
tokenizer = RegexpTokenizer('[A-Za-z0-9\@\.\&\/\:\$\-\_]+')
tokens = tokenizer.tokenize(contents)
tokens = ' '.join( [i for i in tokens if len(i) > 1])
return tokens
def replace_email(content):
pattern = re.compile('[\w\/\.\-]+\@[\w\/\.\-]+\.[\w]+')
replaced_content = re.sub(pattern, 'this_is_email', content)
return replaced_content
def replace_link(content):
pattern = re.compile('(http[s]?:\/\/|www\.)?[\w\/\.\-]+\.(com|html|php)([\/][\w\/\.\-]*)*')
replaced_content = re.sub(pattern, 'this_is_link', content)
return replaced_content
###Output
_____no_output_____
###Markdown
Main program Read CSV data for train data and test data
###Code
train_data = pd.read_csv('dataset/train_data.csv')
test_data = pd.read_csv('dataset/test_data.csv')
###Output
_____no_output_____
###Markdown
Tokenize remove unnecessary characters for train data and test data
###Code
preproc_train_data = train_data['content'].copy().apply(remove_unnecessary_characters).apply(replace_email).apply(replace_link)
print('Train data')
preproc_train_data.head()
preproc_test_data = test_data['content'].copy().apply(remove_unnecessary_characters).apply(replace_email).apply(replace_link)
print('Test data')
preproc_test_data.head()
corpus = []
for row in preproc_train_data.iteritems():
corpus.append(row[1].split())
for row in preproc_test_data.iteritems():
corpus.append(row[1].split())
import gensim
model_cbow = gensim.models.Word2Vec(
corpus,
size=50,
sg=0,
window=2,
min_count=5,
workers=4
)
model_cbow.train(corpus, total_examples=len(corpus), epochs=50)
w2v_cbow = dict(zip(model_cbow.wv.index2word, model_cbow.wv.syn0))
model_sg = gensim.models.Word2Vec(
corpus,
size=50,
sg=1,
window=2,
min_count=5,
workers=4
)
model_sg.train(corpus, total_examples=len(corpus), epochs=50)
w2v_sg = dict(zip(model_sg.wv.index2word, model_sg.wv.syn0))
len(w2v_sg.items())
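# Optional sanity check of the skip-gram embedding (assumes the token 'free' survived min_count;
# swap in any word from model_sg.wv.index2word if it did not)
if 'free' in model_sg.wv:
    print(model_sg.wv.most_similar('free', topn=5))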
class MeanEmbeddingVectorizer(object):
def __init__(self, word2vec):
self.word2vec = word2vec
# if a text is empty we should return a vector of zeros
# with the same dimensionality as all the other vectors
self.dim = 50
def fit(self, X, y):
return self
def transform(self, X):
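# NOTE: despite the class name, this pools the word vectors with an element-wise max;
# replace np.max with np.mean below to get true mean pooling.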
return np.array([
np.max([self.word2vec[w] for w in words if w in self.word2vec]
or [np.zeros(self.dim)], axis=0)
for words in X
])
from sklearn.pipeline import Pipeline
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.svm import SVC
etree_w2v_cbow = Pipeline([
("word2vec vectorizer", MeanEmbeddingVectorizer(w2v_cbow)),
("svc", SVC(kernel='sigmoid', gamma=1.0))])
etree_w2v_sg = Pipeline([
("word2vec vectorizer", MeanEmbeddingVectorizer(w2v_sg)),
("svc", SVC(kernel='sigmoid', gamma=1.0))])
features_train, features_test, labels_train, labels_test = train_test_split(preproc_train_data, train_data['prediction'], test_size=0.2, random_state=24)
etree_w2v_cbow.fit(features_train, labels_train)
etree_w2v_sg.fit(features_train, labels_train)
###Output
_____no_output_____
###Markdown
CBOW
###Code
prediction = etree_w2v_cbow.predict(features_test)
accuracy_score(labels_test, prediction)
###Output
_____no_output_____
###Markdown
SG
###Code
prediction = etree_w2v_sg.predict(features_test)
accuracy_score(labels_test, prediction)
###Output
_____no_output_____ |
drlim.ipynb | ###Markdown
Dr LIM - Dimensionality Reduction by Learning an Invariant Mapping - This paper (similar to t-SNE) proposes an alternative method to achieve dimensionality reduction
###Code
from __future__ import print_function
from functools import reduce
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torch.nn.functional as F
import torch.optim as optim
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from matplotlib import pyplot as plt
from image_utilities import plot_images
###Output
_____no_output_____
###Markdown
CNN used in the paper ![CNN architecture](./images/cnn.png) Torch implementation Forward
###Code
class DrlimCNN(nn.Module):
def __init__(self, n_lower_dim=2):
super(DrlimCNN, self).__init__()
# Layer 1:
# n_input_channel = 1
# n_output_channel = 15
# Kernel Size = 5 for padding = 0, stride = 1
k_size = 5
in_channels = 1
out_channels = 15
self.layer_1 = nn.Conv2d(in_channels, out_channels, k_size)
# Layer 2: Subsampling - Maxpooling
# Kernel Size = 15 for padding=0 and stride = 1
k_size = 15
self.max_pooling = nn.MaxPool2d(k_size, stride=1)
# Layer 3: Conv layer
# n_input_channel = 15
# n_output_channel = 30
# Kernel size = 10
in_channels = 15
out_channels = 30
k_size = 10
self.layer_3 = nn.Conv2d(in_channels, out_channels, k_size)
# Layer 4: Fully connected
self.output_layer = nn.Linear(30, n_lower_dim)
# Relu
self.relu = nn.ReLU()
def forward(self, x):
# Add relu on top of conv layer
x = self.layer_1(x)
x = self.relu(x)
# Maxpool
x = self.max_pooling(x)
# Another conv
x = self.layer_3(x)
# Get the size except for batch
num_flat_features = reduce(lambda x, y: x * y, x.shape[1:])
# Flatten
x = x.reshape(-1, num_flat_features)
# Fully connected
x = self.output_layer(x)
return x
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
net = DrlimCNN()
# Enable GPU
net.to(device)
net.cuda()
###Output
cuda:0
###Markdown
Backward Define contrastive loss function
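For reference, the per-pair loss implemented below is (up to constant factors) the DrLIM contrastive loss, with $Y=0$ for similar pairs, $Y=1$ for dissimilar pairs, margin $m$ (10 in the code) and $D$ the Euclidean distance between the two embeddings:
$$L(Y, \vec{X}_1, \vec{X}_2) = (1-Y)\,D^2 + Y\,\big(\max(0,\ m - D)\big)^2,\qquad D = \lVert G_W(\vec{X}_1) - G_W(\vec{X}_2) \rVert_2$$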
###Code
def contrastive_loss(output_1, output_2,
target_1, target_2):
# TODO:
# if target_1 == target_2:
# y = torch.zeros_like(output_1, requires_grad=True)
# else:
# y = torch.ones_like(output_1, requires_grad=True)
y = 1 - torch.eq(target_1, target_2).int()
distance = torch.norm(output_1 - output_2, dim=1)
# Similar loss
ls = torch.pow(distance, 2)
# Dissimilar loss
m = 10
ld = torch.max(torch.zeros_like(distance), m - distance)
ld = torch.pow(ld, 2)
loss = torch.mean((1 - y) * ls + y * ld)
return loss
###Output
_____no_output_____
###Markdown
Test the network once
###Code
test = False
if test:
# Test Code
input_1 = torch.randn(1, 1, 28, 28)
input_2 = torch.randn(1, 1, 28, 28)
out_1 = net(input_1.cuda())
out_2 = net(input_2.cuda())
out_1 = out_1.requires_grad_(True)
out_2 = out_2.requires_grad_(True)
print("Before:")
print(net.output_layer.weight.grad)
loss = contrastive_loss(out_1, out_2,
0, 0)
print(loss)
loss.backward()
print("After:")
print(net.output_layer.weight.grad)
###Output
_____no_output_____
###Markdown
Load Data
###Code
n_epochs = 3
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 10
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('./files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
###Output
_____no_output_____
###Markdown
Train
###Code
target_1 = 9
target_2 = 4
n_epochs = 100
n_random_repeat = 2
optimizer = optim.Adam(net.parameters(), lr=3.e-4)
all_data = []
all_target = []
for batch_idx, (data, target) in enumerate(train_loader):
# Choose only either `target_1` or `target_2`
filter_index = ((target == target_1) | (target == target_2))
all_data += data[filter_index].numpy().tolist()
all_target += target[filter_index].numpy().tolist()
all_data.pop()
all_target.pop()
batch_size = 30
all_data = np.array(all_data).reshape(11790, 1, 28, 28)
all_target = np.array(all_target).reshape(11790, 1)
all_data = np.array(all_data).reshape(-1, 30, 28, 28)
all_target = np.array(all_target).reshape(-1, 30)
loss_cache = []
for epoch in range(n_epochs): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(all_data):
target = torch.tensor(all_target[i], dtype=torch.float32)
data = torch.tensor(data, dtype=torch.float32)
data = torch.unsqueeze(data, 1).to(device)
input_data, label = data, target
for r in range(n_random_repeat):
# Split this batch into two groups and compute contrast loss between them
out = net(input_data)
sample = np.random.randint(0, 30, 15).tolist()
out_1, label_1 = out[sample, :], target[sample]
sample = np.random.randint(0, 30, 15).tolist()
out_2, label_2 = out[sample, :], target[sample]
# Loss - against target_1 and target_2
loss = contrastive_loss(out_1.to('cpu'), out_2.to('cpu'),
label_1, label_2)
# Back prop
loss.backward()
optimizer.step()
loss_cache.append(loss.detach())
# print statistics
running_loss += loss.item()
if i % 20 == 0: # print every 2000 mini-batches
print('[{}, {}] loss: {} \r'.format(epoch + 1, i + 1, running_loss / 2000), end="")
running_loss = 0.0
optimizer.zero_grad()
print("")
print('Finished Training')
plt.plot(list(range(len(loss_cache))), loss_cache)
plt.title("Epoch vs Loss")
plt.ylabel('Loss')
plt.xlabel('Steps')
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('./files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
# Take one batch from the test loader (otherwise the leftover variables from the training loop would be reused)
data, target = next(iter(test_loader))
data_x = []
data_y = []
data_manifold = []
for i in range(10):
filter_index = (target == i)
temp_data = data[filter_index].numpy().squeeze()
# Get manifold output
temp_data = torch.tensor(temp_data).unsqueeze(1).to(device)
out = net(temp_data)
out = out.clone()
out = out.cpu().detach().numpy().squeeze()
# Prepare data to plot
data_x.append(out[:, 0])
data_y.append(out[:, 1])
def plot_graphs(one=False, two=False,
three=False, four=True,
five=False, six=False,
seven=False, eight=False,
nine=True, zero=False):
global data_x, data_y
plt.figure(figsize=(14, 10))
mapping = {0: "zero", 1: "one",
2: "two", 3: "three",
4: "four", 5: "five",
6: "six", 7: "seven",
8: "eight", 9: "nine"}
for k in mapping:
if locals()[mapping[k]]:
plt.scatter(data_x[k], data_y[k], label="Number {}".format(k), alpha=0.5)
plt.title("Lower dimensional map - Number 9 and Number 4")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Simple Plot
###Code
plot_graphs(nine=True, four=True)
###Output
_____no_output_____
###Markdown
Interactive Graph
###Code
w = interactive(plot_graphs)
display(w)
###Output
_____no_output_____ |
Jupyter_notebooks/Notebook_3_CNN_Predictions_of_Native_UTR_HIS3_data.ipynb | ###Markdown
Set plotting style
###Code
plt.rcParams["patch.force_edgecolor"] = True
sns.set_style('whitegrid',
{'axes.grid': True,
'grid.linestyle': u'--',
'axes.edgecolor': '0.1',
'axes.labelcolor': '0',
'axes.labelsize': 15,
'axes.titlesize': 15,
'legend.fontsize': 15,
'xtick.labelsize': 15,
'ytick.labelsize': 15,
})
###Output
_____no_output_____
###Markdown
The directory that contains information about the model parameters
###Code
model_name = 'Random_UTR_CNN'
model_params_dir = '../Results/{0}.Hyperparam.Opt/'.format(model_name)
###Output
_____no_output_____
###Markdown
Create a directory to save results:(if it doesn't already exist)
###Code
results_dir = '../Results/{0}.ModelPredictions/'.format(model_name)
if not os.path.exists(results_dir):
os.mkdir(results_dir)
###Output
_____no_output_____
###Markdown
Load the cleaned up data. The csv should be tab-separated. The read counts are log2.
###Code
data_dir = '../Data/'
native_data = pd.read_csv(data_dir + 'Native_UTRs.csv', index_col = 0)
###Output
_____no_output_____
###Markdown
One-hot encoding of the sequences, i.e. we're converting the sequences from being represented as a 50 character string of bases to a 4x50 matrix of 1's and 0's, with each row corresponding to a base and every column a position in the UTR. Note that we're doing the indexing a little differently than in Notebook 1 -- see comments in function
###Code
# one hot encoding of UTRs
# X = one hot encoding matrix
# Y = growth rates
def one_hot_encoding(df, seq_column, expression):
bases = ['A','C','G','T']
base_dict = dict(zip(bases,range(4))) # {'A' : 0, 'C' : 1, 'G' : 2, 'T' : 3}
n = len(df)
# length of the UTR sequence
# we also add 10 empty spaces to either side
total_width = df[seq_column].str.len().max() + 20
# initialize an empty numpy ndarray of the appropriate size
X = np.zeros((n, 1, 4, total_width))
# an array with the sequences that we will one-hot encode
seqs = df[seq_column].values
# loop through the array of sequences to create an array that keras will actually read
for i in range(n):
seq = seqs[i]
# loop through each individual sequence, from the 5' to 3' end
for b in range(len(seq)):
# this will assign a 1 to the appropriate base and position for this UTR sequence
# Note that this is different than the same function in Notebook #1 (since we're dealing
# with sequences with nonuniform lengths)
X[i, 0, base_dict[seq[b]], b + 10 + 50 - len(seq)] = 1.
# keep track of where we are
if (i%10000)==0:
print i,
X = X.astype(theano.config.floatX)
Y = np.asarray(df[expression].values,
dtype = theano.config.floatX)[:, np.newaxis]
return X, Y, total_width
X, Y, total_width = one_hot_encoding(native_data, 'UTR', 'growth_rate')
###Output
0 10000
###Markdown
Record indexes for UTRs with >100 reads in the input If we have more reads for a given UTR at the outset, we can be more confident that we have made an accurate measurement. For this reason, we use those UTRs with the most reads to test our model on, because these should have the least experimental noise.
###Code
# a numpy array of the indexes of UTRs with > 100 reads
test_inds = native_data.loc[native_data.t0 > 100].index.values
###Output
_____no_output_____
###Markdown
Load model trained on random 5' UTRs
###Code
!ls {model_params_dir}
model = keras.models.model_from_json(open(model_params_dir + 'model_arch.json').read())
model.load_weights(model_params_dir + 'model_weights.hdf5')
model.compile(loss='mean_squared_error', optimizer='adam')
Y_pred = model.predict(X, verbose=1)
###Output
11840/11856 [============================>.] - ETA: 0s
###Markdown
Plot results
###Code
# data
x = Y_pred[test_inds].flatten()
y = Y[test_inds].flatten()
# calculate R^2
r2 = scipy.stats.pearsonr(x, y)[0]**2
g = sns.jointplot(x,
y,
stat_func = None,
kind = 'scatter',
s = 5,
alpha = 0.25,
size = 5)
g.ax_joint.set_xlabel('Predicted log$_2$ Growth Rate')
g.ax_joint.set_ylabel('Measured log$_2$ Growth Rate')
text = "R$^2$ = {:0.2}".format(r2)
plt.annotate(text, xy=(-5.5, 0.95), xycoords='axes fraction')
plt.title("CNN predictions of native 5' UTR HIS3 data", x = -3, y = 1.25)
###Output
_____no_output_____
###Markdown
Save data and predictions to csv
###Code
native_data['pred_growth_rate'] = Y_pred
native_data.to_csv(results_dir + 'Native_UTRs_with_predictions.csv')
###Output
_____no_output_____ |
tutorial-ja/155_four_ja.ipynb | ###Markdown
Addition. A quantum computer can also carry out the same calculations as a conventional computer. Let's look at an addition circuit. What we will learn: 1. Implement binary addition with quantum gates 2. Use quantum superposition to run several additions with a single circuit. Installing Blueqat: we install Blueqat from pip.
###Code
!pip install blueqat
###Output
Requirement already satisfied: blueqat in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (0.3.13)
Requirement already satisfied: numpy~=1.12 in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from blueqat) (1.18.3)
Requirement already satisfied: scipy>=1.1.0 in /home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages (from blueqat) (1.1.0)
[33mWARNING: You are using pip version 20.0.2; however, version 20.1 is available.
You should consider upgrading via the '/home/ec2-user/anaconda3/envs/python3/bin/python -m pip install --upgrade pip' command.[0m
###Markdown
Binary addition. Addition uses a CCX gate to perform the carry and CX gates to add up the digits. Here we build a quantum circuit for the binary addition a+b=cd. Depending on the values of a and b we implement four kinds of addition: 0+0 = 00 => 0000, 0+1 = 01 => 0101, 1+0 = 01 => 1001, 1+1 = 10 => 1110. We represent this as a 4-qubit bit string: the first two qubits are the inputs a and b, and the last two qubits are the outputs c and d. We build the circuit that loads a and b and the circuit that actually performs the addition separately, and reuse the latter several times. An input of 1 is loaded by flipping 0 with an X gate. The addition part of the circuit looks like this (* marks a control bit):
```
a ---*---*------- a
b ---*---|---*--- b
0 ---X---|---|--- c
0 -------X---X--- d
```
After loading the data into a and b with X gates, the CCX gate computes the carry and the CX gates add up the digits.
###Code
# Load the tools
from blueqat import Circuit
# Build the addition part
adder = Circuit().ccx[0,1,2].cx[0,3].cx[1,3]
#0+0
(Circuit() + adder).m[:].run(shots=100)
#0+1
(Circuit().x[1] + adder).m[:].run(shots=100)
#1+0
(Circuit().x[0] + adder).m[:].run(shots=100)
#1+1
(Circuit().x[0,1] + adder).m[:].run(shots=100)
###Output
_____no_output_____
###Markdown
As shown above, we have implemented addition. Addition using superposition: now, instead of loading the data one case at a time with X gates, let's try the addition with H gates.
###Code
# Use H gates instead of X gates for the input
(Circuit().h[0,1] + adder).m[:].run(shots=100)
###Output
_____no_output_____
###Markdown
Using Hadamard gates, the four additions each appear as an answer roughly 1/4 of the time. Once we have a general-purpose addition circuit like this, we can run computations that exploit superposition. Addition using entanglement: next, instead of H gates, let's use quantum entanglement to run only the additions with a+b=1.
###Code
# Create entanglement between 01 and 10
(Circuit().h[0].cx[0,1].x[0] + adder).m[:].run(shots=100)
###Output
_____no_output_____
###Markdown
In this way, because the input values 01 and 10 are entangled, these two additions each appear about half of the time. -------- Explanation: building the circuit (the higher digit). First we build the higher digit of the sum. Looking at that digit, the results are 0, 0, 0, 1. As a table:

|X|Y|2nd digit of X+Y|
|:-:|:-:|:-:|
|0|0|0|
|0|1|0|
|1|0|0|
|1|1|1|

This is exactly the behaviour of the CCX gate (CCX flips the third bit when the first two input bits are both 1). So for the input 11 we can consider the circuit below. Taking the top two bits as the input and the remaining bit as the output, the circuit outputs 1.

Explanation: building the circuit (the lower digit). Next we build the other digit. Looking at that digit, the results are 0, 1, 1, 0. As a table:

|X|Y|1st digit of X+Y|
|:-:|:-:|:-:|
|0|0|0|
|0|1|1|
|1|0|1|
|1|1|0|

This is exactly the behaviour of the CX gate (the left-hand side is the CX input and the right-hand side is its second output bit). So for the input 10 we can consider the circuit below. Taking the top two bits as the input and the remaining bit as the output, the circuit outputs 1.

Explanation: building the circuit (the whole adder). Finally we combine the two circuits above. The combined circuit for the input 00 is shown below. The top two bits are the input and the remaining two bits are the output. The CCX at the beginning is the higher-digit part and the two CX gates are the lower-digit part.

Explanation: implementing the superposition circuit. Using quantum superposition we can process the four patterns at once. We consider the equation below; as you can see, the four input patterns appear on the right-hand side. We use this property in the implementation. The circuit is as follows: the part where we previously applied X gates now uses H gates to create the four patterns.

(Application) General addition. We now implement addition of two general decimal numbers $a, b$. In binary, $a = a_n ... a_0$ and $b = b_n ... b_0$ (where n is set by the larger of the two numbers). The circuit is as shown below. $c_i$ is called the carry bit and represents the carried value. The addition circuit is split into a carry part and a sum part; let's look at these two parts first.

Carry: the carry circuit is shown below. Labelling the wires from the top as $c_i, a_i, b_i, c_{i+1}$, the carry appears on $c_{i+1}$.

Sum: we consider the sum of each digit. Labelling the wires from the top as $c_i, a_i, b_i$, the sum of the three values (without the carry) appears on $b_i$.

Implementation: the addition circuit follows this flow.
1. Compute the carry of each digit with the carry circuit.
2. Restore the last carry wire with a CX gate.
3. Output the sum of the top digit to $b_n$ with the sum circuit.
4. Restore that digit's value with the reverse carry circuit.
5. Output the sum of that digit to $b_i$ with the sum circuit.
6. Repeat steps 4 and 5.

From this flow, $a+b$ is output on $b_{n+1} ... b_0$. We now do some preparation to implement the addition circuit. First we build the carry circuit, its reverse, and the sum circuit.
###Code
from blueqat import Circuit
def carry(i):
return Circuit().ccx[i+1,i+2,i+3].cx[i+1,i+2].ccx[i,i+2,i+3]
def carry_reverse(i):
return Circuit().ccx[i,i+2,i+3].cx[i+1,i+2].ccx[i+1,i+2,i+3]
def sum(i):
return Circuit().cx[i+1,i+2].cx[i,i+2]
###Output
_____no_output_____
###Markdown
We also write a function that converts a decimal number to binary.
###Code
def tobinary(A):
return bin(A)[2:]
tobinary(10)
###Output
_____no_output_____
###Markdown
We write a function that maps the numbers onto the circuit.
###Code
def digits(a,b):
# Convert to binary
aa = tobinary(a)
bb = tobinary(b)
alen = len(aa)
blen = len(bb)
# Determine n and pad to the larger bit length
maxlen = max(alen,blen)
if alen>blen:
bb = bb.zfill(alen)
elif blen>alen:
aa = aa.zfill(blen)
# Mapping
str = ''
for i in range(maxlen):
str += '0' + aa[maxlen-i-1] + bb[maxlen-i-1]
str += '0'
return str
digits(2,2)
###Output
_____no_output_____
###Markdown
Since the initial state of the circuit is all zeros, we need to apply X gates so that the state matches the mapped values. We write that function here.
###Code
def toX(a):
cir = Circuit(len(a))
for i in range(len(a)):
if a[i] == "1":
cir += Circuit().x[i]
return cir
toX("101").m[:].run(shots=100)
###Output
_____no_output_____
###Markdown
Finally we consider the output. Since the output is in binary, we first write a function that converts it back to decimal.
###Code
def todecimal(A):
return int(str(A),2)
todecimal(1001)
###Output
_____no_output_____
###Markdown
The circuit outputs a bit string in which $a_i, b_i, c_i$ are interleaved, so we extract only the $b_i$ part.
###Code
def getb(result):
str = result[-1]
digi = int((len(result)-1)/3)
for i in range(digi):
str += result[-2-i*3]
return todecimal(str)
getb("0000110")
###Output
_____no_output_____
###Markdown
Generalized circuit. All the preparation is done, so we now build the addition circuit.
###Code
def plus(a,b):
# Map the binary representation onto the circuit
qubits = len(digits(a,b))
cir1 = toX(digits(a,b))
digi = int((len(digits(a,b))-1)/3)
# Carry circuits for the first half
cir2 = Circuit(qubits)
for i in range(digi):
cir2 += carry(i*3)
# Handle the last digit
cir3 = Circuit(qubits).cx[-3,-2] + sum((digi-1)*3)
# Reverse carry and sum circuits output the sum to bi
cir4 = Circuit(qubits)
for i in range(digi-1):
cir4 += carry_reverse((digi-i-2)*3)
cir4 += sum((digi-i-2)*3)
result = (cir1 + cir2 + cir3 + cir4).m[:].run(shots=1)
return getb(result.most_common()[0][0])
###Output
_____no_output_____
###Markdown
Let's actually run some calculations.
###Code
plus(2,2)
plus(13,15)
plus(70,90)
###Output
_____no_output_____
###Markdown
The last calculation takes some time, but we have implemented a general adder. Reference: V. Vedral, A. Barenco, A. Ekert, "Quantum Networks for Elementary Arithmetic Operations" (submitted 16 Nov 1995) https://arxiv.org/pdf/quant-ph/9511018.pdf Subtraction. A quantum computer can also carry out the same calculations as a conventional computer. Let's look at a subtraction circuit. What we will learn: 1. Implement binary subtraction with quantum gates 2. Use quantum superposition to run several subtractions with a single circuit. Binary subtraction. Subtraction uses a CCX gate to determine the sign and CX gates to subtract the digits. Here we build a quantum circuit for the binary subtraction a-b=cd. Depending on the values of a and b we implement four kinds of subtraction: 0-0 = 00 => 0000, 0-1 = 11 => 0111, 1-0 = 01 => 1001, 1-1 = 00 => 1100. We represent this as a 4-qubit bit string: the first two qubits are the inputs a and b, and the last two qubits are the outputs c and d. We build the circuit that loads a and b and the circuit that actually performs the subtraction separately, and reuse the latter several times. An input of 1 is loaded by flipping 0 with an X gate. The subtraction part of the circuit looks like this (* marks a control bit):
```
a ---X---*---X---*------- a
b -------*-------|---*--- b
0 -------X-------|---|--- c
0 ---------------X---X--- d
```
After loading the data into a and b with X gates, the CCX part determines the sign and the CX gates subtract the digits.
###Code
# Load the tools
from blueqat import Circuit
# Build the subtraction part
substractor = Circuit().x[0].ccx[0,1,2].x[0].cx[0,3].cx[1,3]
#0-0
(Circuit() + substractor).m[:].run(shots=100)
#0-1
(Circuit().x[1] + substractor).m[:].run(shots=100)
#1-0
(Circuit().x[0] + substractor).m[:].run(shots=100)
#1-1
(Circuit().x[0,1] + substractor).m[:].run(shots=100)
###Output
_____no_output_____
###Markdown
As shown above, we have implemented subtraction. Subtraction using superposition: now, instead of loading the data one case at a time with X gates, let's try the subtraction with H gates.
###Code
# Use H gates instead of X gates for the input
(Circuit().h[0,1] + substractor).m[:].run(shots=100)
###Output
_____no_output_____
###Markdown
Using Hadamard gates, the four subtractions each appear as an answer roughly 1/4 of the time. Once we have a general-purpose subtraction circuit like this, we can run computations that exploit superposition. Subtraction using entanglement: next, instead of H gates, let's use quantum entanglement to run only the subtractions with a-b=0.
###Code
# Create entanglement between 00 and 11
(Circuit().h[0].cx[0,1] + substractor).m[:].run(shots=100)
###Output
_____no_output_____
###Markdown
In this way, because the input values 00 and 11 are entangled, these two subtractions each appear about half of the time. -------- Explanation: building the circuit (the sign). First we build the sign bit. The sign values are 0, 1, 0, 0. As a table:

|X|Y|sign|
|:-:|:-:|:-:|
|0|0|0|
|0|1|1|
|1|0|0|
|1|1|0|

Flipping the X bit gives:

|X|Y|sign|
|:-:|:-:|:-:|
|1|0|0|
|1|1|1|
|0|0|0|
|0|1|0|

This is exactly the behaviour of the CCX gate (CCX flips the third bit when the first two input bits are both 1). So for the input 11 we can consider the circuit below. Taking the top two bits as the input and the remaining bit as the output, the circuit outputs 1.

Explanation: building the circuit (the lower digit). Next we build the other digit. Looking at that digit, the results are 0, 1, 1, 0. As a table:

|X|Y|1st digit of X-Y|
|:-:|:-:|:-:|
|0|0|0|
|0|1|1|
|1|0|1|
|1|1|0|

This is exactly the behaviour of the CX gate (the left-hand side is the CX input and the right-hand side is its second output bit). So for the input 10 we can consider the circuit below. Taking the top two bits as the input and the remaining bit as the output, the circuit outputs 1.

Explanation: building the circuit (the whole subtractor). Finally we combine the two circuits above. The combined circuit for the input 00 is shown below. The top two bits are the input and the remaining two bits are the output. The CCX at the beginning is the sign part and the two CX gates are the subtraction part. Because we applied an X gate to detect the sign, we apply X again after the CCX to restore the bit.

Explanation: implementing the superposition circuit. Using quantum superposition we can process the four patterns at once. We consider the equation below; as you can see, the four input patterns appear on the right-hand side. We use this property in the implementation. The circuit is as follows: the part where we previously applied X gates now uses H gates to create the four patterns.

(Application) General subtraction. We now implement subtraction of two general decimal numbers. The subtraction circuit is obtained by running the addition circuit in reverse. The addition circuit is shown below; we read it from the right. We input $a$ and $a+b$ and output $b$. In binary, $a = a_n ... a_0$ and $(a+b) = b_n ... b_0$ (where n is set by the larger of the two numbers). $c_i$ represents the borrow. The circuit is split into a carry part and a sum part; let's look at these two parts first.

Borrow: carry and sum are as shown below. The first part of the circuit combines them to compute the borrow.

Overflow bit: the subtraction only works when $a < a+b$. We use $b_{n+1}$ to tell the two cases apart; it is called the overflow bit. When $a < a+b$, $b_{n+1} = 0$; when $a > a+b$, $b_{n+1} = 1$. When $a > a+b$ the output is $2^{n+1} - b$.

Implementation: the subtraction circuit follows this flow.
1. Compute the borrow of each digit with the reverse carry circuit and the sum circuit.
2. Determine the overflow and store it in $b_{n+1}$.
3. Output the difference with the carry circuit.

From this flow, $b$ is output on $b_n ... b_0$, and $b_{n+1}$ is the overflow bit. We now do some preparation for the implementation; since it is the same as for the addition circuit, the explanation is omitted.
###Code
from blueqat import Circuit
def carry(i):
return Circuit().ccx[i+1,i+2,i+3].cx[i+1,i+2].ccx[i,i+2,i+3]
def carry_reverse(i):
return Circuit().ccx[i,i+2,i+3].cx[i+1,i+2].ccx[i+1,i+2,i+3]
def sum_reverse(a):
return Circuit().cx[a,a+2].cx[a+1,a+2]
def tobinary(A):
return bin(A)[2:]
def digits(a,b):
    # convert to binary
aa = tobinary(a)
bb = tobinary(b)
alen = len(aa)
blen = len(bb)
    # choose n and pad to the bit length of the larger number
maxlen = max(alen,blen)
if alen>blen:
bb = bb.zfill(alen)
elif blen>alen:
aa = aa.zfill(blen)
    # map onto the circuit qubit ordering
str = ''
for i in range(maxlen):
str += '0' + aa[maxlen-i-1] + bb[maxlen-i-1]
str += '0'
return str
def toX(a):
cir = Circuit(len(a))
for i in range(len(a)):
if a[i] == "1":
cir += Circuit().x[i]
return cir
def todecimal(A):
return int(str(A),2)
def getb(result):
str = result[-1]
digi = int((len(result)-1)/3)
for i in range(digi):
str += result[-2-i*3]
return todecimal(str)
def minus(a,ab):
    # swap the arguments
c = ab
ab = a
a = c
    # map the binary representations onto qubits
qubits = len(digits(a,ab))
cir1 = toX(digits(a,ab))
digi = int((len(digits(a,ab))-1)/3)
    # first half: carry circuits and reversed sum circuits
cir4 = Circuit(qubits)
for i in range(digi-1):
cir4 += sum_reverse(i*3)
cir4 += carry(i*3)
    # handle the highest digit
cir3 = sum_reverse((digi-1)*3) + Circuit(qubits).cx[-3,-2]
    # reversed carry circuits write the difference into the b_i qubits
cir2 = Circuit(qubits)
for i in range(digi):
cir2 += carry_reverse((digi-1-i)*3)
result = (cir1 + cir4 + cir3 + cir2).m[:].run(shots=1)
return getb(result.most_common()[0][0])
minus(8,2)    # 8 - 2 = 6
minus(4,2)    # 4 - 2 = 2
minus(50,24)  # 50 - 24 = 26
###Output
_____no_output_____
###Markdown
The calculations work. Incidentally, when $a > a + b$:
###Code
minus(2,4)
###Output
_____no_output_____
###Markdown
This case is also computed correctly. Referenceshttps://arxiv.org/pdf/quant-ph/9511018.pdf MultiplicationThis time let's look at multiplying two binary numbers. What we will learn1. Binary multiplication2. Building the circuit Binary multiplicationWe multiply the two numbers digit by digit, shift the partial products, and add them up, using extra qubits to account for the carries. The single-digit products are0 * 0 = 0 0 * 1 = 0 1 * 0 = 0 1 * 1 = 1 which corresponds to the behavior of a CCX gate. We perform the multiplication with CCX gates and then add up the digits. ExampleLet's start with an example and solve 01 * 10 = ?. The answer is 01 * 10 = 0010, and we solve it with a quantum circuit.``` 01 a* 10 b------- 00 c 01 c------- 0 z 0 z------- 0010 x```Now on to the implementation. First we prepare the two binary numbers. For a * b we prepare the 1s digit and the 2s digit of a, calling them a0 and a2, and likewise for b. The operation we finally want to realize is |a,b,x> => |a, b, a * b >. What we want are x0, x2, x4 and x8; the c qubits are intermediate work bits and the z qubits are carry bits.
###Code
from blueqat import Circuit
C1 = Circuit().ccx[0,1,2].ccx[1,3,5].ccx[0,4,6].ccx[3,4,7].ccx[5,6,8].ccx[7,8,9].cx[2,10].cx[5,11].cx[6,11].cx[7,12].cx[8,12].cx[9,13]
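# qubit layout of C1 above: q0, q3 hold the digits a0, a2 of a; q1, q4 hold the digits b0, b2 of b;
# q2, q5, q6, q7 hold the single-digit partial products (c); q8, q9 are carry bits (z); q10-q13 hold the product digits x0, x2, x4, x8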
#00 * 00 = 0000
(Circuit() + C1).m[:].run(shots=100)
#01 * 01 = 0001
(Circuit().x[0,1] + C1).m[:].run(shots=100)
#10 * 01 = 0010
(Circuit().x[3,1] + C1).m[:].run(shots=100)
#01 * 10 = 0010
(Circuit().x[0,4] + C1).m[:].run(shots=100)
#10 * 10 = 0100
(Circuit().x[3,4] + C1).m[:].run(shots=100)
#11 * 10 = 0110
(Circuit().x[0,3,4] + C1).m[:].run(shots=100)
#10 * 11 = 0110
(Circuit().x[1,3,4] + C1).m[:].run(shots=100)
#11 * 11 = 1001
(Circuit().x[0,1,3,4] + C1).m[:].run(shots=100)
###Output
_____no_output_____
###Markdown
In this way every multiplication pattern is realized by the quantum circuit. As a test, let's put the inputs into superposition. Replacing the X gates with Hadamard gates gives:
###Code
(Circuit().h[0,1,3,4] + C1).m[:].run(shots=100)
###Output
_____no_output_____
###Markdown
The circuit nicely computes everything from 00 * 00 to 11 * 11. Overview MultiplierSince a bit takes the values 0 and 1, we consider binary multiplication. The combinations are0×0=0, 0×1=0, 1×0=0, 1×1=1and can be expressed with a CCX gate, just like the first digit of the adder. Multiplier (2 digits × 2 digits)Next, as an application, consider a 2-digit × 2-digit multiplication. There are 16 combinations:00×00=0000, 00×01=0000, 00×10=0000, 00×11=0000 01×00=0000, 01×01=0001, 01×10=0010, 01×11=0011 10×00=0000, 10×01=0010, 10×10=0100, 10×11=0110 11×00=0000, 11×01=0011, 11×10=0110, 11×11=1001 How it worksAs a trial, work out 11 × 10 by long multiplication. We consider the four single-digit products that make up rows 1 and 2: row 1 is 11 × 0 and row 2 is 11 × 1. Each of the four products is a single-digit product, so all of them can be implemented with CCX gates. The sum of rows 1 and 2 has to handle carries, so it is implemented with an adder. Building the circuit (rows 1 and 2)Here we consider only row 1. For 11 × 0 we use CCX gates and build the circuit below: the top two bits are the 11 part and the next bit is the 0 part. Output_1 and Output_2 represent 1 × 0. This produces row 1, and row 2 is built in the same way. Building the circuit (overall)We compute 11 × 10 by combining the two circuits above into the circuit below. The upper-left operations compute rows 1 and 2, and the lower-right part computes their sum. Modular additionWe now use the general addition and subtraction machinery to compute remainders. The circuit is shown below, where $0 < a,b < N$. The last bit is used to check for overflow and is called the temporary bit. Implementation stepsTo compute $(a+b) \mod N$ we need to compare $a+b$ with $N$. When $a+b>N$: from $0<a,b<N$ we get $0<a+b<2N$, hence $0<a+b-N<N$ and $a+b-N = (a+b) \mod N$. When $a+b<N$: $a+b = (a+b) \mod N$. We carry this out with a quantum circuit, using the extra qubit together with the most significant qubit of the adder to separate the two cases; when a+b<N the extra bit is not used and no extra operations are performed. ExampleA simple example: since $3 + 5 > 7$, $(3 + 5) \mod 7 = 1$, and since $3 + 5 < 11$, $(3 + 5) \mod 11 = 8$. Realizing this with a quantum circuit is the modular addition we build here. ImplementationWe first do some preparation. This time we append one more copy of N to the circuit above, so that the temporary bit has N to work with. The initial state is as follows.```c0 --a0 --b0 --c1 --..n0 --n1 --..t --n0 --n1 --..```Also, since $0<a,b<N$, every register is padded to the number of digits of $N$.
###Code
from blueqat import Circuit
# carry circuit
def carry(a):
return Circuit().ccx[a+1,a+2,a+3].cx[a+1,a+2].ccx[a,a+2,a+3]
# reversed carry circuit
def carry_reverse(a):
return Circuit().ccx[a,a+2,a+3].cx[a+1,a+2].ccx[a+1,a+2,a+3]
# sum circuit
def sum(a):
return Circuit().cx[a+1,a+2].cx[a,a+2]
# reversed sum circuit
def sum_reverse(a):
return Circuit().cx[a,a+2].cx[a+1,a+2]
# decimal to binary
def tobinary(A):
return bin(A)[2:]
# convert the three decimal numbers to binary, pad them to the same number of digits, reorder the bits for the modulo circuit, and add one extra bit at the end for the comparison
def digits2(a,b,n):
aa = tobinary(a)
bb = tobinary(b)
nn = tobinary(n)
nlen = len(nn)
aa = aa.zfill(nlen)
bb = bb.zfill(nlen)
str = ''
for i in range(nlen):
str += '0' + aa[nlen-i-1] + bb[nlen-i-1]
str += '0'
for i in range(nlen):
str += nn[nlen-i-1]
str += '0'
for i in range(nlen):
str += nn[nlen-i-1]
return str
# convert a bit string into a data-loading circuit built from X gates
def toX(a):
cir = Circuit(len(a))
for i in range(len(a)):
if a[i] == "1":
cir += Circuit().x[i]
return cir
# addition circuit
def plus(a,b,n):
qubits = len(digits2(a,b,n))
digi = len(tobinary(n))
cir2 = Circuit(qubits)
for i in range(digi):
cir2 += carry(i*3)
cir3 = Circuit(qubits).cx[(digi-1)*3+1,(digi-1)*3+2] + sum((digi-1)*3)
cir4 = Circuit(qubits)
for i in range(digi-1):
cir4 += carry_reverse((digi-i-2)*3)
cir4 += sum((digi-i-2)*3)
cir_plus = cir2 + cir3 + cir4
return cir_plus
# subtraction circuit
def minus(a,ab,n):
qubits = len(digits2(a,ab,n))
digi = len(tobinary(n))
cir4 = Circuit(qubits)
for i in range(digi-1):
cir4 += sum_reverse(i*3)
cir4 += carry(i*3)
cir3 = sum_reverse((digi-1)*3) + Circuit(qubits).cx[(digi-1)*3+1,(digi-1)*3+2]
cir2 = Circuit(qubits)
for i in range(digi):
cir2 += carry_reverse((digi-1-i)*3)
cir_minus = cir4 + cir3 + cir2
return cir_minus
# swap a and N
def swap(n):
digi = len(tobinary(n))
cir = Circuit(5*digi+2)
for i in range(digi):
cir += Circuit(5*digi+2).swap[3*i+1,3*digi+1+i]
return cir
# binary to decimal
def todecimal(A):
return int(str(A),2)
# extract just the answer from the measurement result
def getb(result,n):
str = ""
digi = len(tobinary(n))
for i in range(digi):
str += result[3*(digi-i)-1]
return todecimal(str)
###Output
_____no_output_____
###Markdown
Generalized circuitWith all the preparation in place, we now build the modular addition circuit.
###Code
def adder_mod(a,b,n):
digi = len(tobinary(n))
    # first part: load the inputs, add, swap in N, then subtract N
part1 = toX(digits2(a,b,n)) + plus(a,b,n) + swap(n) + minus(a,b,n)
    # store the overflow flag in the temporary bit
part2 = Circuit(5*digi+2).x[digi*3].cx[digi*3,digi*4+1].x[digi*3]
    # controlled on the temporary bit, load N back in
part3 = Circuit(5*digi+2)
for i in range(digi):
part3 += Circuit(5*digi+2).ccx[4*digi+2+i,4*digi+1,3*i+1]
    # final part: subtract, update the temporary bit, then add back
part4 = minus(a,b,n)+Circuit(5*digi+2).cx[digi*3,digi*4+1]+plus(a,b,n)
result = (part1+part2+part3+plus(a,b,n)+part3+swap(n)+part4).m[:].run(shots=1)
return getb(result.most_common()[0][0],n)
###Output
_____no_output_____
###Markdown
Let's actually run some calculations.
###Code
adder_mod(4,3,5)  # (4+3) mod 5 = 2
adder_mod(4,4,5)  # (4+4) mod 5 = 3
adder_mod(1,5,6)  # (1+5) mod 6 = 0
###Output
_____no_output_____ |
m04/data-prep/tree-census.ipynb | ###Markdown
Prep
###Code
import pandas as pd
import numpy as np
import plotly.express as px
tree = pd.read_csv('../tree-census-2015.csv')
tree.shape
tree.columns
# tree.loc[:, 'genus'] = tree.spc_latin.str.extract('^([^ ]*)')
tree.loc[:, 'count'] = 1
###Output
_____no_output_____
###Markdown
Question 1
###Code
tree['health'].value_counts(dropna = False)
tree.head()
tree['borough'].value_counts(dropna = False)
tree_q1 = tree[['borough', 'spc_common', 'health', 'count', 'steward']]
tree.head()
tree_q1.head()
tree_q1.columns
tree_q1.index
agg_q1 = tree_q1.groupby(['borough', 'spc_common', 'health', 'steward']).sum().reset_index()
agg_q1
agg_q1.shape
agg_q1.to_csv('agg.csv')
tree['steward'].value_counts()
tree['spc_common'].value_counts().shape
tree.iloc[1]
(tree['spc_common'].isna() | tree['health'].isna() | tree['borough'].isna() | tree['steward'].isna()).sum()
tree['count'].sum()
tree.shape
agg_q1.spc_common.to_dict()
c2 = ['spc_common', 'spc_latin']
spc_map = tree[c2].groupby(c2).count().reset_index()
spc_map.to_clipboard()
def series2dropdown(s):
l = list()
for v in s.unique():
l.append({'label': v, 'value': v})
return(l)
series2dropdown(agg_q1.borough)
series2dropdown(agg_q1.health)
df = pd.read_csv('../agg-post.csv')
df.Health = pd.Categorical(df.Health, categories = ['Poor', 'Fair', 'Good'])
dff = df[df['Species Common'] == 'Schubert Chokecherry']
dffa = dff.groupby(['Borough', 'Health']).sum()
dffb = dff.groupby(['Borough']).sum()
dffp = (dffa / dffb).reset_index()
fig = px.bar(dffp, x = "Borough", y = "Count", color = "Health", title="Wide-Form Input")
fig.show()
pd.Categorical(df['Species Common'])
###Output
_____no_output_____ |
docs/notebooks/user-output.ipynb | ###Markdown
UserOutput `UserOutput` is a generic class which can export scraped users. Under the hood it has the abstract method `export_users(users: List[User])`. There are a few implementations of `UserOutput`
###Code
import stweet as st
###Output
_____no_output_____
###Markdown
PrintUserOutput `PrintUserOutput` prints every scraped user.
###Code
st.PrintUserOutput();
###Output
_____no_output_____
###Markdown
CollectorUserOutput `CollectorUserOutput` collects users in memory. This is the best option when you only need to process a small amount of scraped data. To get all collected users you need to run `get_scrapped_users()`.
###Code
st.CollectorUserOutput();
###Output
_____no_output_____
###Markdown
CsvUserOutput `CsvUserOutput` stores users in a csv file. It has two parameters, `file_location` and `add_header_on_start`. When `add_header_on_start` is `True`, the header is added only when the file is empty, so it is possible to continue storing users in the same file across subsequent tasks.
###Code
st.CsvUserOutput(
file_location='my_csv_file.csv',
add_header_on_start=True
);
###Output
_____no_output_____
###Markdown
JsonLineFileUserOutput `JsonLineFileUserOutput` stores users in a file as JSON lines. This scales better for large files: appending new users to a single large JSON document quickly becomes slow, and reading it back is also a problem. With JSON lines the file can be read line by line, without loading the whole document into memory. The class has only one property – `file_name`, the file in which users are stored in JSON line format.
###Code
st.JsonLineFileUserOutput(
file_name='my_jl_file.jl'
);
###Output
_____no_output_____
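###Markdown
Because each line is a single JSON document, the file written above can be processed lazily. A minimal sketch in plain Python (not part of the stweet API) that reads `my_jl_file.jl` one record at a time:
###Code
import json

# iterate over the JSON-lines file without loading it all into memory
with open('my_jl_file.jl') as file:
    for line in file:
        user = json.loads(line)  # one scraped user per line
        # process `user` here
###Output
_____no_output_____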
###Markdown
PrintEveryNUserOutput `PrintEveryNUserOutput` prints every N-th scraped user. This is the best way to check that new users are still being scraped. The class has only one parameter – `each_n`, the N value described above.
###Code
st.PrintEveryNUserOutput(
each_n=100
);
###Output
_____no_output_____ |
examples/notebooks/tf_2_2/keras_mnist_fashion_save_model/keras_mnist_fashion_save_model.ipynb | ###Markdown
Keras MNIST Fashion Save Model ExampleSingle fully connected hidden layer exported for prediction on device with tensor/io. Exported using the keras `model.save` api.Based on https://www.tensorflow.org/tutorials/keras/classification
###Code
import os
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers
import PIL.Image as Image
import matplotlib.pylab as plt
%matplotlib inline
def enable_memory_growth():
physical_devices = tf.config.experimental.list_physical_devices('GPU')
try:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
# tf.config.gpu.set_per_process_memory_growth(True)
# tf.config.gpu.set_per_process_memory_fraction(0.75)
except:
print('Invalid device or cannot modify virtual devices once initialized.')
if "TF_GPU_GROWTH" in os.environ:
print("Enabling GPU memory growth")
enable_memory_growth()
###Output
Enabling GPU memory growth
Invalid device or cannot modify virtual devices once initialized.
###Markdown
Fashion MNIST
###Code
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = [
'T-shirt/top',
'Trouser',
'Pullover',
'Dress',
'Coat',
'Sandal',
'Shirt',
'Sneaker',
'Bag',
'Ankle boot'
]
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
###Output
_____no_output_____
###Markdown
Model
###Code
def make_model():
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10)
])
return model
model = make_model()
model.summary()
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=10)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
probability_model = tf.keras.Sequential([
model,
tf.keras.layers.Softmax()
])
predictions = probability_model.predict(test_images)
predictions[0]
np.argmax(predictions[0])
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array, true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array, true_label[i]
plt.grid(False)
plt.xticks(range(10))
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions[i], test_labels)
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Export with model.save
###Code
PATH = 'tmp/keras-mnist-fashion-save-model'
! rm -r 'tmp/keras-mnist-fashion-save-model'
model.save(PATH, save_format='tf')
###Output
WARNING:tensorflow:From /home/phildow/virtualenvs/tf22/lib/python3.6/site-packages/tensorflow/python/ops/resource_variable_ops.py:1817: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.
Instructions for updating:
If using Keras pass *_constraint arguments to layers.
INFO:tensorflow:Assets written to: tmp/keras-mnist-fashion-save-model/assets
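###Markdown
As a quick sanity check (an optional step, not required for the export), the SavedModel can be reloaded with the standard Keras API and should reproduce the original model's outputs:
###Code
# reload the exported model and compare predictions on a few test images
reloaded = tf.keras.models.load_model(PATH)
np.testing.assert_allclose(
    model.predict(test_images[:5]),
    reloaded.predict(test_images[:5]),
    rtol=1e-5, atol=1e-5)
###Output
_____no_output_____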
###Markdown
Results
###Code
! saved_model_cli show --all --dir tmp/keras-mnist-fashion-save-model/
###Output
MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['__saved_model_init_op']:
The given SavedModel SignatureDef contains the following input(s):
The given SavedModel SignatureDef contains the following output(s):
outputs['__saved_model_init_op'] tensor_info:
dtype: DT_INVALID
shape: unknown_rank
name: NoOp
Method name is:
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['flatten_input'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 28, 28)
name: serving_default_flatten_input:0
The given SavedModel SignatureDef contains the following output(s):
outputs['dense_1'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 10)
name: StatefulPartitionedCall:0
Method name is: tensorflow/serving/predict
WARNING:tensorflow:From /home/phildow/virtualenvs/tf22/lib/python3.6/site-packages/tensorflow/python/ops/resource_variable_ops.py:1817: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.
Instructions for updating:
If using Keras pass *_constraint arguments to layers.
Defined Functions:
Function Name: '__call__'
Option #1
Callable with:
Argument #1
inputs: TensorSpec(shape=(None, 28, 28), dtype=tf.float32, name='inputs')
Argument #2
DType: bool
Value: False
Argument #3
DType: NoneType
Value: None
Option #2
Callable with:
Argument #1
flatten_input: TensorSpec(shape=(None, 28, 28), dtype=tf.float32, name='flatten_input')
Argument #2
DType: bool
Value: False
Argument #3
DType: NoneType
Value: None
Option #3
Callable with:
Argument #1
flatten_input: TensorSpec(shape=(None, 28, 28), dtype=tf.float32, name='flatten_input')
Argument #2
DType: bool
Value: True
Argument #3
DType: NoneType
Value: None
Option #4
Callable with:
Argument #1
inputs: TensorSpec(shape=(None, 28, 28), dtype=tf.float32, name='inputs')
Argument #2
DType: bool
Value: True
Argument #3
DType: NoneType
Value: None
Function Name: '_default_save_signature'
Option #1
Callable with:
Argument #1
flatten_input: TensorSpec(shape=(None, 28, 28), dtype=tf.float32, name='flatten_input')
Function Name: 'call_and_return_all_conditional_losses'
Option #1
Callable with:
Argument #1
flatten_input: TensorSpec(shape=(None, 28, 28), dtype=tf.float32, name='flatten_input')
Argument #2
DType: bool
Value: False
Argument #3
DType: NoneType
Value: None
Option #2
Callable with:
Argument #1
flatten_input: TensorSpec(shape=(None, 28, 28), dtype=tf.float32, name='flatten_input')
Argument #2
DType: bool
Value: True
Argument #3
DType: NoneType
Value: None
Option #3
Callable with:
Argument #1
inputs: TensorSpec(shape=(None, 28, 28), dtype=tf.float32, name='inputs')
Argument #2
DType: bool
Value: False
Argument #3
DType: NoneType
Value: None
Option #4
Callable with:
Argument #1
inputs: TensorSpec(shape=(None, 28, 28), dtype=tf.float32, name='inputs')
Argument #2
DType: bool
Value: True
Argument #3
DType: NoneType
Value: None
###Markdown
Tensor/IONote in the corresponding model.json that the name and shape of the inputs and outputs matches the values you see in the signature definition. Take special care to note that the name is taken from the layer's name and not from the key in the inputs or outputs dictionary:```inputs['flatten_input'] tensor_info: dtype: DT_FLOAT shape: (-1, 28, 28) name: serving_default_flatten_input:0outputs['dense_1'] tensor_info: dtype: DT_FLOAT shape: (-1, 10) name: StatefulPartitionedCall:0```
###Code
! cat model.json
###Output
{
"name": "Keras MNIST Fashion Save Model Example",
"details": "Basic Keras Model for Fashion MNSIT dataset exported for prediction using the keras model.save api",
"id": "keras-mnist-clothing-save-model",
"version": "1",
"author": "doc.ai",
"license": "Apache License. Version 2.0 http://www.apache.org/licenses/LICENSE-2.0",
"model": {
"file": "predict",
"quantized": false,
"type": "mnist.fashion.keras",
"backend": "tensorflow",
"modes": ["predict"]
},
"inputs": [
{
"name": "serving_default_flatten_input",
"type": "array",
"shape": [-1,28,28]
}
],
"outputs": [
{
"name": "StatefulPartitionedCall",
"type": "array",
"shape": [-1,10]
}
]
} |
Euler 017 - Number letter counts.ipynb | ###Markdown
Euler Problem 17================If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?**NOTE:** Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of "and" when writing out numbers is in compliance with British usage.
###Code
def spellout(n):
small = [3, 3, 5, 4, 4, 3, 5, 5, 4] # 1 - 9
teens = [6, 6, 8, 8, 7, 7, 9, 8, 8] # 11 - 19
tens = [3, 6, 6, 5, 5, 5, 7, 6, 6] # 10, 20, ..., 90
if n >= 1000:
s = spellout(n//1000) + 8
r = n % 1000
if r > 99:
s = s + spellout(n%1000)
elif r > 0:
s = s + 3 + spellout(n%1000)
return s
if n > 99:
s = spellout(n//100) + 7
r = n % 100
if r:
s = s + 3 + spellout(n % 100)
return s
if n < 10:
return small[n-1]
elif n > 10 and n < 20:
return teens[n-11]
else:
s = tens[n//10-1]
if (n % 10):
s = s + small[(n % 10) - 1]
return s
print(sum(spellout(i) for i in range(1, 1001)))
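# sanity check against the examples in the problem statement:
# 342 -> "three hundred and forty-two" (23 letters), 115 -> "one hundred and fifteen" (20 letters)
assert spellout(342) == 23 and spellout(115) == 20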
###Output
21124
|
nbs/02_template.filters.ipynb | ###Markdown
Filters> Filters are used to define pass/fail criteria for screening molecules OverviewA core concept in MRL is using molecular templates, expressed with the `Template` class, to define chemical spaces. A template contains a set of filters that define desirable property ranges, such as ```Molecular weight: 250-450Rotatable bonds: Less than 8PAINS Filter: Pass```These property specifications are expressed through the `Filter` class. The primary function of a filter is to define some pass/fail criteria for a molecule. This is done through the `property_function` and `criteria_function` methods. `property_function` computes some value based on the input molecule. `criteria_function` converts the output of `property_function` to a single boolean value.Filters follow the convention that `True` means the input `Mol` has passed the `criteria_function` function, while `False` means the `Mol` has failed the `criteria_function`.We can also use filters to express a soft preference for chemical properties by adding a score. If a score is provided, the output of `property_function` and `criteria_function` are sent to a `ScoreFunction` subclass, which returns a numeric value.This allows us to use filters to define both the __must-have__ chemical properties as well as __nice-to-have__ properties. For example:```Must Have:Molecular weight: 250-450, Rotatable bonds: Less than 8PAINS Filter: PassNice To Have:Molecular weight: 350-400 (+1), TPSA: Less than 80 (+1)Substructure Match: '[6]1:[6]:[7]:[6]:[6]:[6]:1' (+3)Substructure Match: '[6]1:[6]:[7]:[7]:[7]:[6]:1' (-1)```
###Code
#hide
from nbdev.showdoc import *
%load_ext autoreload
%autoreload 2
# export
from mrl.imports import *
from mrl.core import *
from mrl.chem import *
###Output
/home/dmai/miniconda3/envs/mrl/lib/python3.7/importlib/_bootstrap.py:219: RuntimeWarning: to-Python converter for boost::shared_ptr<RDKit::FilterCatalogEntry const> already registered; second conversion method ignored.
return f(*args, **kwds)
###Markdown
Score Functions`ScoreFunction` classes take the outputs of a filter (both `property_function` and `criteria_function`, see `Filter` for more details) and return a numeric score. This can be used to incentivise a generative model to produce molecules with specific properties without making those properties must-have constraints.`ConstantScore` returns a standard value based on if the `criteria_output` is `True` or `False`. For more sophisticated scores like those seen in MPO functions, something like `LinearDecayScore` can be used, which returns a constant score within a certain range, but decays the score outside that range.`ScoreFunction` can be subclassed with any variant that takes as input `property_output` and `criteria_output` and returns a numeric value
###Code
# export
class ScoreFunction():
"Base score function"
def __call__(self, property_output, criteria_output):
pass
class NoScore(ScoreFunction):
"Pass through for no score"
def __call__(self, property_output, criteria_output):
return 0.
class PassThroughScore(ScoreFunction):
"Pass through for property_output"
def __call__(self, property_output, criteria_output):
return property_output
class ModifiedScore(ScoreFunction):
"Base class for scores where property_output is modified by some function"
def __init__(self, fail_score=0.):
self.fail_score = float(fail_score)
def __call__(self, property_output, criteria_output):
if not criteria_output and self.fail_score is not None:
output = self.fail_score
else:
output = self.compute_score(property_output)
return output
def compute_score(self, property_output):
raise NotImplementedError
class ConstantScore(ModifiedScore):
"Returns pass_score if criteria_output, else fail_score"
def __init__(self, pass_score, fail_score):
super().__init__(fail_score)
self.pass_score = float(pass_score)
def compute_score(self, property_output):
return self.pass_score
class WeightedPropertyScore(ModifiedScore):
"Returns weight*property_output if criteria_output, else fail_score"
def __init__(self, weight, fail_score=0.):
super().__init__(fail_score)
self.weight = weight
def compute_score(self, property_output):
return property_output*self.weight
class PropertyFunctionScore(ModifiedScore):
"Returns output `function(property_output)`"
def __init__(self, function, fail_score=0.):
super().__init__(fail_score)
self.function = function
def compute_score(self, property_output):
return self.function(property_output)
class LinearDecayScore(ScoreFunction):
'''
LinearDecayScore - score with linear decay. `low_start<low_end<high_start<high_end`
Returns `pass_score` if `criteria_output=True` and
`low_end<=property_output<=high_start`. If `low_start<=property_output<=low_end` or
`high_start<=property_output<=high_end`, the score is a linear interpolation between `pass_score`
and `fail_score`. Otherwise, returns `fail_score`.
One of `low_end`, `high_start` must be not None.
If one of `low_end`, `high_start` is None, the corresponding bound is ignored
if `low_start` or `high_end` is None, the score immediately drops to `fail_score`
'''
# low_start < low_end < high_start < high_end
def __init__(self, pass_score, low_start, low_end,
high_start, high_end, fail_score=0.):
self.pass_score = float(pass_score)
self.fail_score = float(fail_score)
self.low_start = low_start
self.high_start = high_start
self.low_end = low_end
self.high_end = high_end
assert (self.low_end is not None) or (self.high_start is not None), ("One of (low_end, high_start) "
"must not be None")
def check_bound(self, property_output, bound, boundtype):
if bound is None:
output = True
else:
if boundtype=='low':
output = property_output>=bound
else:
output = property_output<=bound
return output
def __call__(self, property_output, criteria_output):
if criteria_output:
low_bound = self.check_bound(property_output, self.low_end, 'low')
high_bound = self.check_bound(property_output, self.high_start, 'high')
if low_bound and high_bound:
# in main range
output = self.pass_score
elif low_bound:
# above high start:
high_end = self.check_bound(property_output, self.high_end, 'high')
if high_end:
# between high_start and high_end
if (self.high_start is not None) and (self.high_end is not None):
fraction = (property_output - self.high_start)/(self.high_end - self.high_start)
output = self.pass_score*(1-fraction) + self.fail_score*fraction
else:
output = self.fail_score
else:
output = self.fail_score
else:
# below low end
low_start = self.check_bound(property_output, self.low_start, 'low')
if low_start:
# between low_start and low_end
if (self.low_start is not None) and (self.low_end is not None):
fraction = (property_output - self.low_start)/(self.low_end - self.low_start)
output = self.pass_score*fraction + self.fail_score*(1-fraction)
else:
output = self.fail_score
else:
output = self.fail_score
else:
output = self.fail_score
return output
score = LinearDecayScore(1, 1,5,10,15, fail_score=-1)
plt.plot(np.linspace(0,16),[score(i, True) for i in np.linspace(0,16)])
score = LinearDecayScore(1, 1,5,None, None, fail_score=-1)
plt.plot(np.linspace(0,16),[score(i, True) for i in np.linspace(0,16)])
###Output
_____no_output_____
###Markdown
FiltersAs described before, Filters serve the function of defining some pass/fail criteria for a given molecule. Filters contain a `property_function`, which computes some property of a molecule, and a `criteria_function` which converts the output of the property function to a boolean value, following the convention where `True` denotes a pass.Filters can optionally contain a score, which can be any of `(None, int, float, ScoreFunction)`. A score of `None` is converted to `NoScore`, while a numeric score (int or float) is converted to `ConstantScore`. The `eval_mol` function evaluates the filter on a given input. If `with_score=True` is passed, the output of `self.score_function` is returned, while if `with_score=False` is passed, the boolean output of `criteria_function` is returned
###Code
set(['protein', 'dna'])
# export
class Filter():
'''
Filter - base filter function class
Inputs:
- `score [None, int, float, ScoreFunction]`: see `Filter.set_score`
- `name Optional[str]`: filter name used for repr
- `fail_score [float, int]`: used in `Filter.set_score` if `score_function` is (int, float)
- `mode str['smile', 'protein', 'dna', 'rna']`: determines
how inputs are converted to Mol objects
'''
def __init__(self, score=None, name=None, fail_score=0., mode='smile'):
self.score_function = self.set_score(score, fail_score)
self.name = name
self.priority = 0
self.mode = mode
def set_score(self, score_function, fail_score):
if score_function is None:
score = NoScore()
elif type(score_function) in (int, float):
score = ConstantScore(score_function, fail_score=fail_score)
elif isinstance(score_function, ScoreFunction):
score = score_function
else:
raise ValueError('Invalid score_function input, must be one of (None, int, float, ScoreFunction)')
return score
def __call__(self, mol, with_score=False):
output = maybe_parallel(self.eval_mol, mol, with_score=with_score)
return output
def to_mol(self, input):
if self.mode=='smile':
mol = to_mol(input)
elif self.mode=='protein':
mol = to_protein(input)
elif self.mode=='dna':
mol = to_dna(input)
elif self.mode=='rna':
mol = to_rna(input)
else:
raise ValueError("`self.mode` must be one of `['smile', 'protein', 'dna', 'rna']`")
return mol
def to_string(self, input):
if self.mode=='smile':
string = to_smile(input)
elif self.mode in set(['protein', 'dna', 'rna']):
string = to_sequence(input)
else:
raise ValueError("`self.mode` must be one of `['smile', 'protein', 'dna', 'rna']`")
return string
def eval_mol(self, mol, with_score=False):
'''
eval_mol - evaluates `Mol` based on `property_function`.
if `with_score=True`, returns the output of `score_function`, else
returns the output of `property_function
'''
mol = self.to_mol(mol)
property_output = self.property_function(mol)
criteria_output = self.criteria_function(property_output)
if with_score:
output = self.score_function(property_output, criteria_output)
else:
output = criteria_output
return output
def property_function(self, mol):
raise NotImplementedError
def criteria_function(self, property_output):
raise NotImplementedError
def __repr__(self):
if self.name is not None:
output = f'{self.name}'
else:
output = 'Unnamed Filter'
return output
###Output
_____no_output_____
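###Markdown
A minimal sketch of a custom `Filter` subclass (illustrative only – any `property_function`/`criteria_function` pair works, assuming `to_mol` yields an RDKit `Mol` as elsewhere in this notebook):
###Code
class HalogenFilter(Filter):
    "Toy filter: passes molecules with at most `max_halogens` halogen atoms"
    def __init__(self, max_halogens, score=None, name='Halogen Filter'):
        super().__init__(score=score, name=name)
        self.max_halogens = max_halogens
    def property_function(self, mol):
        halogens = {9, 17, 35, 53}  # F, Cl, Br, I
        return sum(1 for atom in mol.GetAtoms() if atom.GetAtomicNum() in halogens)
    def criteria_function(self, property_output):
        return property_output <= self.max_halogens

f = HalogenFilter(max_halogens=1)
assert f('Clc1ccccc1')        # one chlorine -> passes
assert not f('Clc1ccccc1Cl')  # two chlorines -> fails
###Output
_____no_output_____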
###Markdown
`ValidityFilter` and `SingleCompoundFilter` are general molecule quantity filters. Generative models may produce invalid structures or multiple compounds when a single compound is desired. These filters can be used to eliminate those outputs
###Code
# export
class ValidityFilter(Filter):
'''
ValidityFilter - checks to see if a given `Mol` is a valid compound
Inputs:
- `score [None, int, float, ScoreFunction]`: see `Filter.set_score`
- `name Optional[str]`: filter name used for repr
- `fail_score [float, int]`: used in `Filter.set_score` if `score_function` is (int, float)
- `mode str['smile', 'protein', 'dna', 'rna']`: determines
how inputs are converted to Mol objects
'''
def __init__(self, score=None, name=None, fail_score=0., mode='smile'):
if name is None:
            name = 'Validity Filter'
super().__init__(score=score, name=name, fail_score=fail_score, mode=mode)
self.priority=2
def property_function(self, mol):
mol = self.to_mol(mol)
return mol
def criteria_function(self, property_output):
return property_output is not None and property_output.GetNumAtoms() > 0
class SingleCompoundFilter(Filter):
'''
SingleCompoundFilter - checks to see if a given `Mol` is a single compound
Inputs:
- `score [None, int, float, ScoreFunction]`: see `Filter.set_score`
- `name Optional[str]`: filter name used for repr
- `fail_score [float, int]`: used in `Filter.set_score` if `score_function` is (int, float)
- `mode str['smile', 'protein', 'dna', 'rna']`: determines
how inputs are converted to Mol objects
'''
def __init__(self, score=None, name=None, fail_score=0., mode='smile'):
if name is None:
name = 'Single Compound Filter'
super().__init__(score, name, fail_score=fail_score, mode=mode)
self.priority=1
def property_function(self, mol):
smile = self.to_string(mol)
return smile
def criteria_function(self, property_output):
return not '.' in property_output
f = ValidityFilter()
assert f('CC')
assert not f('cc') # invalid smiles
f = ValidityFilter(mode='protein')
assert f('MAARG')
assert not f('MXRA')
f = SingleCompoundFilter()
assert f('CC')
assert not f('CC.CC')
# export
class CharacterCountFilter(Filter):
'''
CharacterCountFilter - validates `Mol` based on the
count of the specified character
Inputs:
- `chars list[str]`: character to count
- `min_val Optional[float, int]`: min value for count
- `max_val Optional[float, int]`: max value for count
    - `per_length bool`: if True, counts are normalized by string length
- `score [None, int, float, ScoreFunction]`: see `Filter.set_score`
- `name Optional[str]`: filter name used for repr
- `fail_score [float, int]`: used in `Filter.set_score` if `score_function` is (int, float)
- `mode str['smile', 'protein', 'dna', 'rna']`: determines
how inputs are converted to Mol objects
'''
def __init__(self, chars, min_val=None, max_val=None, per_length=False,
score=None, name=None, fail_score=0., mode='smile'):
if name is None:
name = f"Character Filter {''.join(chars)}"
super().__init__(score, name, fail_score=fail_score, mode=mode)
self.priority = 1
self.chars = chars
self.min_val = min_val
self.max_val = max_val
self.per_length = per_length
def property_function(self, mol):
smile = self.to_string(mol)
return smile
def criteria_function(self, property_output):
values = [property_output.count(i) for i in self.chars]
if self.per_length:
values = [i/len(property_output) for i in values]
lower_bound = (min(values)>=self.min_val) if self.min_val is not None else True
upper_bound = (max(values)<=self.max_val) if self.max_val is not None else True
output = lower_bound and upper_bound
return output
def __repr__(self):
output = f'{self.name}' + f' ({self.min_val}, {self.max_val})'
return output
class AttachmentFilter(CharacterCountFilter):
'''
AttachmentFilter - validates `Mol` based on the
number of `*` attachment points
Inputs:
- `min_val Optional[float, int]`: min attachment value
- `max_val Optional[float, int]`: max attachment value
    - `per_length bool`: if True, counts are normalized by string length
- `score [None, int, float, ScoreFunction]`: see `Filter.set_score`
- `name Optional[str]`: filter name used for repr
- `fail_score [float, int]`: used in `Filter.set_score` if `score_function` is (int, float)
- `mode str['smile', 'protein', 'dna', 'rna']`: determines
how inputs are converted to Mol objects
'''
def __init__(self, min_val=None, max_val=None, per_length=False,
score=None, name=None, fail_score=0., mode='smile'):
super().__init__(chars=['*'],
min_val=min_val,
max_val=max_val,
per_length=per_length,
score=score,
name=name,
fail_score=fail_score,
mode=mode
)
f = CharacterCountFilter(['C'], min_val=1, max_val=3)
assert f('CC')
assert not f('N')
f = CharacterCountFilter(['A'], min_val=0, max_val=3, mode='protein')
assert f('MMM')
assert not f('MAMAMAMA')
f = CharacterCountFilter(['C'], min_val=0.1, max_val=0.4, per_length=True)
assert f('CCNNN')
assert not f('N')
f = CharacterCountFilter(['D', 'A', 'M'], min_val=0, max_val=2, mode='protein')
assert f('D')
assert f('DAM')
assert not f('DDDAM')
f = AttachmentFilter(2, 2)
assert f('*CC*')
assert not f('*CC')
###Output
_____no_output_____
###Markdown
The most common type of filter used is one that determines if a specific molecular property is within a certain range. This is implemented with the `PropertyFilter` class. `PropertyFilter` will work for any `mol_function` that takes in a `Mol` object and returns a numeric output. The numeric output is then compared to `min_val` and `max_val`. Unspecified bounds (ie `max_val=None`) are ignored.For convenience, a number of `PropertyFilter` named after specific properties are provided
###Code
# export
class PropertyFilter(Filter):
'''
PropertyFilter - filters mols based on `mol_function`
Inputs:
- `mol_function Callable`: any function that takes as input a `Mol` object and
returns a single numeric value
- `min_val Optional[float, int]`: inclusive lower bound for filter (ignored if None)
- `max_val Optional[float, int]`: inclusive upper bound for filter (ignored if None)
- `score [None, int, float, ScoreFunction]`: see `Filter.set_score`
- `name Optional[str]`: filter name used for repr
- `fail_score [float, int]`: used in `Filter.set_score` if `score_function` is (int, float)
- `mode str['smile', 'protein', 'dna', 'rna']`: determines
how inputs are converted to Mol objects
'''
def __init__(self, mol_function, min_val=None, max_val=None, score=None,
fail_score=0., name=None, mode='smile'):
self.mol_function = mol_function
self.min_val = min_val
self.max_val = max_val
if name is None:
name = mol_function.__name__
super().__init__(score, name, fail_score=fail_score, mode=mode)
def property_function(self, mol):
return self.mol_function(mol)
def criteria_function(self, property_output):
lower_bound = (property_output>=self.min_val) if self.min_val is not None else True
upper_bound = (property_output<=self.max_val) if self.max_val is not None else True
output = lower_bound and upper_bound
return output
def __repr__(self):
output = f'{self.name}' + f' ({self.min_val}, {self.max_val})'
return output
class MolWtFilter(PropertyFilter):
"Molecular weight filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(molwt, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class HBDFilter(PropertyFilter):
"Hydrogen bond donor filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(hbd, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class HBAFilter(PropertyFilter):
"Hydrogen bond acceptor filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(hba, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class TPSAFilter(PropertyFilter):
"TPSA filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(tpsa, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class RotBondFilter(PropertyFilter):
"Rotatable bond filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(rotbond, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class SP3Filter(PropertyFilter):
    "Fraction sp3 filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(fsp3, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class LogPFilter(PropertyFilter):
"LogP filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(logp, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class PenalizedLogPFilter(PropertyFilter):
"Penalized LogP filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(penalized_logp, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class RingFilter(PropertyFilter):
"Ring filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(rings, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class HeteroatomFilter(PropertyFilter):
"Heteroatom filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(heteroatoms, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class AromaticRingFilter(PropertyFilter):
"Aromatic ring filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(aromaticrings, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class HeavyAtomsFilter(PropertyFilter):
"Number of heavy atoms filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(heavy_atoms, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class MRFilter(PropertyFilter):
"Molar refractivity of atoms filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(molar_refractivity, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class ChargeFilter(PropertyFilter):
"Formal charge of atoms filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(formal_charge, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class TotalAtomFilter(PropertyFilter):
    "Total number of atoms filter (includes H)"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(all_atoms, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class QEDFilter(PropertyFilter):
    "QED score filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(qed, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class SAFilter(PropertyFilter):
"SA Score fillter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(sa_score, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class LooseRotBondFilter(PropertyFilter):
"Loose Rotatable bond filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(loose_rotbond, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class MaxRingFilter(PropertyFilter):
"Max ring size filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(max_ring_size, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class MinRingFilter(PropertyFilter):
"Min ring size filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(min_ring_size, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class BridgeheadFilter(PropertyFilter):
"Number of bridgehead carbons filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(num_bridgeheads, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class SpiroFilter(PropertyFilter):
"Spiro carbon filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(num_spiro, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class ChiralFilter(PropertyFilter):
"Chiral center filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(min_ring_size, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
class RotChainFilter(PropertyFilter):
"Longest rotatable bond chain filter"
def __init__(self, min_val, max_val, score=None, name=None, **kwargs):
super().__init__(rot_chain_length, min_val=min_val, max_val=max_val, score=score, name=name, **kwargs)
f = PropertyFilter(molwt, 100, 300)
assert f('O=C(C)Oc1ccccc1C(=O)O')
f = PropertyFilter(molwt, None, None, score=5)
assert f('O=C(C)Oc1ccccc1C(=O)O', with_score=True) == 5
f = MolWtFilter(100, 500, score=WeightedPropertyScore(2.))
assert f('O=C(C)Oc1ccccc1C(=O)O', with_score=True) == 2*molwt(to_mol('O=C(C)Oc1ccccc1C(=O)O'))
f = MolWtFilter(100, 500, mode='protein')
assert f('MAAR')
f = MolWtFilter(400, 500)
assert f('O=C(C)Oc1ccccc1C(=O)O') == False
f = HeteroatomFilter(2, 4)
assert f('O=C(C)Oc1ccccc1C(=O)O')
###Output
_____no_output_____
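###Markdown
`PropertyFilter` also accepts user-defined property functions – any callable that maps a `Mol` to a number. A small sketch (the nitrogen-count function below is purely illustrative):
###Code
def n_nitrogens(mol):
    "count nitrogen atoms in an RDKit Mol"
    return sum(1 for atom in mol.GetAtoms() if atom.GetAtomicNum() == 7)

f = PropertyFilter(n_nitrogens, min_val=1, max_val=3)
assert f('Cc1cc(NC)ccc1')  # one nitrogen -> passes
assert not f('CCO')        # no nitrogen -> fails
###Output
_____no_output_____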
###Markdown
Another common filter is based on substructure matching. Substructure filtering is typically done in a hard filter fashion to remove compounds (i.e. exclude all compounds with PAINS structures). Substructure filters can also be used in a soft filter fashion to express a preference for molecular substructures. For example, if you would like (but not require) your compound to have a 3-ring scaffold system, that can be implemented through structural filtering as well.Structure filters take in a list of SMARTS to filter against (or any subclass of `Catalog`), as well as a `criteria` (any, all, float, int).If `criteria=any`, `property_function` will return `True` if any filters are matched.If `criteria=all`, `property_function` will return `True` if all filters are matched.If `criteria=float`, `property_function` will return `True` if at least `float` fraction of filters (inclusive) are matched.If `criteria=int`, `property_function` will return `True` if at least `int` filters (inclusive) are matched.`criteria_function` will then evaluate the `property_function` output based on `criteria`.The `exclude` parameter defines how the filter treats structure matches. Substructure matching returns `True` when a match is found. If `exclude=True`, the filter will return `False` when a match is found. If `exclude=False`, the filter will return `True` when a match is found.To make this more explicit, the `ExclusionFilter` class always has the exclusion behavior and the `KeepFilter` class always has the inclusion behavior.
###Code
# export
def criteria_check(criteria):
criteria_check1 = (criteria in ('any', 'all'))
criteria_check2 = (type(criteria)==float and 0<=criteria<=1)
criteria_check3 = (type(criteria)==int)
return any([criteria_check1, criteria_check2, criteria_check3])
class StructureFilter(Filter):
'''
StructureFilter - filters mols based on structures in `smarts`
Inputs:
- `smarts [list, SmartsCatalog]`: list of smarts strings for filtering or `SmartsCatalog`
- `exclude bool`: if True, filter returns `False` when a structure match is found
- `criteria ['any', 'all', float, int]`: match criteria.
(match any filter, match all filters, match float percent of filters,
match int number of filters)
- `score [None, int, float, ScoreFunction]`: see `Filter.set_score`
- `name Optional[str]`: filter name used for repr
- `fail_score [float, int]`: used in `Filter.set_score` if `score_function` is (int, float)
'''
def __init__(self, smarts, exclude=True, criteria='any', score=None, name=None, fail_score=0.):
self.catalog = self.get_catalog(smarts)
self.exclude = exclude
if not criteria_check(criteria):
raise ValueError('`criteria` must be `any`, `all`, a float between 0 and 1, or an int')
self.criteria = criteria
if name is None:
name = f'Structure filter, criteria: {criteria}, exclude: {exclude}'
super().__init__(score, name, fail_score=fail_score)
def property_function(self, mol):
return self.catalog(mol, criteria=self.criteria)
def criteria_function(self, property_output):
if not is_container(property_output):
property_output = [property_output]
if self.criteria=='any':
output = any(property_output)
else:
output = all(property_output)
if self.exclude:
output = not output
return output
def get_catalog(self, smarts):
if isinstance(smarts, Catalog):
smarts = smarts
else:
smarts = SmartsCatalog(smarts)
return smarts
class ExclusionFilter(StructureFilter):
'''
ExclusionFilter - excludes mols with substructure matches to `smarts`
Inputs:
- `smarts [list, SmartsCatalog]`: list of smarts strings for filtering or `SmartsCatalog`
- `criteria ['any', 'all', float, int]`: match criteria.
(match any filter, match all filters, match float percent of filters,
match int number of filters)
- `score [None, int, float, ScoreFunction]`: see `Filter.set_score`
- `name Optional[str]`: filter name used for repr
- `fail_score [float, int]`: used in `Filter.set_score` if `score_function` is (int, float)
'''
def __init__(self, smarts, criteria='any', score=None, name=None, fail_score=0.):
if name is None:
            name = f'Exclusion filter, criteria: {criteria}'
super().__init__(smarts, exclude=True, criteria=criteria,
score=score, name=name, fail_score=fail_score)
class KeepFilter(StructureFilter):
'''
KeepFilter - keeps mols with substructure matches to `smarts`
Inputs:
- `smarts [list, SmartsCatalog]`: list of smarts strings for filtering or `SmartsCatalog`
- `criteria ['any', 'all', float, int]`: match criteria.
(match any filter, match all filters, match float percent of filters,
match int number of filters)
- `score [None, int, float, ScoreFunction]`: see `Filter.set_score`
- `name Optional[str]`: filter name used for repr
- `fail_score [float, int]`: used in `Filter.set_score` if `score_function` is (int, float)
'''
def __init__(self, smarts, criteria='any', score=None, name=None, fail_score=0.):
if name is None:
            name = f'Keep filter, criteria: {criteria}'
super().__init__(smarts, exclude=False, criteria=criteria,
score=score, name=name, fail_score=fail_score)
smarts = [
'[*]-[#6]1:[#6]:[#6](-[#0]):[#6]:[#6](-[*]):[#6]:1',
'[*]-[#6]1:[#6]:[#6](-[*]):[#6]:[#6]:[#6]:1',
'[*]-[#6]1:[#6]:[#6]:[#6]:[#6]:[#6]:1',
'[*]-[#6]1:[#6]:[#6](-[#7]-[*]):[#6]:[#6]:[#6]:1',
'[#6]1:[#6]:[#7]:[#6]:[#6]:[#6]:1'
]
smiles = [
'c1ccccc1',
'Cc1cc(NC)ccc1',
'Cc1cc(NC)cnc1',
'Cc1cccc(NCc2ccccc2)c1'
]
mols = [to_mol(i) for i in smiles]
f = StructureFilter(smarts, exclude=False, criteria='any')
assert f(mols[1]) == True
catalog = SmartsCatalog(smarts)
f = StructureFilter(catalog, exclude=False, criteria='all')
assert f(mols[1]) == False
f = StructureFilter(smarts, exclude=True, criteria='any')
assert f(mols[1]) == False
f = StructureFilter(smarts, exclude=False, criteria=0.3)
assert f(mols[1]) == True
f = StructureFilter(smarts, exclude=False, criteria=3)
assert f(mols[1]) == True
f = StructureFilter(smarts, exclude=False, criteria=4)
assert f(mols[1]) == False
try:
StructureFilter(smarts, exclude=False, criteria='bla')
output=False
except:
output=True
assert output
###Output
_____no_output_____
###Markdown
Some wrappers for PAINS filters
###Code
# export
class PAINSFilter(ExclusionFilter):
'''
PAINSFilter - excludes mols with substructure matches to PAINS filters
Inputs:
- `criteria ['any', 'all', float, int]`: match criteria.
(match any filter, match all filters, match float percent of filters,
match int number of filters)
- `score [None, int, float, ScoreFunction]`: see `Filter.set_score`
- `name Optional[str]`: filter name used for repr
- `fail_score [float, int]`: used in `Filter.set_score` if `score_function` is (int, float)
'''
def __init__(self, criteria='any', score=None, name=None, fail_score=0.):
super().__init__(PAINSCatalog(), criteria, score, name, fail_score)
class PAINSAFilter(ExclusionFilter):
'''
PAINSAFilter - excludes mols with substructure matches to PAINS_A filters
Inputs:
- `criteria ['any', 'all', float, int]`: match criteria.
(match any filter, match all filters, match float percent of filters,
match int number of filters)
- `score [None, int, float, ScoreFunction]`: see `Filter.set_score`
- `name Optional[str]`: filter name used for repr
- `fail_score [float, int]`: used in `Filter.set_score` if `score_function` is (int, float)
'''
def __init__(self, criteria='any', score=None, name=None, fail_score=0.):
super().__init__(PAINSACatalog(), criteria, score, name, fail_score)
class PAINSBFilter(ExclusionFilter):
'''
PAINSBFilter - excludes mols with substructure matches to PAINS_B filters
Inputs:
- `criteria ['any', 'all', float, int]`: match criteria.
(match any filter, match all filters, match float percent of filters,
match int number of filters)
- `score [None, int, float, ScoreFunction]`: see `Filter.set_score`
- `name Optional[str]`: filter name used for repr
- `fail_score [float, int]`: used in `Filter.set_score` if `score_function` is (int, float)
'''
def __init__(self, criteria='any', score=None, name=None, fail_score=0.):
super().__init__(PAINSBCatalog(), criteria, score, name, fail_score)
class PAINSCFilter(ExclusionFilter):
'''
PAINSCFilter - excludes mols with substructure matches to PAINS_C filters
Inputs:
- `criteria ['any', 'all', float, int]`: match criteria.
(match any filter, match all filters, match float percent of filters,
match int number of filters)
- `score [None, int, float, ScoreFunction]`: see `Filter.set_score`
- `name Optional[str]`: filter name used for repr
- `fail_score [float, int]`: used in `Filter.set_score` if `score_function` is (int, float)
'''
def __init__(self, criteria='any', score=None, name=None, fail_score=0.):
super().__init__(PAINSCCatalog(), criteria, score, name, fail_score)
filt = PAINSAFilter(criteria=5)
assert all(filt(mols))
###Output
_____no_output_____
###Markdown
`FPFilter` allows for filtering based on fingerprint similarity. For a given molecule, a fingerprint of `fp_type` is generated and compared to `reference_fps` based on `fp_metric`. Fingerprint similarity scores greater than `fp_thresh` evaluate to `True`.See `FP` for fingerprint types and similarity metrics.
###Code
# export
class FPFilter(Filter):
'''
FPFilter - filters mols based on fingerprint similarity to `reference_smiles`
Inputs:
    - `reference_fps list`: reference fingerprints to compare against (see `from_smiles` to build them from smiles or `Mol` objects)
- `fp_type str`: fingerprint function. see `FP` for available functions
- `fp_metric str`: fingerprint similarity metric. see `FP` for available metrics
- `criteria ['any', 'all', float, int]`: match criteria.
(match any filter, match all filters, match float percent of filters,
match int number of filters)
- `fp_thresh float`: fingerprint similarity cutoff for defining a match
- `name Optional[str]`: filter name used for repr
- `fail_score [float, int]`: used in `Filter.set_score` if `score_function` is (int, float)
- `score [None, int, float, ScoreFunction]`: see `Filter.set_score`
'''
def __init__(self, reference_fps, fp_type, fp_metric, criteria='any',
fp_thresh=0., score=None, name=None, fail_score=0.):
self.reference_fps = reference_fps
self.fp = FP()
self.fp_type = fp_type
self.fp_metric = fp_metric
self.array_type = self.fp._np_or_rd(reference_fps)
self.get_fp = partial(self.fp.get_fingerprint, fp_type=self.fp_type, output_type=self.array_type)
self.get_similaity = partial(self.fp.fingerprint_similarity,
fps2=self.reference_fps, metric=fp_metric)
if not criteria_check(criteria):
raise ValueError('`criteria` must be `any`, `all`, a float between 0 and 1, or an int')
self.criteria = criteria
self.fp_thresh = fp_thresh
if name is None:
name = f'Fingerprint Filter, {fp_type}, {fp_metric}, {len(reference_fps)} references'
super().__init__(score, name, fail_score=fail_score)
def property_function(self, mol):
fp = self.get_fp(mol)
similarity = self.get_similaity(fp)
return similarity
def criteria_function(self, property_output):
property_output = property_output>=self.fp_thresh
if not is_container(property_output):
property_output = [property_output]
if self.criteria=='any':
output = any(property_output)
elif self.criteria=='all':
output = all(property_output)
elif type(self.criteria)==float:
output = (sum(property_output)/len(property_output))>=self.criteria
else:
output = sum(property_output)>=self.criteria
return output
@classmethod
def from_smiles(cls, reference_smiles, fp_type='ECFP6', fp_metric='tanimoto',
criteria='any', fp_thresh=0., score=None, name=None, fail_score=0,):
'''
creates FPFilter from `reference_smiles`
`reference_smiles` can be a list of smiles or a list of `Mols`
'''
reference_fps = get_fingerprint(reference_smiles, fp_type=fp_type)
return cls(reference_fps, fp_type, fp_metric,
criteria=criteria, fp_thresh=fp_thresh, score=score,
name=name, fail_score=fail_score)
show_doc(FPFilter.from_smiles)
f = FPFilter.from_smiles(smiles, fp_thresh=0.6)
assert f(mols) == [True, True, True, True]
f = FPFilter.from_smiles(smiles, fp_thresh=0.6, criteria='all')
assert f(mols) == [False, False, False, False]
f = FPFilter.from_smiles(smiles[:1], fp_thresh=0.6)
assert f(mols)==[True, False, False, False]
f = FPFilter.from_smiles(smiles[:2], fp_thresh=0.38, criteria=0.3)
assert f(mols) == [True, True, False, True]
f = FPFilter.from_smiles(smiles[:2], fp_thresh=0.07, criteria=2)
assert f(mols) == [True, True, False, True]
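###Markdown
Putting it together: the must-have / nice-to-have specification sketched in the overview at the top of this notebook can now be written directly with the filters defined above (cutoffs and weights are illustrative only):
###Code
hard_filters = [
    MolWtFilter(250, 450),    # molecular weight 250-450
    RotBondFilter(None, 8),   # at most 8 rotatable bonds
    PAINSFilter()             # must not match any PAINS alert
]
soft_filters = [
    MolWtFilter(350, 400, score=1.),  # +1 if molecular weight in 350-400
    TPSAFilter(None, 80, score=1.)    # +1 if TPSA <= 80
]
test_smile = 'O=C(C)Oc1ccccc1C(=O)O'
hard_pass = all(f(test_smile) for f in hard_filters)
soft_score = sum(f(test_smile, with_score=True) for f in soft_filters)
hard_pass, soft_score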
# hide
from nbdev.export import notebook2script; notebook2script()
###Output
Converted 00_core.ipynb.
Converted 01_chem.ipynb.
Converted 02_template.filters.ipynb.
Converted 03_template.template.ipynb.
Converted 04_template.blocks.ipynb.
Converted index.ipynb.
Converted template.overview.ipynb.
Converted tutorials.ipynb.
Converted tutorials.structure_enumeration.ipynb.
Converted tutorials.template.advanced.ipynb.
Converted tutorials.template.beginner.ipynb.
Converted tutorials.template.intermediate.ipynb.
|
WIX3001_soft_comp_face_recognition_with_masks.ipynb | ###Markdown
[**Broutonlab**](https://broutonlab.com/) face recognition with masks pipeline. The full solution is available in the [**github repo**](https://github.com/broutonlab/face-id-with-medical-masks).
###Code
#@title <b><font color="red" size="+3">←</font><font color="black" size="+3"> Check GPU resources</font></b>
!nvidia-smi
#@title <b><font color="red" size="+3">←</font><font color="black" size="+3"> Import requirements</font></b>
import os
import sys
import cv2
from matplotlib import pyplot as plt
import sys
import numpy as np
import torch
from torch import nn
from tqdm.notebook import tqdm
from torch.utils.data import DataLoader
%matplotlib inline
#@title <b><font color="red" size="+3">←</font><font color="black" size="+3"> Clone and build face-alignment git repo</font></b>
!git clone https://github.com/1adrianb/face-alignment
%cd face-alignment
!pip install -r requirements.txt
!python setup.py install
import face_alignment
#@title <b><font color="red" size="+3">←</font><font color="black" size="+3"> Download and import face mask SDK</font></b>
!git clone https://github.com/broutonlab/face-id-with-medical-masks.git
%cd face-id-with-medical-masks
from masked_face_sdk.mask_generation_utils import end2end_mask_generation
from masked_face_sdk.pipeline_dataset_loader import PipelineFacesDatasetGenerator
from masked_face_sdk.neural_network_modules \
import Backbone, ArcFaceLayer, FaceRecognitionModel, resnet18
from masked_face_sdk.training_utils import default_acc_function, test_embedding_net
!gdown --id 1b64prOr4_E8gcD1Q_cVZkFnSzNVfGwU_
!unzip face_recognition_with_masks_dataset.zip
!ls
# Paths to datasets for face recognition in Keras-like format
root_train_dataset_path = 'test_large/'
root_test_dataset_path = 'test_small/'
#@title <b><font color="red" size="+3">←</font><font color="black" size="+3"> Generate masks database</font></b>
# Generate masks database
!python3 generate_masks_database.py \
--masks-folder=data/masked_faces/ \
--database-file=data/masks_base.json \
--verbose --skip-warnings
#@title <b><font color="red" size="+3">←</font><font color="black" size="+3"> Prepare training dataset</font></b>
# Prepare training dataset
!python3 apply_masks_to_face_recognition_dataset.py \
--face-dataset-folder={root_train_dataset_path} \
--masks-database=data/masks_base.json \
--verbose \
--use-cuda
#@title <b><font color="red" size="+3">←</font><font color="black" size="+3"> Prepare test dataset</font></b>
# Prepare test dataset
!python3 apply_masks_to_face_recognition_dataset.py \
--face-dataset-folder={root_test_dataset_path} \
--masks-database=data/masks_base.json \
--verbose \
--use-cuda
#@title <b><font color="red" size="+3">←</font><font color="black" size="+3"> Initialize constants</font></b>
# Init constants
batch_size = 100
n_jobs = 4
epochs = 3000
image_shape = (112, 112)
embedding_size = 256
device = 'cuda:0'
#@title <b><font color="red" size="+3">←</font><font color="black" size="+3"> Initialize base variables for training</font></b>
# Init base variables for training
generator_train_dataset = PipelineFacesDatasetGenerator(
root_train_dataset_path,
image_shape
)
train_loader = DataLoader(
generator_train_dataset,
batch_size=batch_size,
num_workers=n_jobs,
shuffle=True,
drop_last=True
)
model = FaceRecognitionModel(
backbone=Backbone(
backbone=resnet18(pretrained=True),
embedding_size=embedding_size,
input_shape=(3, image_shape[0], image_shape[1])
),
head=ArcFaceLayer(
embedding_size=embedding_size,
num_classes=generator_train_dataset.num_classes
)
)
model = model.to(device)
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(params=model.parameters(), lr=0.00001)
#@title <b><font color="red" size="+3">←</font><font color="black" size="+3"> Run test for embedding net</font></b>
print(
'Start accuracy rate = {:.5f}'.format(
test_embedding_net(root_test_dataset_path, image_shape, model, device)
)
)
#@title <b><font color="red" size="+3">←</font><font color="black" size="+3"> Perform training process</font></b>
# Training process
epoch_loss = []
epoch_test_acc = []
for epoch in range(1, epochs + 1):
model.train()
batches_count = len(train_loader)
avg_epoch_loss = 0
avg_epoch_acc = 0
with tqdm(total=batches_count) as pbar:
for i, (_img, _y_true) in enumerate(train_loader):
img = _img.to(device)
y_true = _y_true.to(device)
optimizer.zero_grad()
y_pred = model(img, y_true)
loss = loss_function(
y_pred,
y_true
)
loss.backward()
optimizer.step()
acc = default_acc_function(
y_pred,
torch.nn.functional.one_hot(
y_true,
num_classes=y_pred.size(-1)
).to(y_pred.dtype).to(device)
).numpy()
pbar.postfix = \
'Epoch: {}/{}, loss: {:.8f}, ' \
'avg acc: {:.8f}'.format(
epoch,
epochs,
loss.item(),
acc
)
avg_epoch_loss += \
loss.item() / y_true.size(0) / batches_count
avg_epoch_acc += acc / batches_count
pbar.update(1)
test_acc = test_embedding_net(root_test_dataset_path, image_shape, model, device)
print('Test accuracy rate: {:.5f}'.format(test_acc))
epoch_loss.append(avg_epoch_loss)
epoch_test_acc.append(test_acc)
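# Sketch (not part of the original pipeline): persist the trained weights once
# the loop finishes so the embedding network can be reloaded later without
# retraining. The file name is an arbitrary choice.
torch.save(model.state_dict(), 'masked_face_recognition_resnet18.pth')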
#@title <b><font color="red" size="+3">←</font><font color="black" size="+3"> Plot the results</font></b>
plt.figure(figsize=(8, 8))
plt.title('Train loss per epoch')
plt.xlabel('Epoch number')
plt.ylabel('Binary crossentropy value')
plt.plot(list(range(1, len(epoch_loss) + 1)), epoch_loss)
plt.figure(figsize=(8, 8))
plt.title('Test accuracy rate per epoch')
plt.xlabel('Epoch number')
plt.ylabel('Accuracy rate')
plt.plot(list(range(1, len(epoch_test_acc) + 1)), epoch_test_acc, color='orange')
plt.show()
###Output
_____no_output_____ |
Position-specificFeaturesLbCpf1.ipynb | ###Markdown
Position-specific feature importance analysis for LbCpf1: calculation of the frequency of each nucleotide at each position
###Code
import pandas as pd
OT_data=pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset_features_clean2.csv", encoding="cp1252")
df = pd.DataFrame(OT_data)
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
print(len(POT), len(NOT))
l1=['A_OTSeqPosition1', 'A_OTSeqPosition2', 'A_OTSeqPosition3', 'A_OTSeqPosition4', 'A_OTSeqPosition5', 'A_OTSeqPosition6', 'A_OTSeqPosition7', 'A_OTSeqPosition8', 'A_OTSeqPosition9', 'A_OTSeqPosition10', 'A_OTSeqPosition11', 'A_OTSeqPosition12', 'A_OTSeqPosition13', 'A_OTSeqPosition14', 'A_OTSeqPosition15', 'A_OTSeqPosition16', 'A_OTSeqPosition17', 'A_OTSeqPosition18', 'A_OTSeqPosition19', 'A_OTSeqPosition20', 'A_OTSeqPosition21', 'A_OTSeqPosition22', 'A_OTSeqPosition23', 'A_OTSeqPosition24', 'A_OTSeqPosition25', 'A_OTSeqPosition26', 'A_OTSeqPosition27']
l2=['T_OTSeqPosition1', 'T_OTSeqPosition2', 'T_OTSeqPosition3', 'T_OTSeqPosition4', 'T_OTSeqPosition5', 'T_OTSeqPosition6', 'T_OTSeqPosition7', 'T_OTSeqPosition8', 'T_OTSeqPosition9', 'T_OTSeqPosition10', 'T_OTSeqPosition11', 'T_OTSeqPosition12', 'T_OTSeqPosition13', 'T_OTSeqPosition14', 'T_OTSeqPosition15', 'T_OTSeqPosition16', 'T_OTSeqPosition17', 'T_OTSeqPosition18', 'T_OTSeqPosition19', 'T_OTSeqPosition20', 'T_OTSeqPosition21', 'T_OTSeqPosition22', 'T_OTSeqPosition23', 'T_OTSeqPosition24', 'T_OTSeqPosition25', 'T_OTSeqPosition26', 'T_OTSeqPosition27']
l3=['G_OTSeqPosition1', 'G_OTSeqPosition2', 'G_OTSeqPosition3', 'G_OTSeqPosition4', 'G_OTSeqPosition5', 'G_OTSeqPosition6', 'G_OTSeqPosition7', 'G_OTSeqPosition8', 'G_OTSeqPosition9', 'G_OTSeqPosition10', 'G_OTSeqPosition11', 'G_OTSeqPosition12', 'G_OTSeqPosition13', 'G_OTSeqPosition14', 'G_OTSeqPosition15', 'G_OTSeqPosition16', 'G_OTSeqPosition17', 'G_OTSeqPosition18', 'G_OTSeqPosition19', 'G_OTSeqPosition20', 'G_OTSeqPosition21', 'G_OTSeqPosition22', 'G_OTSeqPosition23', 'G_OTSeqPosition24', 'G_OTSeqPosition25', 'G_OTSeqPosition26', 'G_OTSeqPosition27']
l4=['C_OTSeqPosition1', 'C_OTSeqPosition2', 'C_OTSeqPosition3', 'C_OTSeqPosition4', 'C_OTSeqPosition5', 'C_OTSeqPosition6', 'C_OTSeqPosition7', 'C_OTSeqPosition8', 'C_OTSeqPosition9', 'C_OTSeqPosition10', 'C_OTSeqPosition11', 'C_OTSeqPosition12', 'C_OTSeqPosition13', 'C_OTSeqPosition14', 'C_OTSeqPosition15', 'C_OTSeqPosition16', 'C_OTSeqPosition17', 'C_OTSeqPosition18', 'C_OTSeqPosition19', 'C_OTSeqPosition20', 'C_OTSeqPosition21', 'C_OTSeqPosition22', 'C_OTSeqPosition23', 'C_OTSeqPosition24', 'C_OTSeqPosition25', 'C_OTSeqPosition26', 'C_OTSeqPosition27']
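# Equivalent, more compact construction of the same column-name lists (a sketch,
# assuming the '<nucleotide>_OTSeqPosition<i>' naming scheme used above); the
# explicit lists are kept so the recorded output stays unchanged.
l1_alt, l2_alt, l3_alt, l4_alt = (['%s_OTSeqPosition%d' % (nt, i) for i in range(1, 28)] for nt in 'ATGC')
assert [l1_alt, l2_alt, l3_alt, l4_alt] == [l1, l2, l3, l4]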
print("positive off-targets")
POT_l1=[]
POT_l2=[]
POT_l3=[]
POT_l4=[]
for i, j, k, l in zip(l1, l2, l3, l4):
t0=POT[i].sum()
t1=POT[j].sum()
t2=POT[k].sum()
t3=POT[l].sum()
POT_A=t0/481
POT_T=t1/481
POT_G=t2/481
POT_C=t3/481
POT_l1.append(POT_A)
POT_l2.append(POT_T)
POT_l3.append(POT_G)
POT_l4.append(POT_C)
print("Position-wise frequency of A in positive off-targets \n", POT_l1, "\n")
print("Position-wise frequency of T in positive off-targets \n", POT_l2, "\n")
print("Position-wise frequency of G in positive off-targets \n", POT_l3, "\n")
print("Position-wise frequency of C in positive off-targets \n", POT_l4, "\n")
print("negative off-targets")
NOT_l1=[]
NOT_l2=[]
NOT_l3=[]
NOT_l4=[]
for i, j, k, l in zip(l1, l2, l3, l4):
t0=NOT[i].sum()
t1=NOT[j].sum()
t2=NOT[k].sum()
t3=NOT[l].sum()
NOT_A=t0/58474
NOT_T=t1/58474
NOT_G=t2/58474
NOT_C=t3/58474
NOT_l1.append(NOT_A)
NOT_l2.append(NOT_T)
NOT_l3.append(NOT_G)
NOT_l4.append(NOT_C)
print("Position-wise frequency of A in negative off-targets \n", POT_l1, "\n")
print("Position-wise frequency of T in negative off-targets \n", POT_l2, "\n")
print("Position-wise frequency of G in negative off-targets \n", POT_l3, "\n")
print("Position-wise frequency of C in negative off-targets \n", POT_l4, "\n")
###Output
524 80304
positive off-targets
Position-wise frequency of A in positive off-targets
[0.02079002079002079, 0.0, 0.002079002079002079, 0.21621621621621623, 0.04365904365904366, 0.07692307692307693, 0.07068607068607069, 0.42203742203742206, 0.07276507276507277, 0.49480249480249483, 0.1600831600831601, 0.1683991683991684, 0.4407484407484408, 0.17255717255717257, 0.7505197505197505, 0.18087318087318088, 0.3762993762993763, 0.14137214137214138, 0.5550935550935551, 0.5509355509355509, 0.1787941787941788, 0.5343035343035343, 0.27442827442827444, 0.32224532224532226, 0.6943866943866944, 0.25363825363825365, 0.2203742203742204]
Position-wise frequency of T in positive off-targets
[0.7193347193347194, 1.0727650727650728, 1.0602910602910602, 0.12681912681912683, 0.1101871101871102, 0.3596673596673597, 0.15384615384615385, 0.5280665280665281, 0.340956340956341, 0.04781704781704782, 0.4885654885654886, 0.340956340956341, 0.15176715176715178, 0.19334719334719336, 0.08316008316008316, 0.2494802494802495, 0.08316008316008316, 0.1496881496881497, 0.13305613305613306, 0.288981288981289, 0.08523908523908524, 0.35550935550935553, 0.28482328482328484, 0.13097713097713098, 0.1995841995841996, 0.21205821205821207, 0.17255717255717257]
Position-wise frequency of G in positive off-targets
[0.3180873180873181, 0.0, 0.004158004158004158, 0.49064449064449067, 0.6486486486486487, 0.5426195426195426, 0.6632016632016632, 0.018711018711018712, 0.4864864864864865, 0.3700623700623701, 0.3201663201663202, 0.14553014553014554, 0.158004158004158, 0.44490644490644493, 0.14553014553014554, 0.0893970893970894, 0.16632016632016633, 0.1787941787941788, 0.15384615384615385, 0.20582120582120583, 0.2993762993762994, 0.08316008316008316, 0.12681912681912683, 0.15592515592515593, 0.16632016632016633, 0.6237006237006237, 0.6216216216216216]
Position-wise frequency of C in positive off-targets
[0.031185031185031187, 0.016632016632016633, 0.02286902286902287, 0.25571725571725573, 0.2869022869022869, 0.1101871101871102, 0.20166320166320167, 0.12058212058212059, 0.1891891891891892, 0.17671517671517672, 0.12058212058212059, 0.43451143451143454, 0.3388773388773389, 0.2785862785862786, 0.1101871101871102, 0.5675675675675675, 0.46361746361746364, 0.6195426195426196, 0.24740124740124741, 0.04365904365904366, 0.525987525987526, 0.11642411642411643, 0.40124740124740127, 0.4781704781704782, 0.2598752598752599, 0.23076923076923078, 0.30561330561330563]
negative off-targets
Position-wise frequency of A in negative off-targets
[0.02079002079002079, 0.0, 0.002079002079002079, 0.21621621621621623, 0.04365904365904366, 0.07692307692307693, 0.07068607068607069, 0.42203742203742206, 0.07276507276507277, 0.49480249480249483, 0.1600831600831601, 0.1683991683991684, 0.4407484407484408, 0.17255717255717257, 0.7505197505197505, 0.18087318087318088, 0.3762993762993763, 0.14137214137214138, 0.5550935550935551, 0.5509355509355509, 0.1787941787941788, 0.5343035343035343, 0.27442827442827444, 0.32224532224532226, 0.6943866943866944, 0.25363825363825365, 0.2203742203742204]
Position-wise frequency of T in negative off-targets
[0.7193347193347194, 1.0727650727650728, 1.0602910602910602, 0.12681912681912683, 0.1101871101871102, 0.3596673596673597, 0.15384615384615385, 0.5280665280665281, 0.340956340956341, 0.04781704781704782, 0.4885654885654886, 0.340956340956341, 0.15176715176715178, 0.19334719334719336, 0.08316008316008316, 0.2494802494802495, 0.08316008316008316, 0.1496881496881497, 0.13305613305613306, 0.288981288981289, 0.08523908523908524, 0.35550935550935553, 0.28482328482328484, 0.13097713097713098, 0.1995841995841996, 0.21205821205821207, 0.17255717255717257]
Position-wise frequency of G in negative off-targets
[0.3180873180873181, 0.0, 0.004158004158004158, 0.49064449064449067, 0.6486486486486487, 0.5426195426195426, 0.6632016632016632, 0.018711018711018712, 0.4864864864864865, 0.3700623700623701, 0.3201663201663202, 0.14553014553014554, 0.158004158004158, 0.44490644490644493, 0.14553014553014554, 0.0893970893970894, 0.16632016632016633, 0.1787941787941788, 0.15384615384615385, 0.20582120582120583, 0.2993762993762994, 0.08316008316008316, 0.12681912681912683, 0.15592515592515593, 0.16632016632016633, 0.6237006237006237, 0.6216216216216216]
Position-wise frequency of C in negative off-targets
[0.031185031185031187, 0.016632016632016633, 0.02286902286902287, 0.25571725571725573, 0.2869022869022869, 0.1101871101871102, 0.20166320166320167, 0.12058212058212059, 0.1891891891891892, 0.17671517671517672, 0.12058212058212059, 0.43451143451143454, 0.3388773388773389, 0.2785862785862786, 0.1101871101871102, 0.5675675675675675, 0.46361746361746364, 0.6195426195426196, 0.24740124740124741, 0.04365904365904366, 0.525987525987526, 0.11642411642411643, 0.40124740124740127, 0.4781704781704782, 0.2598752598752599, 0.23076923076923078, 0.30561330561330563]
###Markdown
Enrichment analysis to study the position-specific favouring and disfavouring of nucleotides
###Code
import pandas as pd
l1=['C_OTSeqPosition1','G_OTSeqPosition1', 'T_OTSeqPosition1', 'A_OTSeqPosition1', 'C_OTSeqPosition2', 'G_OTSeqPosition2', 'T_OTSeqPosition2', 'A_OTSeqPosition2', 'C_OTSeqPosition3', 'G_OTSeqPosition3', 'T_OTSeqPosition3', 'A_OTSeqPosition3', 'C_OTSeqPosition4', 'G_OTSeqPosition4', 'T_OTSeqPosition4', 'A_OTSeqPosition4', 'C_OTSeqPosition5', 'G_OTSeqPosition5', 'T_OTSeqPosition5', 'A_OTSeqPosition5', 'C_OTSeqPosition6', 'G_OTSeqPosition6', 'T_OTSeqPosition6', 'A_OTSeqPosition6', 'C_OTSeqPosition7', 'G_OTSeqPosition7', 'T_OTSeqPosition7', 'A_OTSeqPosition7', 'C_OTSeqPosition8', 'G_OTSeqPosition8', 'T_OTSeqPosition8', 'A_OTSeqPosition8', 'C_OTSeqPosition9', 'G_OTSeqPosition9', 'T_OTSeqPosition9', 'A_OTSeqPosition9', 'C_OTSeqPosition10', 'G_OTSeqPosition10', 'T_OTSeqPosition10', 'A_OTSeqPosition10', 'C_OTSeqPosition11', 'G_OTSeqPosition11', 'T_OTSeqPosition11', 'A_OTSeqPosition11', 'C_OTSeqPosition12', 'G_OTSeqPosition12', 'T_OTSeqPosition12', 'A_OTSeqPosition12', 'C_OTSeqPosition13', 'G_OTSeqPosition13', 'T_OTSeqPosition13', 'A_OTSeqPosition13', 'C_OTSeqPosition14', 'G_OTSeqPosition14', 'T_OTSeqPosition14', 'A_OTSeqPosition14', 'C_OTSeqPosition15', 'G_OTSeqPosition15', 'T_OTSeqPosition15', 'A_OTSeqPosition15', 'C_OTSeqPosition16', 'G_OTSeqPosition16', 'T_OTSeqPosition16', 'A_OTSeqPosition16', 'C_OTSeqPosition17', 'G_OTSeqPosition17', 'T_OTSeqPosition17', 'A_OTSeqPosition17', 'C_OTSeqPosition18', 'G_OTSeqPosition18', 'T_OTSeqPosition18', 'A_OTSeqPosition18', 'C_OTSeqPosition19', 'G_OTSeqPosition19', 'T_OTSeqPosition19', 'A_OTSeqPosition19', 'C_OTSeqPosition20', 'G_OTSeqPosition20', 'T_OTSeqPosition20', 'A_OTSeqPosition20', 'C_OTSeqPosition21', 'G_OTSeqPosition21', 'T_OTSeqPosition21', 'A_OTSeqPosition21', 'C_OTSeqPosition22', 'G_OTSeqPosition22', 'T_OTSeqPosition22', 'A_OTSeqPosition22', 'C_OTSeqPosition23', 'G_OTSeqPosition23', 'T_OTSeqPosition23', 'A_OTSeqPosition23', 'C_OTSeqPosition24', 'G_OTSeqPosition24', 'T_OTSeqPosition24', 'A_OTSeqPosition24', 'C_OTSeqPosition25', 'G_OTSeqPosition25', 'T_OTSeqPosition25', 'A_OTSeqPosition25', 'C_OTSeqPosition26', 'G_OTSeqPosition26', 'T_OTSeqPosition26', 'A_OTSeqPosition26', 'C_OTSeqPosition27', 'G_OTSeqPosition27', 'T_OTSeqPosition27', 'A_OTSeqPosition27']
df= pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset-sampled-525PN.csv")
for i in l1:
#print(df.groupby("Y")[i].describe())
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
from scipy import stats
print(i)
print(stats.shapiro(POT[i]))
print(stats.shapiro(NOT[i]))
print(stats.ttest_ind(POT[i], NOT[i], equal_var = False))
print("\n")
import pandas as pd
OT_data=pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset-sampled-525PN.csv", encoding="cp1252")
df = pd.DataFrame(OT_data)
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
print(len(df))
l1=['C_OTSeqPosition1','G_OTSeqPosition1', 'T_OTSeqPosition1', 'A_OTSeqPosition1', 'C_OTSeqPosition2', 'G_OTSeqPosition2', 'T_OTSeqPosition2', 'A_OTSeqPosition2', 'C_OTSeqPosition3', 'G_OTSeqPosition3', 'T_OTSeqPosition3', 'A_OTSeqPosition3', 'C_OTSeqPosition4', 'G_OTSeqPosition4', 'T_OTSeqPosition4', 'A_OTSeqPosition4', 'C_OTSeqPosition5', 'G_OTSeqPosition5', 'T_OTSeqPosition5', 'A_OTSeqPosition5', 'C_OTSeqPosition6', 'G_OTSeqPosition6', 'T_OTSeqPosition6', 'A_OTSeqPosition6', 'C_OTSeqPosition7', 'G_OTSeqPosition7', 'T_OTSeqPosition7', 'A_OTSeqPosition7', 'C_OTSeqPosition8', 'G_OTSeqPosition8', 'T_OTSeqPosition8', 'A_OTSeqPosition8', 'C_OTSeqPosition9', 'G_OTSeqPosition9', 'T_OTSeqPosition9', 'A_OTSeqPosition9', 'C_OTSeqPosition10', 'G_OTSeqPosition10', 'T_OTSeqPosition10', 'A_OTSeqPosition10', 'C_OTSeqPosition11', 'G_OTSeqPosition11', 'T_OTSeqPosition11', 'A_OTSeqPosition11', 'C_OTSeqPosition12', 'G_OTSeqPosition12', 'T_OTSeqPosition12', 'A_OTSeqPosition12', 'C_OTSeqPosition13', 'G_OTSeqPosition13', 'T_OTSeqPosition13', 'A_OTSeqPosition13', 'C_OTSeqPosition14', 'G_OTSeqPosition14', 'T_OTSeqPosition14', 'A_OTSeqPosition14', 'C_OTSeqPosition15', 'G_OTSeqPosition15', 'T_OTSeqPosition15', 'A_OTSeqPosition15', 'C_OTSeqPosition16', 'G_OTSeqPosition16', 'T_OTSeqPosition16', 'A_OTSeqPosition16', 'C_OTSeqPosition17', 'G_OTSeqPosition17', 'T_OTSeqPosition17', 'A_OTSeqPosition17', 'C_OTSeqPosition18', 'G_OTSeqPosition18', 'T_OTSeqPosition18', 'A_OTSeqPosition18', 'C_OTSeqPosition19', 'G_OTSeqPosition19', 'T_OTSeqPosition19', 'A_OTSeqPosition19', 'C_OTSeqPosition20', 'G_OTSeqPosition20', 'T_OTSeqPosition20', 'A_OTSeqPosition20', 'C_OTSeqPosition21', 'G_OTSeqPosition21', 'T_OTSeqPosition21', 'A_OTSeqPosition21', 'C_OTSeqPosition22', 'G_OTSeqPosition22', 'T_OTSeqPosition22', 'A_OTSeqPosition22', 'C_OTSeqPosition23', 'G_OTSeqPosition23', 'T_OTSeqPosition23', 'A_OTSeqPosition23', 'C_OTSeqPosition24', 'G_OTSeqPosition24', 'T_OTSeqPosition24', 'A_OTSeqPosition24', 'C_OTSeqPosition25', 'G_OTSeqPosition25', 'T_OTSeqPosition25', 'A_OTSeqPosition25', 'C_OTSeqPosition26', 'G_OTSeqPosition26', 'T_OTSeqPosition26', 'A_OTSeqPosition26', 'C_OTSeqPosition27', 'G_OTSeqPosition27', 'T_OTSeqPosition27', 'A_OTSeqPosition27']
print("positive off-targets")
POT_l1=[]
for i in l1:
total = POT[i].sum()
POT_ratio=total/524
POT_l1.append(POT_ratio)
print(POT_l1)
print("negative off-targets")
NOT_l1=[]
for i in l1:
total = NOT[i].sum()
NOT_ratio=total/525
NOT_l1.append(NOT_ratio)
print(NOT_l1)
enrichment_ratio=[]
for i, j in zip(POT_l1, NOT_l1):
enrichment_ratio1=i/j
enrichment_ratio.append(enrichment_ratio1)
print(enrichment_ratio)
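# Sketch (not in the original analysis): a log2 transform makes favoured (>0)
# and disfavoured (<0) nucleotide/position combinations symmetric around zero;
# the small pseudocount avoids log(0) for combinations absent from the positive set.
import numpy as np
log2_enrichment = np.log2(np.array(enrichment_ratio) + 1e-6)
print(log2_enrichment)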
###Output
[0.16335462993693992, 3.193583015267176, 1.1555343511450382, 0.11787157611136058, 0.07220961419434702, 0.0, 1.752490619743822, 0.0, 0.2160978895374944, 0.027449545121823692, 1.8249045801526718, 0.0082802346855088, 1.559933326891487, 1.5353920888272032, 0.34143247046782377, 0.9387249845265112, 1.3555231252806466, 2.2984957341715315, 0.3382238537462926, 0.17830573165998187, 0.5900127226463104, 1.5203377418782176, 1.1182590494951983, 0.3599088416215816, 0.9255725190839694, 3.591109872201733, 0.333969465648855, 0.3372760940216159, 0.3631917938931298, 0.15028625954198474, 1.8308254160030755, 1.2711712786259541, 0.6606787255227349, 2.416974895726765, 0.9553080063909107, 0.3130963740458015, 0.6923757214671383, 1.748428378985182, 0.1578348844504862, 1.5791668773065062, 0.5929661941112323, 1.5585241730279897, 1.2590827448258972, 0.5590358446730833, 2.7552480916030535, 0.6679389312977099, 0.9898372114411846, 0.4718289543759986, 1.1665076335877862, 0.6190653509588531, 0.45999567910125305, 2.145500809622947, 1.0094415427882681, 1.7290999753755232, 0.6515907756365771, 0.6872594788972304, 0.40535225219975524, 0.41013794027052364, 0.4660039055565418, 2.699171129087387, 3.1439194524874967, 0.43082061068702293, 0.7467640225688683, 0.5009541984732825, 2.4285388317291736, 1.1132315521628497, 0.28831896315009065, 0.8356931790199458, 3.245311981413873, 0.7121001829537569, 0.4425607642954152, 0.483189865194088, 1.3548534004163775, 0.5148695928753181, 0.5129770992366413, 1.6512934690415606, 0.3049286425489545, 0.6122773536895675, 0.9222865375865731, 1.9239545303684038, 2.460998295412436, 1.1825804029533225, 0.47765400319545537, 0.41425058719906044, 0.36197980792908147, 0.4714863044454423, 1.6162861875270058, 1.488384150377267, 1.5106900047709924, 0.6643088284102223, 1.2828172932867234, 0.6782149148561362, 1.814479773997716, 1.295571202948144, 0.601145038167939, 0.663657271481699, 0.9487768910478834, 0.6463925141590741, 0.5937234944868534, 1.007943989699255, 1.020292037257511, 1.5180430256766133, 0.7514312977099237, 0.3981525225650844, 1.2171944987697938, 1.7621800628648405, 0.5774888676844783, 0.33715012722646315]
###Markdown
Mismatch distribution analysis for LbCpf1
###Code
import pandas as pd
OT_data=pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset_features_POT.csv", index_col=[0], encoding="cp1252")
df = pd.DataFrame(OT_data)
print(len(df))
l1=['mismatch_POS1', 'mismatch_POS2', 'mismatch_POS3', 'mismatch_POS4', 'mismatch_POS5', 'mismatch_POS6', 'mismatch_POS7', 'mismatch_POS8', 'mismatch_POS9', 'mismatch_POS10', 'mismatch_POS11', 'mismatch_POS12', 'mismatch_POS13', 'mismatch_POS14', 'mismatch_POS15', 'mismatch_POS16', 'mismatch_POS17', 'mismatch_POS18', 'mismatch_POS19', 'mismatch_POS20', 'mismatch_POS21', 'mismatch_POS22', 'mismatch_POS23', 'mismatch_POS24', 'mismatch_POS25', 'mismatch_POS26', 'mismatch_POS27']
for i in l1:
total = df[i].sum()
print(total/524)
import pandas as pd
from scipy import stats
from scipy.stats import sem
OT_data1=pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset-sampled-525PN.csv", index_col=[0], encoding="cp1252")
l2=[]
l3=[]
df = pd.DataFrame(OT_data1)
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
#print(len(df))
l1=['mismatch_POS1', 'mismatch_POS2', 'mismatch_POS3', 'mismatch_POS4', 'mismatch_POS5', 'mismatch_POS6', 'mismatch_POS7', 'mismatch_POS8', 'mismatch_POS9', 'mismatch_POS10', 'mismatch_POS11', 'mismatch_POS12', 'mismatch_POS13', 'mismatch_POS14', 'mismatch_POS15', 'mismatch_POS16', 'mismatch_POS17', 'mismatch_POS18', 'mismatch_POS19', 'mismatch_POS20', 'mismatch_POS21', 'mismatch_POS22', 'mismatch_POS23', 'mismatch_POS24', 'mismatch_POS25', 'mismatch_POS26', 'mismatch_POS27']
for i in l1:
total = POT[i].sum()
l2.append(total/481)
l3.append(sem(POT[i]))
print("\n Positive off-target \n")
print(l2)
print(l3)
l2=[]
l3=[]
for i in l1:
total = NOT[i].sum()
l2.append(total/481)
l3.append(sem(NOT[i]))
print("\n Negative off-targets \n")
print(l2)
print(l3)
import pandas as pd
l1=['mismatch_POS1', 'mismatch_POS2', 'mismatch_POS3', 'mismatch_POS4', 'mismatch_POS5', 'mismatch_POS6', 'mismatch_POS7', 'mismatch_POS8', 'mismatch_POS9', 'mismatch_POS10', 'mismatch_POS11', 'mismatch_POS12', 'mismatch_POS13', 'mismatch_POS14', 'mismatch_POS15', 'mismatch_POS16', 'mismatch_POS17', 'mismatch_POS18', 'mismatch_POS19', 'mismatch_POS20', 'mismatch_POS21', 'mismatch_POS22', 'mismatch_POS23', 'mismatch_POS24', 'mismatch_POS25', 'mismatch_POS26', 'mismatch_POS27']
df= pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset-sampled-525PN.csv")
for i in l1:
#print(df.groupby("Y")[i].describe())
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
from scipy import stats
print(i)
print(stats.shapiro(POT[i]))
print(stats.shapiro(NOT[i]))
print(stats.ttest_ind(POT[i], NOT[i], equal_var = False))
print("\n")
###Output
mismatch_POS1
ShapiroResult(statistic=0.5980409979820251, pvalue=4.545464738580978e-33)
ShapiroResult(statistic=1.0, pvalue=1.0)
Ttest_indResult(statistic=16.402981542030954, pvalue=4.351230827635782e-49)
mismatch_POS2
ShapiroResult(statistic=0.09859353303909302, pvalue=5.044674471569341e-44)
ShapiroResult(statistic=0.4875909090042114, pvalue=3.2086766322371485e-36)
Ttest_indResult(statistic=-10.035483354033676, pvalue=4.5529833387280975e-22)
mismatch_POS3
ShapiroResult(statistic=0.14687281847000122, pvalue=3.1389085600875902e-43)
ShapiroResult(statistic=0.5262457132339478, pvalue=3.4030871401104757e-35)
Ttest_indResult(statistic=-10.553042470709938, pvalue=3.39721425597659e-24)
mismatch_POS4
ShapiroResult(statistic=0.5715742111206055, pvalue=7.106134724611875e-34)
ShapiroResult(statistic=0.6336241960525513, pvalue=6.090118094428014e-32)
Ttest_indResult(statistic=8.50493910900247, pvalue=6.257747926648364e-17)
mismatch_POS5
ShapiroResult(statistic=0.21142542362213135, pvalue=4.2235135714749987e-42)
ShapiroResult(statistic=0.5363994836807251, pvalue=6.492892982317941e-35)
Ttest_indResult(statistic=-9.631069985687448, pvalue=8.665821859206207e-21)
mismatch_POS6
ShapiroResult(statistic=0.3047347068786621, pvalue=2.5119255881947373e-40)
ShapiroResult(statistic=0.5646729469299316, pvalue=4.174961041487286e-34)
Ttest_indResult(statistic=-8.747457808363327, pvalue=1.1241583873424401e-17)
mismatch_POS7
ShapiroResult(statistic=0.27826154232025146, pvalue=7.544590931924815e-41)
ShapiroResult(statistic=0.5591868162155151, pvalue=2.8879660005372157e-34)
Ttest_indResult(statistic=-9.13026344067856, pvalue=5.045535120311492e-19)
mismatch_POS8
ShapiroResult(statistic=0.1996554136276245, pvalue=2.596606054393886e-42)
ShapiroResult(statistic=0.51545250415802, pvalue=1.7334300022649944e-35)
Ttest_indResult(statistic=-9.032968817012472, pvalue=1.3796949292208198e-18)
mismatch_POS9
ShapiroResult(statistic=0.3405500650405884, pvalue=1.3592511026042866e-39)
ShapiroResult(statistic=0.590776801109314, pvalue=2.5422603669645774e-33)
Ttest_indResult(statistic=-9.323573141114457, pvalue=8.648976315000489e-20)
mismatch_POS10
ShapiroResult(statistic=0.3725109100341797, pvalue=6.542375263747384e-39)
ShapiroResult(statistic=0.5428290367126465, pvalue=9.8325832095713e-35)
Ttest_indResult(statistic=-5.869378290806184, pvalue=6.014383584634025e-09)
mismatch_POS11
ShapiroResult(statistic=0.27361589670181274, pvalue=6.1320820798854e-41)
ShapiroResult(statistic=0.5428290367126465, pvalue=9.8325832095713e-35)
Ttest_indResult(statistic=-8.469217668230023, pvalue=1.0939947136399377e-16)
mismatch_POS12
ShapiroResult(statistic=0.3171526789665222, pvalue=4.473743438249481e-40)
ShapiroResult(statistic=0.5809038877487183, pvalue=1.2702350629012368e-33)
Ttest_indResult(statistic=-9.327523518456834, pvalue=8.759124113192768e-20)
mismatch_POS13
ShapiroResult(statistic=0.321181058883667, pvalue=5.404906267793322e-40)
ShapiroResult(statistic=0.520932137966156, pvalue=2.437697194243621e-35)
Ttest_indResult(statistic=-6.330313332930041, pvalue=3.8145906685106407e-10)
mismatch_POS14
ShapiroResult(statistic=0.19358372688293457, pvalue=2.0248762809493607e-42)
ShapiroResult(statistic=0.49595433473587036, pvalue=5.281638391517101e-36)
Ttest_indResult(statistic=-8.43751656169058, pvalue=1.6349457805699556e-16)
mismatch_POS15
ShapiroResult(statistic=0.2171357274055481, pvalue=5.359966626042425e-42)
ShapiroResult(statistic=0.5078887939453125, pvalue=1.0882608174725509e-35)
Ttest_indResult(statistic=-8.354632422271393, pvalue=2.9601823599821803e-16)
mismatch_POS16
ShapiroResult(statistic=0.4509754776954651, pvalue=4.1692965491631315e-37)
ShapiroResult(statistic=0.5646729469299316, pvalue=4.174961041487286e-34)
Ttest_indResult(statistic=-4.527146500512257, pvalue=6.687957066430591e-06)
mismatch_POS17
ShapiroResult(statistic=0.49637508392333984, pvalue=5.782361031078749e-36)
ShapiroResult(statistic=0.5474821329116821, pvalue=1.331623648251748e-34)
Ttest_indResult(statistic=-2.1018261208607583, pvalue=0.03580847323366071)
mismatch_POS18
ShapiroResult(statistic=0.4837043285369873, pvalue=2.72418771028346e-36)
ShapiroResult(statistic=0.5712023973464966, pvalue=6.505468131601225e-34)
Ttest_indResult(statistic=-3.742132355753197, pvalue=0.00019256762206707883)
mismatch_POS19
ShapiroResult(statistic=0.3004802465438843, pvalue=2.065219663737272e-40)
ShapiroResult(statistic=0.5865209102630615, pvalue=1.8819875778305986e-33)
Ttest_indResult(statistic=-10.100796710561255, pvalue=1.0092202052116592e-22)
mismatch_POS20
ShapiroResult(statistic=0.1873791217803955, pvalue=1.5722568769724448e-42)
ShapiroResult(statistic=0.5577765703201294, pvalue=2.62844899029931e-34)
Ttest_indResult(statistic=-11.12988680747007, pvalue=1.2288527626529785e-26)
mismatch_POS21
ShapiroResult(statistic=0.2828368544578552, pvalue=9.2637038879585e-41)
ShapiroResult(statistic=0.5633245706558228, pvalue=3.8120583847395983e-34)
Ttest_indResult(statistic=-9.227265600274352, pvalue=2.2107992264025876e-19)
mismatch_POS22
ShapiroResult(statistic=0.29616451263427734, pvalue=1.6948704926008662e-40)
ShapiroResult(statistic=0.5619610548019409, pvalue=3.4779959767941067e-34)
Ttest_indResult(statistic=-8.825253760110444, pvalue=6.056382020360444e-18)
mismatch_POS23
ShapiroResult(statistic=0.6271321773529053, pvalue=3.926711254804607e-32)
ShapiroResult(statistic=0.5135911703109741, pvalue=1.5449841381206474e-35)
Ttest_indResult(statistic=6.981051599065322, pvalue=5.279226264671945e-12)
mismatch_POS24
ShapiroResult(statistic=0.4748212695121765, pvalue=1.6215818654518646e-36)
ShapiroResult(statistic=0.624015212059021, pvalue=2.9139499909214554e-32)
Ttest_indResult(statistic=-8.046827851845908, pvalue=2.4033858315139698e-15)
mismatch_POS25
ShapiroResult(statistic=0.4217395782470703, pvalue=8.406314938406276e-38)
ShapiroResult(statistic=0.49799227714538574, pvalue=5.9697370116928556e-36)
Ttest_indResult(statistic=-2.578569291799921, pvalue=0.010059068535439693)
mismatch_POS26
ShapiroResult(statistic=0.5447965860366821, pvalue=1.1910701101481636e-34)
ShapiroResult(statistic=0.4480331540107727, pvalue=3.309488510045973e-37)
Ttest_indResult(statistic=3.663499698735858, pvalue=0.00026158808574389564)
mismatch_POS27
ShapiroResult(statistic=0.4608069062232971, pvalue=7.255901027498474e-37)
ShapiroResult(statistic=1.0, pvalue=1.0)
Ttest_indResult(statistic=10.553654799624137, pvalue=9.779578124412288e-24)
###Markdown
Position-specific mismatch type analysis for LbCpf1: mismatch at position 4
###Code
import pandas as pd
from scipy import stats
from scipy.stats import sem
OT_data1=pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset-sampled-525PN.csv", index_col=[0], encoding="cp1252")
l2=[]
l3=[]
df = pd.DataFrame(OT_data1)
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
print(len(POT))
print(len(NOT))
#print(len(df))
l1=['MM_type_A–T_POS4', 'MM_type_A–C_POS4', 'MM_type_A–G_POS4', 'MM_type_T–C_POS4', 'MM_type_T–G_POS4', 'MM_type_T–A_POS4', 'MM_type_G–A_POS4', 'MM_type_G–T_POS4', 'MM_type_G–C_POS4', 'MM_type_C–A_POS4', 'MM_type_C–T_POS4', 'MM_type_C–G_POS4', 'MM_type_other_POS4']
for i in l1:
total = POT[i].sum()
l2.append(total/524)
l3.append(sem(POT[i]))
print("\n Positive off-target \n")
print(l2)
print(l3)
l2=[]
l3=[]
for i in l1:
total = NOT[i].sum()
l2.append(total/525)
l3.append(sem(NOT[i]))
print("\n Negative off-targets \n")
print(l2)
print(l3)
import pandas as pd
df= pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset-sampled-525PN.csv", index_col=[0], encoding="cp1252")
l1=['MM_type_A–T_POS4', 'MM_type_A–C_POS4', 'MM_type_A–G_POS4', 'MM_type_T–C_POS4', 'MM_type_T–G_POS4', 'MM_type_T–A_POS4', 'MM_type_G–A_POS4', 'MM_type_G–T_POS4', 'MM_type_G–C_POS4', 'MM_type_C–A_POS4', 'MM_type_C–T_POS4', 'MM_type_C–G_POS4', 'MM_type_other_POS4']
for i in l1:
#print(df.groupby("Y")[i].describe())
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
from scipy import stats
print(i)
print(stats.shapiro(POT[i]))
print(stats.shapiro(NOT[i]))
print(stats.ttest_ind(POT[i], NOT[i], equal_var = False))
print("\n")
###Output
MM_type_A–T_POS4
ShapiroResult(statistic=0.02032226324081421, pvalue=2.802596928649634e-45)
ShapiroResult(statistic=0.1538635492324829, pvalue=3.839557792249999e-43)
Ttest_indResult(statistic=-3.543757395791489, pvalue=0.0004253816268768453)
MM_type_A–C_POS4
ShapiroResult(statistic=0.04745042324066162, pvalue=8.407790785948902e-45)
ShapiroResult(statistic=0.05888634920120239, pvalue=1.1210387714598537e-44)
Ttest_indResult(statistic=-0.3764182676987785, pvalue=0.7066837079381533)
MM_type_A–G_POS4
ShapiroResult(statistic=0.20560038089752197, pvalue=3.316873465056842e-42)
ShapiroResult(statistic=0.05888634920120239, pvalue=1.1210387714598537e-44)
Ttest_indResult(statistic=3.728097448371174, pvalue=0.0002083999243724627)
MM_type_T–C_POS4
ShapiroResult(statistic=0.13947057723999023, pvalue=2.3541814200656927e-43)
ShapiroResult(statistic=0.08930468559265137, pvalue=3.2229864679470793e-44)
Ttest_indResult(statistic=1.3584387895808356, pvalue=0.1746429721256183)
MM_type_T–G_POS4
ShapiroResult(statistic=0.16106730699539185, pvalue=5.479076995510035e-43)
ShapiroResult(statistic=0.27329450845718384, pvalue=5.63125800873571e-41)
Ttest_indResult(statistic=-2.8470951223060843, pvalue=0.004509761193404014)
MM_type_T–A_POS4
ShapiroResult(statistic=0.12396728992462158, pvalue=1.3032075718220799e-43)
ShapiroResult(statistic=0.11566287279129028, pvalue=8.828180325246348e-44)
Ttest_indResult(statistic=0.22462928282862302, pvalue=0.8223116195471276)
MM_type_G–A_POS4
ShapiroResult(statistic=0.10737556219100952, pvalue=6.866362475191604e-44)
ShapiroResult(statistic=0.08930468559265137, pvalue=3.2229864679470793e-44)
Ttest_indResult(statistic=0.5072182256589852, pvalue=0.612110215111503)
MM_type_G–T_POS4
ShapiroResult(statistic=1.0, pvalue=1.0)
ShapiroResult(statistic=0.07971543073654175, pvalue=2.2420775429197073e-44)
Ttest_indResult(statistic=-2.461260544560062, pvalue=0.014166175898024254)
MM_type_G–C_POS4
ShapiroResult(statistic=0.04745042324066162, pvalue=8.407790785948902e-45)
ShapiroResult(statistic=0.05888634920120239, pvalue=1.1210387714598537e-44)
Ttest_indResult(statistic=-0.3764182676987785, pvalue=0.7066837079381533)
MM_type_C–A_POS4
ShapiroResult(statistic=0.11582159996032715, pvalue=9.528829557408756e-44)
ShapiroResult(statistic=0.06961178779602051, pvalue=1.5414283107572988e-44)
Ttest_indResult(statistic=1.3034473682309973, pvalue=0.19274012913393337)
MM_type_C–T_POS4
ShapiroResult(statistic=1.0, pvalue=1.0)
ShapiroResult(statistic=0.06961178779602051, pvalue=1.5414283107572988e-44)
Ttest_indResult(statistic=-2.2446517632945957, pvalue=0.025206931337515046)
MM_type_C–G_POS4
ShapiroResult(statistic=0.13947057723999023, pvalue=2.3541814200656927e-43)
ShapiroResult(statistic=0.047379910945892334, pvalue=7.006492321624085e-45)
Ttest_indResult(statistic=2.5269133673501614, pvalue=0.0117100154511772)
MM_type_other_POS4
ShapiroResult(statistic=0.6363593339920044, pvalue=8.002644464479999e-32)
ShapiroResult(statistic=0.5412482023239136, pvalue=8.874937578903894e-35)
Ttest_indResult(statistic=8.912546412252281, pvalue=2.231506398656497e-18)
###Markdown
mismatch at position 16
###Code
import pandas as pd
from scipy import stats
from scipy.stats import sem
OT_data1=pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset-sampled-525PN.csv", index_col=[0], encoding="cp1252")
l2=[]
l3=[]
df = pd.DataFrame(OT_data1)
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
print(len(POT))
print(len(NOT))
#print(len(df))
l1=['MM_type_A–T_POS16', 'MM_type_A–C_POS16', 'MM_type_A–G_POS16', 'MM_type_T–C_POS16', 'MM_type_T–G_POS16', 'MM_type_T–A_POS16', 'MM_type_G–A_POS16', 'MM_type_G–T_POS16', 'MM_type_G–C_POS16', 'MM_type_C–A_POS16', 'MM_type_C–T_POS16', 'MM_type_C–G_POS16', 'MM_type_other_POS16']
for i in l1:
total = POT[i].sum()
l2.append(total/524)
l3.append(sem(POT[i]))
print("\n Positive off-target \n")
print(l2)
print(l3)
l2=[]
l3=[]
for i in l1:
total = NOT[i].sum()
l2.append(total/525)
l3.append(sem(NOT[i]))
print("\n Negative off-targets \n")
print(l2)
print(l3)
import pandas as pd
df= pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset-sampled-525PN.csv", index_col=[0], encoding="cp1252")
l1=['MM_type_A–T_POS16', 'MM_type_A–C_POS16', 'MM_type_A–G_POS16', 'MM_type_T–C_POS16', 'MM_type_T–G_POS16', 'MM_type_T–A_POS16', 'MM_type_G–A_POS16', 'MM_type_G–T_POS16', 'MM_type_G–C_POS16', 'MM_type_C–A_POS16', 'MM_type_C–T_POS16', 'MM_type_C–G_POS16', 'MM_type_other_POS16']
for i in l1:
#print(df.groupby("Y")[i].describe())
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
from scipy import stats
print(i)
print(stats.shapiro(POT[i]))
print(stats.shapiro(NOT[i]))
print(stats.ttest_ind(POT[i], NOT[i], equal_var = False))
print("\n")
###Output
MM_type_A–T_POS16
ShapiroResult(statistic=0.06971222162246704, pvalue=1.6815581571897805e-44)
ShapiroResult(statistic=0.06961178779602051, pvalue=1.5414283107572988e-44)
Ttest_indResult(statistic=0.0030261460617221144, pvalue=0.9975860649340222)
MM_type_A–C_POS16
ShapiroResult(statistic=0.03488314151763916, pvalue=5.605193857299268e-45)
ShapiroResult(statistic=0.12379896640777588, pvalue=1.2051166793193427e-43)
Ttest_indResult(statistic=-2.5151111358066585, pvalue=0.012118598686519414)
MM_type_A–G_POS16
ShapiroResult(statistic=0.02032226324081421, pvalue=2.802596928649634e-45)
ShapiroResult(statistic=0.13166457414627075, pvalue=1.6255062186167878e-43)
Ttest_indResult(statistic=-3.0798508023492346, pvalue=0.002163754324073748)
MM_type_T–C_POS16
ShapiroResult(statistic=0.05897265672683716, pvalue=1.1210387714598537e-44)
ShapiroResult(statistic=0.09845596551895142, pvalue=4.624284932271896e-44)
Ttest_indResult(statistic=-1.1580408251249505, pvalue=0.24713974068479352)
MM_type_T–G_POS16
ShapiroResult(statistic=0.03488314151763916, pvalue=5.605193857299268e-45)
ShapiroResult(statistic=0.2870124578475952, pvalue=1.0420335640412205e-40)
Ttest_indResult(statistic=-5.98775779985798, pvalue=3.7251696993034566e-09)
MM_type_T–A_POS16
ShapiroResult(statistic=0.02032226324081421, pvalue=2.802596928649634e-45)
ShapiroResult(statistic=0.09845596551895142, pvalue=4.624284932271896e-44)
Ttest_indResult(statistic=-2.3461676539794296, pvalue=0.01926468583281722)
MM_type_G–A_POS16
ShapiroResult(statistic=0.02032226324081421, pvalue=2.802596928649634e-45)
ShapiroResult(statistic=0.13166457414627075, pvalue=1.6255062186167878e-43)
Ttest_indResult(statistic=-3.0798508023492346, pvalue=0.002163754324073748)
MM_type_G–T_POS16
ShapiroResult(statistic=0.03488314151763916, pvalue=5.605193857299268e-45)
ShapiroResult(statistic=0.10722720623016357, pvalue=6.445972935894159e-44)
Ttest_indResult(statistic=-2.1223567063704074, pvalue=0.03413518700340471)
MM_type_G–C_POS16
ShapiroResult(statistic=0.04745042324066162, pvalue=8.407790785948902e-45)
ShapiroResult(statistic=0.09845596551895142, pvalue=4.624284932271896e-44)
Ttest_indResult(statistic=-1.513202984791976, pvalue=0.13059051449306164)
MM_type_C–A_POS16
ShapiroResult(statistic=1.0, pvalue=1.0)
ShapiroResult(statistic=0.10722720623016357, pvalue=6.445972935894159e-44)
Ttest_indResult(statistic=-3.0231663671920095, pvalue=0.0026239106331308205)
MM_type_C–T_POS16
ShapiroResult(statistic=0.3824275732040405, pvalue=1.0795737893810966e-38)
ShapiroResult(statistic=0.10722720623016357, pvalue=6.445972935894159e-44)
Ttest_indResult(statistic=6.81777267838023, pvalue=2.035566040283443e-11)
MM_type_C–G_POS16
ShapiroResult(statistic=0.03488314151763916, pvalue=5.605193857299268e-45)
ShapiroResult(statistic=0.13166457414627075, pvalue=1.6255062186167878e-43)
Ttest_indResult(statistic=-2.695584513439305, pvalue=0.007195834172495926)
MM_type_other_POS16
ShapiroResult(statistic=0.02032226324081421, pvalue=2.802596928649634e-45)
ShapiroResult(statistic=0.08930468559265137, pvalue=3.2229864679470793e-44)
Ttest_indResult(statistic=-2.130835704701895, pvalue=0.03346461683666276)
###Markdown
mismatch at position 17
###Code
import pandas as pd
from scipy import stats
from scipy.stats import sem
OT_data1=pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset-sampled-525PN.csv", index_col=[0], encoding="cp1252")
l2=[]
l3=[]
df = pd.DataFrame(OT_data1)
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
print(len(POT))
print(len(NOT))
#print(len(df))
l1=['MM_type_A–T_POS17', 'MM_type_A–C_POS17', 'MM_type_A–G_POS17', 'MM_type_T–C_POS17', 'MM_type_T–G_POS17', 'MM_type_T–A_POS17', 'MM_type_G–A_POS17', 'MM_type_G–T_POS17', 'MM_type_G–C_POS17', 'MM_type_C–A_POS17', 'MM_type_C–T_POS17', 'MM_type_C–G_POS17', 'MM_type_other_POS17']
for i in l1:
total = POT[i].sum()
l2.append(total/524)
l3.append(sem(POT[i]))
print("\n Positive off-target \n")
print(l2)
print(l3)
l2=[]
l3=[]
for i in l1:
total = NOT[i].sum()
l2.append(total/525)
l3.append(sem(NOT[i]))
print("\n Negative off-targets \n")
print(l2)
print(l3)
import pandas as pd
df= pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset-sampled-525PN.csv", index_col=[0], encoding="cp1252")
l1=['MM_type_A–T_POS17', 'MM_type_A–C_POS17', 'MM_type_A–G_POS17', 'MM_type_T–C_POS17', 'MM_type_T–G_POS17', 'MM_type_T–A_POS17', 'MM_type_G–A_POS17', 'MM_type_G–T_POS17', 'MM_type_G–C_POS17', 'MM_type_C–A_POS17', 'MM_type_C–T_POS17', 'MM_type_C–G_POS17', 'MM_type_other_POS17']
for i in l1:
#print(df.groupby("Y")[i].describe())
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
from scipy import stats
print(i)
print(stats.shapiro(POT[i]))
print(stats.shapiro(NOT[i]))
print(stats.ttest_ind(POT[i], NOT[i], equal_var = False))
print("\n")
###Output
MM_type_A–T_POS17
ShapiroResult(statistic=0.02032226324081421, pvalue=2.802596928649634e-45)
ShapiroResult(statistic=0.13928401470184326, pvalue=2.1720126197034665e-43)
Ttest_indResult(statistic=-3.2408211076048796, pvalue=0.0012571431699250233)
MM_type_A–C_POS17
ShapiroResult(statistic=0.03488314151763916, pvalue=5.605193857299268e-45)
ShapiroResult(statistic=0.13928401470184326, pvalue=2.1720126197034665e-43)
Ttest_indResult(statistic=-2.8674332971783785, pvalue=0.004265274533808534)
MM_type_A–G_POS17
ShapiroResult(statistic=0.4217395782470703, pvalue=8.406314938406276e-38)
ShapiroResult(statistic=0.11566287279129028, pvalue=8.828180325246348e-44)
Ttest_indResult(statistic=7.707843928945487, pvalue=4.586963680171943e-14)
MM_type_T–C_POS17
ShapiroResult(statistic=0.02032226324081421, pvalue=2.802596928649634e-45)
ShapiroResult(statistic=0.13928401470184326, pvalue=2.1720126197034665e-43)
Ttest_indResult(statistic=-3.2408211076048796, pvalue=0.0012571431699250233)
MM_type_T–G_POS17
ShapiroResult(statistic=0.06971222162246704, pvalue=1.6815581571897805e-44)
ShapiroResult(statistic=0.13928401470184326, pvalue=2.1720126197034665e-43)
Ttest_indResult(statistic=-1.9001751582391653, pvalue=0.05773677236895585)
MM_type_T–A_POS17
ShapiroResult(statistic=0.04745042324066162, pvalue=8.407790785948902e-45)
ShapiroResult(statistic=0.1933377981185913, pvalue=1.8637269575520067e-42)
Ttest_indResult(statistic=-3.7359853062104422, pvalue=0.00020276261208925026)
MM_type_G–A_POS17
ShapiroResult(statistic=0.08943074941635132, pvalue=3.5032461608120427e-44)
ShapiroResult(statistic=0.08930468559265137, pvalue=3.2229864679470793e-44)
Ttest_indResult(statistic=0.003587496657737334, pvalue=0.9971382813515856)
MM_type_G–T_POS17
ShapiroResult(statistic=0.03488314151763916, pvalue=5.605193857299268e-45)
ShapiroResult(statistic=0.10722720623016357, pvalue=6.445972935894159e-44)
Ttest_indResult(statistic=-2.1223567063704074, pvalue=0.03413518700340471)
MM_type_G–C_POS17
ShapiroResult(statistic=0.04745042324066162, pvalue=8.407790785948902e-45)
ShapiroResult(statistic=0.05888634920120239, pvalue=1.1210387714598537e-44)
Ttest_indResult(statistic=-0.3764182676987786, pvalue=0.7066837079381533)
MM_type_C–A_POS17
ShapiroResult(statistic=0.03488314151763916, pvalue=5.605193857299268e-45)
ShapiroResult(statistic=0.10722720623016357, pvalue=6.445972935894159e-44)
Ttest_indResult(statistic=-2.1223567063704074, pvalue=0.03413518700340471)
MM_type_C–T_POS17
ShapiroResult(statistic=0.03488314151763916, pvalue=5.605193857299268e-45)
ShapiroResult(statistic=0.05888634920120239, pvalue=1.1210387714598537e-44)
Ttest_indResult(statistic=-0.8162385006882964, pvalue=0.41456949370750174)
MM_type_C–G_POS17
ShapiroResult(statistic=0.04745042324066162, pvalue=8.407790785948902e-45)
ShapiroResult(statistic=0.10722720623016357, pvalue=6.445972935894159e-44)
Ttest_indResult(statistic=-1.7403948643345135, pvalue=0.08215498613319558)
MM_type_other_POS17
ShapiroResult(statistic=1.0, pvalue=1.0)
ShapiroResult(statistic=0.13166457414627075, pvalue=1.6255062186167878e-43)
Ttest_indResult(statistic=-3.5010441216186448, pvalue=0.0005029722612375902)
###Markdown
mismatch at position 18
###Code
import pandas as pd
from scipy import stats
from scipy.stats import sem
OT_data1=pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset-sampled-525PN.csv", index_col=[0], encoding="cp1252")
l2=[]
l3=[]
df = pd.DataFrame(OT_data1)
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
print(len(POT))
print(len(NOT))
#print(len(df))
l1=['MM_type_A–T_POS18', 'MM_type_A–C_POS18', 'MM_type_A–G_POS18', 'MM_type_T–C_POS18', 'MM_type_T–G_POS18', 'MM_type_T–A_POS18', 'MM_type_G–A_POS18', 'MM_type_G–T_POS18', 'MM_type_G–C_POS18', 'MM_type_C–A_POS18', 'MM_type_C–T_POS18', 'MM_type_C–G_POS18', 'MM_type_other_POS18']
for i in l1:
total = POT[i].sum()
l2.append(total/524)
l3.append(sem(POT[i]))
print("\n Positive off-target \n")
print(l2)
print(l3)
l2=[]
l3=[]
for i in l1:
total = NOT[i].sum()
l2.append(total/525)
l3.append(sem(NOT[i]))
print("\n Negative off-targets \n")
print(l2)
print(l3)
import pandas as pd
df= pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset-sampled-525PN.csv", index_col=[0], encoding="cp1252")
l1=['MM_type_A–T_POS18', 'MM_type_A–C_POS18', 'MM_type_A–G_POS18', 'MM_type_T–C_POS18', 'MM_type_T–G_POS18', 'MM_type_T–A_POS18', 'MM_type_G–A_POS18', 'MM_type_G–T_POS18', 'MM_type_G–C_POS18', 'MM_type_C–A_POS18', 'MM_type_C–T_POS18', 'MM_type_C–G_POS18', 'MM_type_other_POS18']
for i in l1:
#print(df.groupby("Y")[i].describe())
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
from scipy import stats
print(i)
print(stats.shapiro(POT[i]))
print(stats.shapiro(NOT[i]))
print(stats.ttest_ind(POT[i], NOT[i], equal_var = False))
print("\n")
###Output
MM_type_A–T_POS18
ShapiroResult(statistic=0.02032226324081421, pvalue=2.802596928649634e-45)
ShapiroResult(statistic=0.1608561873435974, pvalue=5.05868745621259e-43)
Ttest_indResult(statistic=-3.687160552099762, pvalue=0.0002476444065083602)
MM_type_A–C_POS18
ShapiroResult(statistic=0.02032226324081421, pvalue=2.802596928649634e-45)
ShapiroResult(statistic=0.1608561873435974, pvalue=5.05868745621259e-43)
Ttest_indResult(statistic=-3.687160552099762, pvalue=0.0002476444065083602)
MM_type_A–G_POS18
ShapiroResult(statistic=0.03488314151763916, pvalue=5.605193857299268e-45)
ShapiroResult(statistic=0.13166457414627075, pvalue=1.6255062186167878e-43)
Ttest_indResult(statistic=-2.6955845134393046, pvalue=0.007195834172495933)
MM_type_T–C_POS18
ShapiroResult(statistic=0.02032226324081421, pvalue=2.802596928649634e-45)
ShapiroResult(statistic=0.1608561873435974, pvalue=5.05868745621259e-43)
Ttest_indResult(statistic=-3.687160552099762, pvalue=0.0002476444065083602)
MM_type_T–G_POS18
ShapiroResult(statistic=0.02032226324081421, pvalue=2.802596928649634e-45)
ShapiroResult(statistic=0.11566287279129028, pvalue=8.828180325246348e-44)
Ttest_indResult(statistic=-2.7339765624870176, pvalue=0.006433316357317949)
MM_type_T–A_POS18
ShapiroResult(statistic=0.03488314151763916, pvalue=5.605193857299268e-45)
ShapiroResult(statistic=0.18080121278762817, pvalue=1.1210387714598537e-42)
Ttest_indResult(statistic=-3.767533608344902, pvalue=0.00018015617058038242)
MM_type_G–A_POS18
ShapiroResult(statistic=0.14687281847000122, pvalue=3.1389085600875902e-43)
ShapiroResult(statistic=0.1608561873435974, pvalue=5.05868745621259e-43)
Ttest_indResult(statistic=-0.36488407349485835, pvalue=0.7152719146486024)
MM_type_G–T_POS18
ShapiroResult(statistic=0.05897265672683716, pvalue=1.1210387714598537e-44)
ShapiroResult(statistic=0.05888634920120239, pvalue=1.1210387714598537e-44)
Ttest_indResult(statistic=0.0027040659996857503, pvalue=0.9978429852313967)
MM_type_G–C_POS18
ShapiroResult(statistic=1.0, pvalue=1.0)
ShapiroResult(statistic=0.10722720623016357, pvalue=6.445972935894159e-44)
Ttest_indResult(statistic=-3.0231663671920095, pvalue=0.0026239106331308205)
MM_type_C–A_POS18
ShapiroResult(statistic=1.0, pvalue=1.0)
ShapiroResult(statistic=0.06961178779602051, pvalue=1.5414283107572988e-44)
Ttest_indResult(statistic=-2.2446517632945957, pvalue=0.025206931337515046)
MM_type_C–T_POS18
ShapiroResult(statistic=0.41604894399642944, pvalue=6.202659317820198e-38)
ShapiroResult(statistic=0.11566287279129028, pvalue=8.828180325246348e-44)
Ttest_indResult(statistic=7.549186637628393, pvalue=1.4163882532781699e-13)
MM_type_C–G_POS18
ShapiroResult(statistic=0.02032226324081421, pvalue=2.802596928649634e-45)
ShapiroResult(statistic=0.06961178779602051, pvalue=1.5414283107572988e-44)
Ttest_indResult(statistic=-1.6369060897558205, pvalue=0.10208248787025376)
MM_type_other_POS18
ShapiroResult(statistic=1.0, pvalue=1.0)
ShapiroResult(statistic=0.1608561873435974, pvalue=5.05868745621259e-43)
Ttest_indResult(statistic=-4.058511151905034, pvalue=5.6918917422593785e-05)
###Markdown
mismatch at position 23
###Code
import pandas as pd
from scipy import stats
from scipy.stats import sem
OT_data1=pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset-sampled-525PN.csv", index_col=[0], encoding="cp1252")
l2=[]
l3=[]
df = pd.DataFrame(OT_data1)
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
print(len(POT))
print(len(NOT))
#print(len(df))
l1=['MM_type_A–T_POS23', 'MM_type_A–C_POS23', 'MM_type_A–G_POS23', 'MM_type_T–C_POS23', 'MM_type_T–G_POS23', 'MM_type_T–A_POS23', 'MM_type_G–A_POS23', 'MM_type_G–T_POS23', 'MM_type_G–C_POS23', 'MM_type_C–A_POS23', 'MM_type_C–T_POS23', 'MM_type_C–G_POS23', 'MM_type_other_POS23']
for i in l1:
total = POT[i].sum()
l2.append(total/524)
l3.append(sem(POT[i]))
print("\n Positive off-target \n")
print(l2)
print(l3)
l2=[]
l3=[]
for i in l1:
total = NOT[i].sum()
l2.append(total/525)
l3.append(sem(NOT[i]))
print("\n Negative off-targets \n")
print(l2)
print(l3)
import pandas as pd
df= pd.read_csv("D://PhD_related/manuscript_obj1/seq_encoding_map/LbCpf1/LbCpf1_dataset-sampled-525PN.csv", index_col=[0], encoding="cp1252")
l1=['MM_type_A–T_POS23', 'MM_type_A–C_POS23', 'MM_type_A–G_POS23', 'MM_type_T–C_POS23', 'MM_type_T–G_POS23', 'MM_type_T–A_POS23', 'MM_type_G–A_POS23', 'MM_type_G–T_POS23', 'MM_type_G–C_POS23', 'MM_type_C–A_POS23', 'MM_type_C–T_POS23', 'MM_type_C–G_POS23', 'MM_type_other_POS23']
for i in l1:
#print(df.groupby("Y")[i].describe())
POT = df[(df['Y'] == 1)]
NOT = df[(df['Y'] == 0)]
from scipy import stats
print(i)
print(stats.shapiro(POT[i]))
print(stats.shapiro(NOT[i]))
print(stats.ttest_ind(POT[i], NOT[i], equal_var = False))
print("\n")
###Output
MM_type_A–T_POS23
ShapiroResult(statistic=0.06971222162246704, pvalue=1.6815581571897805e-44)
ShapiroResult(statistic=0.08930468559265137, pvalue=3.2229864679470793e-44)
Ttest_indResult(statistic=-0.576991315927017, pvalue=0.5640726112881687)
MM_type_A–C_POS23
ShapiroResult(statistic=1.0, pvalue=1.0)
ShapiroResult(statistic=0.1608561873435974, pvalue=5.05868745621259e-43)
Ttest_indResult(statistic=-4.058511151905034, pvalue=5.6918917422593785e-05)
MM_type_A–G_POS23
ShapiroResult(statistic=0.07982927560806274, pvalue=2.5223372357846707e-44)
ShapiroResult(statistic=0.07971543073654175, pvalue=2.2420775429197073e-44)
Ttest_indResult(statistic=0.003318172088653855, pvalue=0.997353118679428)
MM_type_T–C_POS23
ShapiroResult(statistic=1.0, pvalue=1.0)
ShapiroResult(statistic=0.10722720623016357, pvalue=6.445972935894159e-44)
Ttest_indResult(statistic=-3.0231663671920095, pvalue=0.0026239106331308205)
MM_type_T–G_POS23
ShapiroResult(statistic=0.11582159996032715, pvalue=9.528829557408756e-44)
ShapiroResult(statistic=0.09845596551895142, pvalue=4.624284932271896e-44)
Ttest_indResult(statistic=0.47912640860437583, pvalue=0.6319500099689651)
MM_type_T–A_POS23
ShapiroResult(statistic=0.09859353303909302, pvalue=5.044674471569341e-44)
ShapiroResult(statistic=0.14667779207229614, pvalue=2.9006878211523713e-43)
Ttest_indResult(statistic=-1.2884328831611422, pvalue=0.1978998842713464)
MM_type_G–A_POS23
ShapiroResult(statistic=0.08943074941635132, pvalue=3.5032461608120427e-44)
ShapiroResult(statistic=0.12379896640777588, pvalue=1.2051166793193427e-43)
Ttest_indResult(statistic=-0.9466224126596177, pvalue=0.3440598024197635)
MM_type_G–T_POS23
ShapiroResult(statistic=0.04745042324066162, pvalue=8.407790785948902e-45)
ShapiroResult(statistic=0.047379910945892334, pvalue=7.006492321624085e-45)
Ttest_indResult(statistic=0.002339543521356175, pvalue=0.9981337617218948)
MM_type_G–C_POS23
ShapiroResult(statistic=1.0, pvalue=1.0)
ShapiroResult(statistic=0.10722720623016357, pvalue=6.445972935894159e-44)
Ttest_indResult(statistic=-3.0231663671920095, pvalue=0.0026239106331308205)
MM_type_C–A_POS23
ShapiroResult(statistic=0.05897265672683716, pvalue=1.1210387714598537e-44)
ShapiroResult(statistic=0.11566287279129028, pvalue=8.828180325246348e-44)
Ttest_indResult(statistic=-1.6118943785985171, pvalue=0.10733979321129199)
MM_type_C–T_POS23
ShapiroResult(statistic=0.07982927560806274, pvalue=2.5223372357846707e-44)
ShapiroResult(statistic=0.0202905535697937, pvalue=2.802596928649634e-45)
Ttest_indResult(statistic=1.8988567784294825, pvalue=0.05799803780775564)
MM_type_C–G_POS23
ShapiroResult(statistic=0.5900723934173584, pvalue=2.5736897798277037e-33)
ShapiroResult(statistic=0.13166457414627075, pvalue=1.6255062186167878e-43)
Ttest_indResult(statistic=14.03495901459156, pvalue=4.030746023101553e-39)
MM_type_other_POS23
ShapiroResult(statistic=0.02032226324081421, pvalue=2.802596928649634e-45)
ShapiroResult(statistic=0.12379896640777588, pvalue=1.2051166793193427e-43)
Ttest_indResult(statistic=-2.911309988101577, pvalue=0.003728338150254819)
|
random forest.ipynb | ###Markdown
Random Forest
###Code
from sklearn import tree
import pandas as pd
from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(data, target, random_state=42)
dff = pd.read_csv("output.csv.txt")
dff.columns
dff
df_drop = dff.drop(columns=['Unnamed: 0', 'Customer ID','Dealer codes from where customer has purchased the Two wheeler'])
df_drop
target = df_drop["Target variable ( 1: Defaulters / 0: Non-Defaulters)"]
target_names = ["Not Defaulted","Defaulted"]
data = df_drop.drop("Target variable ( 1: Defaulters / 0: Non-Defaulters)", axis=1)
feature_names = data.columns
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(data, target, random_state=42)
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
clf.score(X_test, y_test)
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=200)
rf = rf.fit(X_train, y_train)
rf.score(X_test, y_test)
sorted(zip(rf.feature_importances_, feature_names), reverse=True)
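# Sketch (not in the original notebook): accuracy alone can be misleading if
# defaulters are the minority class, so also inspect the confusion matrix and
# per-class precision/recall on the test split.
from sklearn.metrics import classification_report, confusion_matrix
y_pred = rf.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred, target_names=target_names))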
###Output
_____no_output_____ |
tutorial_pytorch_book/01_pytorch.ipynb | ###Markdown
PytorchComo dijimos anteriormente, PyTorch es un paquete de Python diseñado para realizar cálculos numéricos haciendo uso de la programación de tensores. Además permite su ejecución en GPU para acelerar los cálculos.En la práctica es un sustituto bastante potente de Numpy, una librería casi estándar para trabajar con arrays en python. ¿Cómo funciona pytorch? Vamos a ver un tutorial rápido del tipo de datos de pytorch y cómo trabaja internamente esta librería. Para esto tendrás que haber seguido correctamente todos los pasos anteriores. Para esto necesitas la **versión interactiva del notebook**. Para esta sección: * **Abre Jupyter** (consultar arriba)* Navega hasta el notebook `00 Práctica Deep Learning - Introducción.ipynb` y ábrelo.* Baja hasta esta sección. Pero antes de nada os cuento algunas diferencias entre matlab y python: * Python es un **lenguaje de propósito general** mientras que matlab es un lenguaje **específico para ciencia e ingeniería**. Esto no es ni bueno ni malo; matlab es más fácil de utilizar para ingeniería sin preparación, pero python es más versátil. * Debido a ello, **Matlab carga automáticamente todas las funciones** mientras que en Python, **hay que cargar las librerías que vamos a utilizar**. Esto hace que usar funciones en matlab sea más sencillo (dos letras menos que escribir), pero a costa de que es más difícil gestionar la memoria, y los nombres de funciones se puden superponer. Supon que `A` es una matriz. Para hacer la pseudoinversa, en matlab hacemos: ```matlabpinv(A)```* en python tenemos que cargar la librería:```pythonimport scipy as spsp.pinv(A)```* Esto genera una cosa llamada **espacio de nombres**, en el que las funciones de cada librería van precedidas por su abreviatura (si importamos con `import x as y`) o el propio nombre si usamos `import torch`, `torch.tensor()`, mientras que en matlab basta con llamar a la función. Por ejemplo, cuando en matlab escribimos: - `vector = [1, 2, 3]`* en python+pytorch necesitamos especificar que es un tensor (un array multidimensional): - `vector = torch.tensor([1,2,3])`Vamos a cargar la librería con `import torch` y ver que podemos, por ejemplo, construir una matriz de 5x3 aleatoria. Para ejecutar una celda, basta con seleccionarla (bien con las flechas del teclado, bien con el ratón) y pulsando `Ctrl+Enter` (o bien pulsando "Run" en la barra superior).
###Code
import torch
x = torch.rand(5, 3)
print(x)
###Output
tensor([[0.2472, 0.7132, 0.1375],
[0.7200, 0.2924, 0.3832],
[0.4341, 0.3518, 0.9204],
[0.3709, 0.2253, 0.6080],
[0.9817, 0.5234, 0.2136]])
###Markdown
Or a matrix of zeros:
###Code
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
###Output
tensor([[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
###Markdown
Or a tensor built from given data; we can display it with `print`, but we can also access its attributes, such as the size of the matrix:
###Code
x = torch.tensor([[5.5, 3, 3],[2,1, 5], [3,4,2],[7,6,5],[2,1,2]])
print(x)
print(x.shape)
###Output
tensor([[5.5000, 3.0000, 3.0000],
[2.0000, 1.0000, 5.0000],
[3.0000, 4.0000, 2.0000],
[7.0000, 6.0000, 5.0000],
[2.0000, 1.0000, 2.0000]])
torch.Size([5, 3])
###Markdown
Tensors can be operated on in the usual way:
###Code
y = torch.rand(5, 3)
print(x + y)
###Output
tensor([[6.3343, 3.7080, 3.9404],
[2.3815, 1.0040, 5.7915],
[3.0152, 4.8507, 2.5595],
[7.2281, 6.1131, 5.3825],
[2.3290, 1.9387, 2.1796]])
###Markdown
But watch out: they have to be the same size, otherwise it will raise an error:
###Code
y = torch.rand(2,3)
print(x+y)
###Output
_____no_output_____
###Markdown
You can do *slicing* just as in numpy or Matlab. For example, to extract the column with index 1:
###Code
print(x[:, 1])
###Output
tensor([3., 1., 4., 6., 1.])
###Markdown
Another feature that will be very useful is changing the shape of a matrix, which in other languages is known as `reshape`; here it is a method of the tensor object called `view()`:
###Code
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8) # the size -1 is inferred from other dimensions
print(x.size(), y.size(), z.size())
###Output
torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])
###Markdown
We can operate with tensors and scalar values:
###Code
y = x + 2
print(y)
###Output
tensor([[2.9619, 1.8762, 2.1108, 2.6227],
[2.4012, 2.8760, 0.8042, 1.2621],
[1.2319, 2.2896, 1.6425, 3.2375],
[3.3435, 2.0807, 3.2948, 0.7555]])
###Markdown
And we can also define functions that perform these operations, and apply them to different tensors:
###Code
def modulo(x,y):
aux = x**2 + y**2
salida = torch.sqrt(aux)
return salida
print(modulo(x,y))
###Output
tensor([[3.1142, 1.8803, 2.1137, 2.6956],
[2.4345, 3.0064, 1.4411, 1.4620],
[1.4517, 2.3079, 1.6809, 3.4660],
[3.6033, 2.0823, 3.5401, 1.4559]])
###Markdown
And, crucially, pytorch keeps a record of the operations performed on a tensor:
###Code
x = torch.ones(2, 2, requires_grad=True)
y = x + 2
print(y)
###Output
tensor([[3., 3.],
[3., 3.]], grad_fn=<AddBackward0>)
###Markdown
The `grad_fn` property will be fundamental when training neural networks, since it stores the gradient of the operation or function that has been applied to the data. This is preserved across all operations:
###Code
z = y * y * 3
out = z.mean()
print(z, out)
###Output
tensor([[27., 27.],
[27., 27.]], grad_fn=<MulBackward0>) tensor(27., grad_fn=<MeanBackward0>)
###Markdown
They even keep track of operations carried out through functions:
###Code
print(modulo(x,y))
###Output
tensor([[3.1623, 3.1623],
[3.1623, 3.1623]], grad_fn=<SqrtBackward>)
###Markdown
To compute the gradient along these operations we use the `.backward()` function, which performs the backward propagation of the gradient. We can display the gradient $\frac{\partial out}{\partial x}$ with the `x.grad` property, so let's take a look:
###Code
out.backward()
print(x.grad)
###Output
tensor([[4.5000, 4.5000],
[4.5000, 4.5000]])
###Markdown
Here we get a 2x2 matrix filled with the value 4.5. If we call the output tensor $o$, we have: $$ o = \frac{1}{4} \sum_i z_i, \quad z_i = 3(x_i + 2)^2$$ so $z_i|_{x_i=1} = 27$. Then $\frac{\partial o}{\partial x_i} = \frac{3}{2}(x_i+2)$ and $\frac{\partial o}{\partial x_i} |_{x_i=1} = \frac{9}{2} = 4.5$. Thanks to this, and to the mathematics of the backpropagation algorithm (see the introductory video for this practical), the weights of a neural network can be updated according to a loss function. Gradient computation can be switched on and off with the `torch.no_grad()` expression.
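As a quick numerical check of this formula, we can rebuild the same computation in one cell (using the `torch` import from above) and compare the autograd result with $\frac{3}{2}(x_i+2)$ at $x_i=1$:

```python
# Rebuild out = mean(3*(x+2)**2) and compare autograd with the closed-form derivative
x = torch.ones(2, 2, requires_grad=True)
out = (3 * (x + 2) ** 2).mean()
out.backward()
print(x.grad)          # tensor([[4.5000, 4.5000], [4.5000, 4.5000]])
print(1.5 * (1 + 2))   # 4.5, matching 3/2 * (x_i + 2) evaluated at x_i = 1
```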
###Code
print(x.requires_grad)
print((x ** 2).requires_grad)
with torch.no_grad():
print((x ** 2).requires_grad)
###Output
True
True
False
|
analysis-poe2015.ipynb | ###Markdown
Gender representation at specialized astronomy conferences *Nick Cox* Following the recent study by Kyle Willett on gender representation at specialized astronomy conferences I have gathered similar data at another specialised astrophysics meeting, "Physics of Evolved Stars" (henceforth, POE2015). POE2015 was held in Nice, France from [8-12 June, 2015](http://poe2015.sciencesconf.org). The analysis strategy (ipyton notebook) has been cloned from Kyle Willet's github respository.Briefly, recapitulating, the goal of these surveys is to track participation at conferences as a function of gender, particularly relating to question/answer sessions after oral presentations. This is intended to address some basic questions about behavior at conferences, such as:* How equal are the allotments of talks among men and women?* Are men and women asking questions at the same rate?* Does it matter if the speaker/session chair is a man or a woman?* Are women/men more likely to ask the ***first*** question in a session? Does this affect the gender balance of remaining questions?These questions were first addressed with data-gathering efforts first led by James Davenport at the [223rd American Astronomical Society (AAS) meeting in 2014](http://www.ifweassume.com/2014/03/report-gender-in-aas-talks.html), and repeated at the [225th AAS meeting in 2015](http://nbviewer.ipython.org/github/jradavenport/aas225-gender/blob/master/analysis.ipynb) and the [National Astronomy Meeting (NAM) in the UK in 2014](http://arxiv.org/abs/1412.4571). These focus on question/answer sessions since these are public, quantifiable, and one of the main ways in which professional interaction takes place. These studies found that men ask disproportionally more questions than women, and that the gender of the session chair has a strong impact on the gender ratio of questioners. Following the gender representation at the above mentioned large, thematically broad meetings, Kyle Willett addressed the issue whether smaller, more specialized meetings follow the same trends. He tracked this data for a recent conference on "Unveiling the AGN-Galaxy Evolution Connection" (Puerto Varas, Chile from [9-13 March 2015](http://www.astro-udec.cl/agn15/)). There were a total of 200 people on the list of participants and the gender of speakers, chairs, and questioners for all 72 talks were tracked.For POE215 there were 133 registered participants (97 male / 36 female) and a total of 42 oral presentation (excluding the intial two "review" talks (MM) and conference summary (F)). In addition to the gender of speakers, chairs and questioners, I also attempted to track the identity of the (most frequent) questioners. The discussion sessions are excluded for pratical reasons. Questioners were tracked for 39 talks. What are the overall demographics of the conference?
###Code
%pylab inline
'''
Note: this notebook requires the following, fairly standard Python packages:
numpy
scipy
matplotlib
pandas
And one not-as-common package (also available through PyPI):
sexmachine
'''
from __future__ import division
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
gencolors =('purple','orange')
urlbase = 'https://raw.githubusercontent.com/kalaschsoyuz/poe2015-gender/master'
#urlbase = 'http://localhost:8888/edit/Downloads/chile2015-gender-master/'
q = pd.read_csv('%s/question_data.csv' % urlbase)
c = pd.read_csv('%s/chair_data.csv' % urlbase)
fig = plt.figure(1,(6,6))
# Speakers
vc_speakers = q['speaker'].value_counts()
# People asking questions of the speakers
qa=list(q['questions'])
qa.remove(' ')
vc_questioners = pd.value_counts(list(''.join(qa)))
# Chairs of the sessions
vc_chairs = c['gender'].value_counts()
# Attendees
number_attendees = 133
number_f = 36
number_m = 97
vc_attendees = pd.value_counts(list('M'*number_m + 'F'*number_f))
#countrydata = pd.read_csv('%s/map/countries.csv' % urlbase)
#names = countrydata['name']
#firstnames = [x.split(' ')[0] for x in names]
# Guess (based on first names) what gender the attendees are
#from sexmachine import detector as gender
#d = gender.Detector(case_sensitive=False)
#from collections import Counter
#genders = [d.get_gender(fn) for fn in firstnames]
#cg = Counter(genders)
#attendees = list('M'*(cg['male'] + cg['mostly_male'])+'F'*(cg['female'] + cg['mostly_female']))
# Ignores users whose gender cannot be reliably determined from first name
#vc_genderdata = pd.read_csv('%s/.csv' % urlbase)
#vc_attendees = pd.value_counts(attendees)
# People who asked the first question of the speakers
first = [x[1]['questions'][0] for x in q.iterrows()]
first.remove(' ')
vc_firstquestion = pd.value_counts(first)
#vc_personidentifier = pd.read_csv('%s/.csv' % urlbase)   # questioner-identity table (filename missing); unused below
# Load everything into a single dataframe
data = [vc_speakers,vc_chairs,vc_attendees,vc_questioners,vc_firstquestion][::-1]
labels = ['Speakers','Chairs','Attendees','Questioners','First question'][::-1]
normdata = [x/x.sum() for x in data]
# Plot stacked bar chart
ax1 = fig.add_subplot(111)
df = pd.DataFrame(normdata,index=labels)
dfplot = df.plot(kind='barh',stacked=True,ax=ax1,color=gencolors,legend=True)
print df
# Plot the 50-50 split for guidance
ylims1 = ax1.get_ylim()
ax1.vlines(0.5,ylims1[0],ylims1[1],color='k',linestyle='-')
ax1.set_xlabel('Fraction of participants',fontsize=20)
ax1.set_title('Overall demographics at the POE2015 meeting')
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
Analysis of overall demographics: As in the previous survey (Willett 2015) I tracked gender participation for five aspects of the question/answer sessions, and in addition I tracked the identity of the question askers: speakers; chairs of the sessions; all attendees of the conference; people who asked questions of the speaker; people who asked the **first** question of the speaker for any given talk; and people who asked the **most** questions. The gender ratio of the speakers closely matched that of the attendees as a whole (XX% and XX%, respectively). Overall demographics for POE2015 compared to other conferences
###Code
# Plot stacked bar chart
fig = plt.figure(1,(24,6))
ax1 = fig.add_subplot(141)
df = pd.DataFrame(normdata,index=labels)
dfplot = df.plot(kind='barh',stacked=True,ax=ax1,color=gencolors,legend=False)
print df
# Find data positions of the plots
patches = dfplot.patches
yc = [p.get_y() for p in patches]
yc = yc[:int(len(yc)/2)]
height = p.get_height()
ylims1 = ax1.get_ylim()
ax1.vlines(0.5,ylims1[0],ylims1[1],color='k',linestyle='-')
def getfrac(m,f):
return m/(f+m)
# Speaker/questioner data from 225th AAS Meeting (Seattle, WA)
# Data from https://github.com/jradavenport/aas225-gender
aas225_speakers = getfrac(83,51)
aas225_questionaskers = getfrac(305,73)
aas225_firstquestion = getfrac(102,32)
ax1.vlines(aas225_speakers,yc[-1],yc[-1]+height,color='g',linestyle='--')
ax1.vlines(aas225_questionaskers,yc[-4],yc[-4]+height,color='g',linestyle='--')
ax1.vlines(aas225_firstquestion,yc[-5],yc[-5]+height,color='g',linestyle='--')
ax1.text(aas225_speakers,yc[-1]+height,'AAS',ha='center',va='bottom',fontsize=14,color='g')
ax1.set_xlabel('Fraction of participants',fontsize=20)
p,l = ax1.get_legend_handles_labels()
ax1.legend(p,l,loc='upper left')
ax1.set_title('POE2015 vs. speakers at AAS 2015 Winter')
# Speaker/questioner data from National Astronomy Meeting 2014 (Portsmouth, UK)
# Data from Pritchard et al. (2014) http://arXiv.org/abs/1412.4571
ax2 = fig.add_subplot(142)
dfblank = pd.DataFrame(normdata,index=[' ']*5)
dfplot2 = dfblank.plot(kind='barh',stacked=True,ax=ax2,color=gencolors,legend=False)
nam_speakers = getfrac(181,81)
nam_chairs = getfrac(188,75)
nam_attendees = getfrac(452,172)
nam_questionaskers = getfrac(476,101)
nam_firstquestion = getfrac(216,35)
ylims2 = ax2.get_ylim()
ax2.vlines(0.5,ylims2[0],ylims2[1],color='k',linestyle='-')
ax2.vlines(nam_speakers,yc[-1],yc[-1]+height,color='g',linestyle='--')
ax2.vlines(nam_chairs,yc[-2],yc[-2]+height,color='g',linestyle='--')
ax2.vlines(nam_attendees,yc[-3],yc[-3]+height,color='g',linestyle='--')
ax2.vlines(nam_questionaskers,yc[-4],yc[-4]+height,color='g',linestyle='--')
ax2.vlines(nam_firstquestion,yc[-5],yc[-5]+height,color='g',linestyle='--')
ax2.text(nam_speakers,yc[-1]+height,'NAM',ha='center',va='bottom',fontsize=14,color='g')
ax2.set_xlabel('Fraction of participants',fontsize=20)
ax2.set_title('POE2015 vs. NAM 2014')
# IAU individual members (as of Apr 2015)
# Data from http://www.iau.org/administration/membership/individual/distribution/
ax3 = fig.add_subplot(143)
dfplot3 = dfblank.plot(kind='barh',stacked=True,ax=ax3,color=gencolors,legend=False)
iau_frac = getfrac(9546,1803)
ylims3 = ax3.get_ylim()
ax3.vlines(0.5,ylims3[0],ylims3[1],color='k',linestyle='-')
ax3.vlines(iau_frac,ylims3[0],yc[-1]+height,color='g',linestyle='--')
ax3.text(iau_frac*1.02,yc[-1]+height,'IAU',ha='center',va='bottom',fontsize=14,color='g')
ax3.set_xlabel('Fraction of participants',fontsize=20)
ax3.set_title('POE2015 vs. IAU individual members')
# Speaker/questioner data from AGN2015 Meeting (Chile)
# Data from xxxxx.
#agn2015_speakers = getfrac(x,x)
#agn2015_questionaskers = getfrac(x,x)
#agn2015_firstquestion = getfrac(x,x)
agn2015_firstquestion =0.614286
agn2015_questionaskers = 0.641509
agn2015_attendees = 0.564246
agn2015_chairs = 0.785714
agn2015_speakers = 0.549296
# Fourth panel: POE2015 vs. the AGN2015 meeting
ax4 = fig.add_subplot(144)
dfplot4 = dfblank.plot(kind='barh',stacked=True,ax=ax4,color=gencolors,legend=False)
ylims4 = ax4.get_ylim()
ax4.vlines(0.5,ylims4[0],ylims4[1],color='k',linestyle='-')
ax4.vlines(agn2015_speakers,yc[-1],yc[-1]+height,color='g',linestyle='--')
ax4.vlines(agn2015_questionaskers,yc[-4],yc[-4]+height,color='g',linestyle='--')
ax4.vlines(agn2015_firstquestion,yc[-5],yc[-5]+height,color='g',linestyle='--')
ax4.text(agn2015_speakers,yc[-1]+height,'AGN2015',ha='center',va='bottom',fontsize=14,color='g')
ax4.set_xlabel('Fraction of participants',fontsize=20)
p,l = ax4.get_legend_handles_labels()
ax4.legend(p,l,loc='upper left')
ax4.set_title('POE2015 vs. AGN2015')
###Output
_____no_output_____
###Markdown
Analysis of the demographics compared to other meetings/organizations: note that POE2015 has a smaller sample size than the larger meetings, and that a few frequent questioners may influence the statistics (see below). How many questions per talk were there? Was this affected by the gender of the speaker?
###Code
# How many questions were there per talk? Did the gender of the speaker affect it?
fig2 = plt.figure(2,(12,6))
ax4 = fig2.add_subplot(121)
qpt = [len(x) for x in q['questions']]
ax4.hist(qpt,bins=range(0,8),histtype='step',range=(0,8),linewidth=3, color='k')
ylims4 = ax4.get_ylim()
ax4.vlines(np.mean(qpt),ylims4[0],ylims4[1],linestyle='--',color='black')
ax4.set_xlabel('Questions per talk',fontsize=16)
ax4.set_ylabel('Count')
ax5 = fig2.add_subplot(122)
mq = [len(x[1]['questions']) for x in q.iterrows() if x[1]['speaker'] == 'M']
fq = [len(x[1]['questions']) for x in q.iterrows() if x[1]['speaker'] == 'F']
ax5.hist(mq,bins=range(0,8),histtype='step',range=(0,8),linewidth=3, color='purple',label='Male speaker')
ax5.hist(fq,bins=range(0,8),histtype='step',range=(0,8),linewidth=3, color='orange',label='Female speaker')
ax5.set_ylim(ax4.get_ylim())
ylims5 = ax5.get_ylim()
ax5.vlines(np.mean(mq),ylims5[0],ylims5[1],linestyle='--',color='purple')
ax5.vlines(np.mean(fq),ylims5[0],ylims5[1],linestyle='--',color='orange')
ax5.set_xlabel('Questions per talk',fontsize=16)
ax5.legend(loc='upper right')
plt.show()
# Test to see if the distribution is different for male vs. female speakers
from scipy.stats import ks_2samp
D,p = ks_2samp(mq,fq)
print 'There are %.1f +- %.1f total questions per talk' % (np.mean(qpt),np.std(qpt))
print 'There are %.1f questions per talk when the speaker is male' % np.mean(mq)
print 'There are %.1f questions per talk when the speaker is female ' % np.mean(fq)
print 'There is a %.1f percent chance that the questions are drawn from the same distribution for male and female speakers.' % (p*100)
###Output
_____no_output_____
###Markdown
A difference in the number of questions per talk depending on the speaker might be interpreted as either a positive or negative effect (preferentially ignoring speakers of certain genders, overaggressively questioning/harassing them, paying attention and engaging with them, etc). More analysis on this data set would be pure speculation from me as to which was responsible at this conference. Did the gender of people of asking questions depend on the speaker's gender?
###Code
fig3 = plt.figure(3,(6,6))
malefirst_maleafter = ['M'*x[1]['questions'].count('M') for x in q.iterrows() if x[1]['speaker'] == 'M']
malefirst_femaleafter = ['F'*x[1]['questions'].count('F') for x in q.iterrows() if x[1]['speaker'] == 'M']
femalefirst_maleafter = ['M'*x[1]['questions'].count('M') for x in q.iterrows() if x[1]['speaker'] == 'F']
femalefirst_femaleafter = ["F"*x[1]['questions'].count('F') for x in q.iterrows() if x[1]['speaker'] == 'F']
vc_malefirst = pd.value_counts(list(''.join(malefirst_maleafter+malefirst_femaleafter)))
vc_femalefirst = pd.value_counts(list(''.join(femalefirst_maleafter+femalefirst_femaleafter)))
# Load everything into a single dataframe
firstdata = [vc_malefirst,vc_femalefirst]
firstlabels = ['Male asks 1st question','Female asks 1st question']
firstnormdata = [x/x.sum() for x in firstdata]
df = pd.DataFrame(firstnormdata,index=firstlabels)
print df
# Plot stacked bar chart
ax = fig3.add_subplot(111)
dfplot = df.plot(kind='barh',stacked=True,ax=ax,color=gencolors,legend=True)
ax.set_xlabel('Fraction of total questions',fontsize=20)
###Output
M F
Male asks 1st question 0.694444 0.305556
Female asks 1st question 0.586538 0.413462
[2 rows x 2 columns]
###Markdown
When women ask the **first** question in a session, women ask on average 40% of the total number of questions in such sessions. However, when men ask the first question in a session, women only ask 31% of the total questions in the session.But this is clearly affected by the fact that the gender of the first question is fixed. To isolate that effect, let's look at the *remaining* questions in the session.
###Code
malefirst_maleafter = ['M'*x[1]['questions'][1:].count('M') for x in q.iterrows() if x[1]['questions'][0] == 'M']
malefirst_femaleafter = ['F'*x[1]['questions'][1:].count('F') for x in q.iterrows() if x[1]['questions'][0] == 'M']
vc_malefirst_remaining = pd.value_counts(list(''.join(malefirst_maleafter+malefirst_femaleafter)))
femalefirst_maleafter = ['M'*x[1]['questions'][1:].count('M') for x in q.iterrows() if x[1]['questions'][0] == 'F']
femalefirst_femaleafter = ["F"*x[1]['questions'][1:].count('F') for x in q.iterrows() if x[1]['questions'][0] == 'F']
vc_femalefirst_remaining = pd.value_counts(list(''.join(femalefirst_maleafter+femalefirst_femaleafter)))
# Load everything into a single dataframe
firstrdata = [vc_malefirst_remaining,vc_femalefirst_remaining]
firstrlabels = ['Male asks 1st question','Female asks 1st question']
firstrnormdata = [x/x.sum() for x in firstrdata]
#print firstrnormdata
dfr = pd.DataFrame(firstrnormdata,index=firstrlabels)
print dfr
# Plot stacked bar chart
fig = plt.figure(4,(6,6))
ax8 = fig.add_subplot(111)
dfplot = dfr.plot(kind='barh',stacked=True,ax=ax8,color=gencolors,legend=True)
ax8.set_xlabel('Fraction of remaining questions',fontsize=16)
ax8.set_xlim(0,1)
# Check statistical significance
# http://stats.stackexchange.com/questions/113602/test-if-two-binomial-distributions-are-statistically-different-from-each-other
def ztest(p1,p2,n1,n2):
    pexp = (n1*p1 + n2*p2) / (n1+n2)
    z = (p1-p2) / np.sqrt(pexp*(1. - pexp)*(1/n1 + 1/n2))
    return z
p1exp,p2exp = dfr['M']
n1 = len(list(''.join(malefirst_maleafter+malefirst_femaleafter)))
n2 = len(list(''.join(femalefirst_maleafter+femalefirst_femaleafter)))
z = ztest(p1exp,p2exp,n1,n2)
print 'z = %.3f' % z
###Output
M F
Male asks 1st question 0.666667 0.333333
Female asks 1st question 0.637931 0.362069
[2 rows x 2 columns]
z = 0.354
###Markdown
When the first question is eliminated from the data, the fraction of the remaining questions asked by females is nearly identical whether the first question came from a male or a female (33% and 36%, respectively). A two-proportion $z$-test indicates that the null hypothesis (no difference between the two proportions) cannot be rejected, with the difference below the $1\sigma$ level. This result differs from the AAS meeting data, which found an extremely strong difference between the two: if the first question was asked by a male, only 10% of the remaining questions were asked by a female, whereas if the first question was asked by a female, 49% of the remaining questions were asked by a female. Does the gender of the session chair affect the distribution of the questioners' gender?
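For reference, the statistic computed by `ztest` above is the standard pooled two-proportion $z$ statistic,

$$\hat p = \frac{n_1 p_1 + n_2 p_2}{n_1 + n_2}, \qquad z = \frac{p_1 - p_2}{\sqrt{\hat p\,(1-\hat p)\left(\frac{1}{n_1}+\frac{1}{n_2}\right)}},$$

where $p_1, p_2$ are the two observed fractions and $n_1, n_2$ are the corresponding numbers of questions.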
###Code
cdict = {}
for k,v in zip(c['block'].values,c['gender'].values):
cdict[k]=v
malechair_maleafter = ['M'*x[1]['questions'].count('M') for x in q.iterrows() if cdict[int(str(x[1]['session']).split('.')[0])] == 'M']
malechair_femaleafter = ['F'*x[1]['questions'].count('F') for x in q.iterrows() if cdict[int(str(x[1]['session']).split('.')[0])] == 'M']
femalechair_maleafter = ['M'*x[1]['questions'].count('M') for x in q.iterrows() if cdict[int(str(x[1]['session']).split('.')[0])] == 'F']
femalechair_femaleafter = ["F"*x[1]['questions'].count('F') for x in q.iterrows() if cdict[int(str(x[1]['session']).split('.')[0])] == 'F']
vc_malechair = pd.value_counts(list(''.join(malechair_maleafter+malechair_femaleafter)))
vc_femalechair = pd.value_counts(list(''.join(femalechair_maleafter+femalechair_femaleafter)))
# Load everything into a single dataframe
chairdata = [vc_malechair,vc_femalechair]
chairlabels = ['Male chair','Female chair']
chairnormdata = [x/x.sum() for x in chairdata]
df = pd.DataFrame(chairnormdata,index=chairlabels)
print df
# Plot stacked bar chart
fig5 = plt.figure(3,(6,6))
ax = fig5.add_subplot(111)
dfplot = df.plot(kind='barh',stacked=True,ax=ax,color=gencolors,legend=True)
ax.set_xlabel('Fraction of total questions',fontsize=20)
# Check statistical significance
p1exp,p2exp = df['M']
n1 = len(list(''.join(malechair_maleafter+malechair_femaleafter)))
n2 = len(list(''.join(femalechair_maleafter+femalechair_femaleafter)))
z = ztest(p1exp,p2exp,n1,n2)
###Output
M F
Male chair 0.640000 0.360000
Female chair 0.648649 0.351351
[2 rows x 2 columns]
###Markdown
When looking at the gender of the session chair (who might have selection biases when there are more questions than time permits), there is no difference in the gender distribution of questioners. In this case the data are nearly identical for male vs. female chairs, at 36% and 35% females respectively. This null result differs from the NAM data, who saw a small but significant decrease in the fraction of females asking questions when the chair was male (16%) as opposed to a female chair (22%). Is there a bias due to frequent questioners?
###Code
## sort on number of questions per unique personal identifier
## what happens if we exclude the X outliers?
###Output
_____no_output_____ |
equilibrium/equilibrium.ipynb | ###Markdown
![MOSEK ApS](https://www.mosek.com/static/images/branding/webgraphmoseklogocolor.png ) Equilibrium of a system of weights connected by strings/springsIn this notebook we show how to solve the following problem: Find the equilibrium of a system of masses connected by a system of strings, with some masses being assigned fixed coordinates (attached to the wall, say). See the next picture.![](basic.png)Suppose we have $n$ masses with weights $w_1,\ldots,w_n$, and the length of the string between $i$ and $j$ is $\ell_{ij}$ for some set $L$ of pairs of indices $(i,j)$ (we assume $\ell_{ij}$ is not defined if there is no connection). The strings themselves have no mass. We also have a set $F$ of indices such that the $i$-th point is fixed to have coordinates $f_i$ if $i\in F$. The equilibrium of the system is a configuration which minimizes potential energy. With this setup we can write our problem as:\begin{equation}\begin{array}{ll}minimize & g\cdot \sum_i w_ix_i^{(2)} \\s.t. & \|x_i-x_j\|\leq \ell_{ij},\ ij\in L \\ & x_i = f_i,\ i\in F\end{array}\end{equation}where $x\in (\mathbf{R}^n)^2$, $x_i^{(2)}$ denotes the second (vertical) coordinate of $x_i$ and $g$ is the gravitational constant.Here is a sample problem description.
###Code
w = [0.0, 1.1, 2.2, 0.0, 2.1, 2.2, 0.2]
l = {(0,1): 1.0, (1,2): 1.0, (2,3): 1.0, (1,4): 1.0, (4,5): 0.3, (5,2): 1.0, (5,6): 0.5, (1,3): 8.0}
f = {0: (0.0,1.0), 3: (2.0,1.0)}
g = 9.81
###Output
_____no_output_____
###Markdown
Now we can formulate the problem using Mosek Fusion:
###Code
from mosek.fusion import *
# w - masses of points
# l - lengths of strings
# f - coordinates of fixed points
# g - gravitational constant
def stringModel(w, l, f, g):
n, m = len(w), len(l)
starts = [ lKey[0] for lKey in l.keys() ]
ends = [ lKey[1] for lKey in l.keys() ]
M = Model("strings")
# Coordinates of points
x = M.variable("x", [n, 2])
# A is the signed incidence matrix of points and strings
A = Matrix.sparse(m, n, list(range(m))+list(range(m)), starts+ends, [1.0]*m+[-1.0]*m)
# ||x_i-x_j|| <= l_{i,j}
c = M.constraint("c", Expr.hstack(Expr.constTerm(list(l.values())), Expr.mul(A, x)),
Domain.inQCone() )
# x_i = f_i for fixed points
for i in f:
M.constraint(x.slice([i,0], [i+1,2]), Domain.equalsTo(list(f[i])).withShape([1,2]))
# sum (g w_i x_i_2)
M.objective(ObjectiveSense.Minimize,
Expr.mul(g, Expr.dot(w, x.slice([0,1], [n,2]))))
# Solve
M.solve()
if M.getProblemStatus(SolutionType.Interior) == ProblemStatus.PrimalAndDualFeasible:
return x.level().reshape([n,2]), c.dual().reshape([m,3])
else:
return None, None
###Output
_____no_output_____
###Markdown
Here is a quick description of how we use vectorization to deal with all the conic constraints in one go. The matrix $A$ is the incidence matrix between the masses and the strings, with coefficients $+1, -1$ for the two endpoints of each string. It is chosen so that the product $Ax$ has rows of the form$$(x_i^{(1)} - x_j^{(1)}, x_i^{(2)} - x_j^{(2)})$$for all pairs $i,j$ for which $\ell_{ij}$ is bounded. Stacking the values of $\ell$ in the left column produces a matrix with each row of the form$$(\ell_{ij}, x_i^{(1)} - x_j^{(1)}, x_i^{(2)} - x_j^{(2)})$$and a conic constraint is imposed on all the rows, as required.The objective and linear constraints show examples of slicing the variable $x$.The function returns the coordinates of the masses and the values of the dual conic variables. A zero dual value indicates that a particular string is hanging loose, and a nonzero value means it is fully stretched. All we need now is to define a display function and we can look at some plots.
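As an aside, the structure of that stacked constraint matrix can be illustrated with plain numpy; this is only a sketch (it is not part of the Fusion model), using a few of the strings from the sample data above:

```python
import numpy as np

# Signed incidence matrix for a few of the sample strings, so that row k of A @ x
# equals x_i - x_j for the k-th string (i, j)
l = {(0, 1): 1.0, (1, 2): 1.0, (2, 3): 1.0, (1, 4): 1.0}
n, m = 5, len(l)
A = np.zeros((m, n))
for row, (i, j) in enumerate(l.keys()):
    A[row, i], A[row, j] = 1.0, -1.0

x = np.random.rand(n, 2)      # candidate coordinates, one row per mass
# Stacking the lengths next to A @ x gives one row (l_ij, x_i - x_j) per string,
# which is exactly the row layout the conic constraint above is applied to
print(np.column_stack([list(l.values()), A @ x]))
```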
###Code
%matplotlib inline
# x - coordinates of the points
# c - dual values of string length constraints
# d - pairs of points to connect
def display(x, c, d):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# Plot points
ax.scatter(x[:,0], x[:,1], color="r")
# Plot fully stretched strings (nonzero dual value) as solid lines, else dotted lines
for i in range(len(c)):
col = "b" if c[i][0] > 1e-4 else "b--"
ax.plot([x[d[i][0]][0], x[d[i][1]][0]], [x[d[i][0]][1], x[d[i][1]][1]], col)
ax.axis("equal")
plt.show()
x,c = stringModel(w, l, f, g)
if x is not None:
display(x, c, list(l.keys()))
###Output
_____no_output_____
###Markdown
How about we find a discrete approximation to the [catenary](https://en.wikipedia.org/wiki/Catenary):
###Code
n = 1000
w = [1.0]*n
l = {(i,i+1): 1.0/n for i in range(n-1)}
f = {0: (0.0,1.0), n-1: (0.7,1.0)}
g = 9.81
x,c = stringModel(w, l, f, g)
if x is not None:
display(x, c, list(l.keys()))
###Output
_____no_output_____
###Markdown
We can also have more suspension points and more complicated shapes:
###Code
n = 20
w = [1.0]*n
l = {(i,i+1): 0.09 for i in range(n-1)}
l.update({(5,14): 0.3})
f = {0: (0.0,1.0), 13: (0.5,0.9), 17: (0.7,1.1)}
g = 9.81
x,c = stringModel(w, l, f, g)
if x is not None:
display(x, c, list(l.keys()))
###Output
_____no_output_____
###Markdown
Duality and feasibilityThe dual problem is as follows:\begin{equation}\begin{array}{ll}maximize & -\sum_{ij\in L}\ell_{ij}y_{ij} - \sum_{i\in F}f_i\circ z_i\\s.t. & y_{ij}\geq \|v_{ij}\|,\ ij\in L \\ & \sum_{j~:~ij\in L} v_{ij}\mathrm{sgn}_{ij} + \left(\begin{array}{c}0\\ gw_i\end{array}\right) +z_i = 0, \ i=1,\ldots,n\end{array}\end{equation}where $\mathrm{sgn}_{ij}=+1$ if $i>j$ and $-1$ otherwise and $\circ$ is the dot product. The variables are $(y_{ij},v_{ij})\in \mathbf{R}\times\mathbf{R}^2$ for $ij\in L$ and $z_i\in\mathbf{R}^2$ for $i\in F$ (we assume $z_i=0$ for $i\not\in F$).Obviously (!) the linear constraints describe the equilibrium of forces at every mass. The ingredients are: the vectors of forces applied through adjacent strings ($v_{ij}$), gravity, and the attaching force holding a fixed point in its position. By proper use of vectorization this is much easier to express in Fusion than it looks:
###Code
def dualStringModel(w, l, f, g):
n, m = len(w), len(l)
starts = [ lKey[0] for lKey in l.keys() ]
ends = [ lKey[1] for lKey in l.keys() ]
M = Model("dual strings")
x = M.variable(Domain.inQCone(m,3)) #(y,v)
y = x.slice([0,0],[m,1])
v = x.slice([0,1],[m,3])
z = M.variable([n,2])
# z_i = 0 if i is not fixed
for i in range(n):
if i not in f:
M.constraint(z.slice([i,0], [i+1,2]), Domain.equalsTo(0.0))
B = Matrix.sparse(m, n, list(range(m))+list(range(m)), starts+ends, [1.0]*m+[-1.0]*m).transpose()
w2 = Matrix.sparse(n, 2, range(n), [1]*n, [-wT*g for wT in w])
# sum(v_ij *sgn(ij)) + z_i = -(0, gw_i) for all vertices i
M.constraint(Expr.add( Expr.mul(B, v), z ), Domain.equalsTo(w2))
# Objective -l*y -fM*z
fM = Matrix.sparse(n, 2, list(f.keys())+list(f.keys()), [0]*len(f)+[1]*len(f),
[pt[0] for pt in f.values()] + [pt[1] for pt in f.values()])
M.objective(ObjectiveSense.Maximize, Expr.neg(Expr.add(Expr.dot(list(l.values()), y),Expr.dot(fM, z))))
M.solve()
###Output
_____no_output_____
###Markdown
Let us quickly discuss the possible situations regarding feasibility:* The system has an equilibrium --- the problem is **primal feasible** and **dual feasible**.* The strings are too short and it is impossible to stretch the required distance between fixed points --- the problem is **primal infeasible**.* The system has a component that is not connected to any fixed point, hence some masses can keep falling down indefinitely, causing the problem **primal unbounded**. Clearly the forces within such component cannot be balanced, so the problem is **dual infeasible**. SpringsWe can extend this to consider infinitely strechable springs instead of fixed-length strings connecting the masses. The next model appears in [Applications of SOCP](http://stanford.edu/~boyd/papers/pdf/socp.pdf) by Lobo, Boyd, Vandenberghe, Lebret. We will now interpret $\ell_{ij}$ as the base length of the spring and assume that the elastic potential energy stored in the spring at length $x$ is $$E_{ij}=\left\{\begin{array}{ll}0 & x\leq \ell_{ij}\\ \frac{k}{2}(x-\ell_{ij})^2 & x>\ell_{ij}\end{array}\right.$$That leads us to consider the following second order cone program minimizing the total potential energy:\begin{equation}\begin{array}{ll}minimize & g\cdot \sum_i w_ix_i^{(2)} + \frac{k}{2}\sum_{ij\in L} t_{ij}^2 \\s.t. & \|x_i-x_j\|\leq \ell_{ij}+t_{ij},\ ij\in L \\ & 0\leq t_{ij},\ ij\in L \\ & x_i = f_i,\ i\in F\end{array}\end{equation}If $t$ denotes the vector of $t_{ij}$ then using a rotated quadratic cone for $(1,T,t)$:$$2\cdot 1\cdot T\geq \|t\|^2$$will place a bound on $\frac12\sum t_{ij}^2$. We now have a simple extension of the first model.
###Code
# w - masses of points
# l - lengths of strings
# f - coordinates of fixed points
# g - gravitational constant
# k - stiffness coefficient
def elasticModel(w, l, f, g, k):
n, m = len(w), len(l)
starts = [ lKey[0] for lKey in l.keys() ]
ends = [ lKey[1] for lKey in l.keys() ]
M = Model("strings")
x = M.variable("x", [n, 2]) # Coordinates
t = M.variable(m, Domain.greaterThan(0.0)) # Streching
T = M.variable(1) # Upper bound
M.constraint(Expr.vstack(T, Expr.constTerm(1.0), t), Domain.inRotatedQCone())
# A is the signed incidence matrix of points and strings
A = Matrix.sparse(m, n, list(range(m))+list(range(m)), starts+ends, [1.0]*m+[-1.0]*m)
# ||x_i-x_j|| <= l_{i,j} + t_{i,j}
c = M.constraint("c", Expr.hstack(Expr.add(t, Expr.constTerm(list(l.values()))), Expr.mul(A, x)),
Domain.inQCone() )
# x_i = f_i for fixed points
for i in f:
M.constraint(x.slice([i,0], [i+1,2]), Domain.equalsTo(list(f[i])).withShape([1,2]))
# sum (g w_i x_i_2) + k*T
M.objective(ObjectiveSense.Minimize,
Expr.add(Expr.mul(k,T), Expr.mul(g, Expr.dot(w, x.slice([0,1], [n,2])))))
# Solve
M.solve()
if M.getProblemStatus(SolutionType.Interior) == ProblemStatus.PrimalAndDualFeasible:
return x.level().reshape([n,2]), c.dual().reshape([m,3])
else:
return None, None
n = 20
w = [1.0]*n
l = {(i,i+1): 0.09 for i in range(n-1)}
l.update({(5,14): 0.3})
f = {0: (0.0,1.0), 13: (0.5,0.9), 17: (0.7,1.1)}
g = 9.81
k = 800
x, c = elasticModel(w, l, f, g, k)
if x is not None:
display(x, c, list(l.keys()))
###Output
_____no_output_____ |
homeworks/group2/HW_2/HW2-part3_group2.ipynb | ###Markdown
ASTR 598 Astrostatistics HW2 Part 3 Hayden Smotherman, Chris Suberlack, Winnie Wang To run this Notebook:The Galfast data must be extracted from the projects/ directory as a .txt file and this notebook must be run in the homeworks/group2/HW_2/ directory.
###Code
# Imports
%matplotlib inline
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.table import hstack
from astropy.table import vstack
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import os
import numpy as np
from astropy.io import fits
import pandas as pd
from scipy.stats import binned_statistic_2d as bs2d
from scipy.stats import binned_statistic as bs1d
import seaborn as sns
GalFastData = np.loadtxt('../../../project/Galfast-Stripe82.txt',usecols=(0,1,4,5,12,13,14))
GalFastTable = Table(rows=GalFastData, names=('ra','dec','pmra','pmdec','gmag','rmag','imag'))
# Generate the magnitude mask used in the Hess diagram data analysis
def Hess_r_v_gminusi(aptable,total_mask):
constant = 3 # This is a fudge parameter in determining number of bins
nObjects = np.sum(total_mask)
num_bins = int(constant * nObjects ** (1.0 / 4.0))
# Now calculate the binned proper motions
proper_motion = np.sqrt(aptable['pmra']**2+aptable['pmdec']**2)
    total_mask = (proper_motion < 100) & total_mask  # keep only proper motions below 100 mas/yr within the selection
Binned_PM = bs2d(aptable['ra'][total_mask],aptable['rmag'][total_mask],proper_motion[total_mask], bins = num_bins)
#cmin = min(np.log10(Binned_PM.statistic.T[Binned_PM.statistic.T > 0]))
#cmax = max(np.log10(Binned_PM.statistic.T[Binned_PM.statistic.T > 0]))
# Define custom colormaps: Set pixels with no sources to white
cmap = plt.cm.viridis
cmap.set_bad('w', 1.)
plt.figure(figsize=[8,8])
plt.imshow(Binned_PM.statistic.T, origin='lower',
extent=[Binned_PM.x_edge[0], Binned_PM.x_edge[-1], Binned_PM.y_edge[0], Binned_PM.y_edge[-1]],
aspect='auto', interpolation='nearest', cmap=cmap)
cb = plt.colorbar(orientation='horizontal')
cb.set_label(r'Proper Motion [mas/yr]',fontsize=16)
#plt.clim(0, 30) # This was set by hand to draw out as much detail as possible
plt.xlabel(r'RA [degree]',fontsize=16)
plt.ylabel(r'r',fontsize=16)
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
# Make the color masks
r_mask = (GalFastTable['rmag']>20.5) & (GalFastTable['rmag']<21)
gminusi_mask = ((GalFastTable['gmag']-GalFastTable['imag']) > 0.3) & ((GalFastTable['gmag']-GalFastTable['imag']) < 0.4)
mag_mask = r_mask * gminusi_mask
# Make the RA Masks
RA_mask_25to40 = (GalFastTable['ra'] > 25) & (GalFastTable['ra'] < 40)
RA_mask_0to15 = (GalFastTable['ra'] > 0) & (GalFastTable['ra'] < 15)
# Make the net masks
mask_25to40 = mag_mask * RA_mask_25to40
mask_0to15 = mag_mask * RA_mask_0to15
# Make the Hess diagram for 25 < RA < 40
Hess_r_v_gminusi(GalFastTable,mask_25to40)
plt.title(r'Proper Motion for $25^\circ < \mathrm{RA} < 40^\circ$',fontsize=20)
plt.savefig('hw2_3_GalFast_pm_Hess_Diagram_RA25to40') # Save the figure
# Make the Hess diagram for 0 < RA < 15
Hess_r_v_gminusi(GalFastTable,mask_0to15)
plt.title(r'Proper Motion for $0^\circ < \mathrm{RA} < 15^\circ$',fontsize=20)
plt.savefig('hw2_3_GalFast_pm_Hess_Diagram_RA0to15') # Save the figure
###Output
_____no_output_____
###Markdown
The Galfast data is consistent with the NSC data in the stripe 82 region. The Galfast data shows a slight proper motion bimodality for stars in $25^\circ < \mathrm{RA} < 40^\circ$ while it shows little to no bimodality for stars in $0^\circ < \mathrm{RA} < 15^\circ$. Note that the color bar scale is different for each image. This mirrors what we saw in the NSC data in the same region, although the NSC dataset is too sparse to fully flesh out the diagram.
###Code
def ProperMotionHist(aptable,mask_noRAcuts,xmin=-5,xmax=5,normed=True):
# This function makes two histograms of RA and DEC Proper motions for two different RA cuts
# Calculate the RA Proper Motion
mask_noRAcuts *= aptable['pmra']<50
# Make two masks that have the RA cuts included
RA_mask_0to15 = (aptable['ra'] > 0) & (aptable['ra'] < 15)
RA_mask_25to40 = (aptable['ra'] > 25) & (aptable['ra'] < 40)
# Mask things outside the range of the histogram
mask_pm = (xmin < aptable['pmra'])*(aptable['pmra'] < xmax)
# Combine masks
mask_0to15 = mask_noRAcuts * RA_mask_0to15 * mask_pm
mask_25to40 = mask_noRAcuts * RA_mask_25to40 * mask_pm
#Plot the two distributions with different RA cuts
plt.figure(figsize=[12,8])
plt.hist(aptable['pmra'][mask_0to15],alpha=0.5,bins=30,
normed=normed,linewidth=3,color='r')
plt.hist(aptable['pmra'][mask_25to40],alpha=0.5,bins=30,
normed=normed,linewidth=3,color='b')
plt.legend([r'$0^\circ < \mathrm{RA} < 15^\circ$',
r'$25^\circ < \mathrm{RA} < 40^\circ$'],fontsize=16)
plt.title('Distribution of RA Proper Motions for different RA cuts',fontsize=20)
plt.xlabel('RA Proper Motion [mas/yr]',fontsize=16)
plt.ylabel('Normed number density',fontsize=16)
plt.xlim([xmin,xmax])
#plt.ylim([1,200])
plt.savefig('hw2_3_pm_hist_ra.png', bbox_inches='tight')
# Calculate the DEC Proper Motion
mask_noRAcuts *= aptable['pmdec']<50
# Make two masks that have the RA cuts included
RA_mask_0to15 = (aptable['ra'] > 0) & (aptable['ra'] < 15)
RA_mask_25to40 = (aptable['ra'] > 25) & (aptable['ra'] < 40)
# Mask things outside the range of the histogram
mask_pm = (xmin < aptable['pmdec'])*(aptable['pmdec'] < xmax)
# Combine masks
mask_0to15 = mask_noRAcuts * RA_mask_0to15 * mask_pm
mask_25to40 = mask_noRAcuts * RA_mask_25to40 * mask_pm
#Plot the two distributions with different RA cuts
plt.figure(figsize=[12,8])
plt.hist(aptable['pmdec'][mask_0to15],alpha=0.5,bins=30,
normed=normed,linewidth=3,color='r')
plt.hist(aptable['pmdec'][mask_25to40],alpha=0.5,bins=30,
normed=normed,linewidth=3,color='b')
plt.legend([r'$0^\circ < \mathrm{RA} < 15^\circ$',
r'$25^\circ < \mathrm{RA} < 40^\circ$'],fontsize=16)
plt.title('Distribution of DEC Proper Motions for different RA cuts',fontsize=20)
plt.xlabel('DEC Proper Motion [mas/yr]',fontsize=16)
plt.ylabel('Normed number density',fontsize=16)
plt.xlim([xmin,xmax])
#plt.ylim([1,200])
plt.savefig('hw2_3_pm_hist_dec.png', bbox_inches='tight')
# Make some universal cuts
r_mask = (GalFastTable['rmag']>20.5) & (GalFastTable['rmag']<21)
gminusi_mask = ((GalFastTable['gmag']-GalFastTable['imag']) > 0.3) &\
((GalFastTable['gmag']-GalFastTable['imag']) < 0.4)
mag_mask = r_mask * gminusi_mask
ProperMotionHist(GalFastTable,mag_mask, -5,5)
###Output
_____no_output_____ |
notebooks/thesis/xcmove_joint_sanity_checking.ipynb | ###Markdown
Current alignment from wave node
###Code
pred_full = wn.tssm.mean_obs(wn.npts)
actual_full = wn.get_value()
plt.figure(figsize=(14, 4))
lg_idx = time_to_index(pred_atime-5.0, wn.st, wn.srate)
plt.plot(actual_full[lg_idx:lg_idx + 300])
plt.plot(pred_full[lg_idx:lg_idx + 300])
###Output
_____no_output_____
###Markdown
Current alignment in template xc move
###Code
pred_signal = pred_wavelet * env
plt.figure(figsize=(14, 4))
plt.plot(relevant_signal[backwards_idx:backwards_idx + len(pred_signal)])
plt.plot(pred_signal)
###Output
_____no_output_____
###Markdown
Proposed alignment in template xc move
###Code
pred_signal = pred_wavelet * env
plt.figure(figsize=(14, 4))
plt.plot(relevant_signal[proposed_idx:proposed_idx + len(pred_signal)])
plt.plot(pred_signal)
n_atime.set_value(current_atime)
wn._parent_values()
pred_full = wn.tssm.mean_obs(wn.npts)
actual_full = wn.get_value()
plt.figure(figsize=(14, 4))
lg_idx = time_to_index(pred_atime-5.0, wn.st, wn.srate)
plt.plot(actual_full[lg_idx:lg_idx + 300])
plt.plot(pred_full[lg_idx:lg_idx + 300])
for (eeid, pphase, scale, sidx, npts, component_type) in wn.tssm_components:
if eeid != eid or pphase != phase: continue
if component_type != "wavelet": continue
print sidx, npts
print 807-relevant_sidx
print backwards_idx
print time_to_index(relevant_stime, wn.st, wn.srate)
print relevant_sidx
#time_to_index(current_atime, wn.st, wn.srate)
print (current_atime - wn.st) * wn.srate
print (current_atime - relevant_stime) * wn.srate
###Output
807.311899662
180.826158524
|
docs/tutorial/T4-Characteristics.ipynb | ###Markdown
T4 - CharacteristicsCharacteristics can be conceptually difficult to define, but are fairly simple in practice. They are essentially special parameters that are specific to working with compartments and groups of compartments. We will motivate their design with a worked example that builds on the multi-population framework from T3, and then conclude by discussing other aspects of their design that differ from parameters.The key functionality provided by characteristics is this - the example in T3 had users initialize the compartment sizes by directly entering values for the number of people in the 'Susceptible' and 'Infected' compartments, both of which appeared on the 'Stocks' sheet in the databook.![t4-framework1](assets/T4/t4_framework_1.png)However, typically country data does not correspond directly to the compartments in the databook. For example, suppose we know - The total number of people alive- The number of people who have ever been infected- The proportion of infections that have now been resolvedWe could use this data to work out what the corresponding compartment sizes should be. For example, if we know that there are 1000 people in total, of whom 400 have ever been infected, and of which 75% of infections have been resolved, then the corresponding initial compartment sizes would be- `sus = 600`- `inf = 100`- `rec = 300`which satisfies that `sus+inf+rec=1000`, and `inf+rec=400`, and `rec/(inf+rec)=0.75`. The motivation for characteristics is that we want the databook to contain data entry for the total number of people, the number ever infected, and the proportion resolved, because those are the values corresponding to the available data. We would like Atomica to work out the corresponding compartment sizes, rather than having to do the calculation manually. To do this, we need to store the information in the framework that we have quantities- `alive = sus+inf+rec`- `ever_inf = inf+rec`- `prop_resolved = rec/ever_inf` and have these quantities appear in the databook instead of the compartments themselves. We could achieve the required data entry using parameters. However, we can't use the parameters to initialize compartments. This is why there is a separate system, 'characteristics', that allows expressions of groups of compartments to be used for initialization. We can set up the three characteristics defined above in a fairly straightforward way on the 'Characteristics' sheet. Rather than writing the formulas above with '+' and '/' operations, we instead provide a comma separated list of compartments (or other characteristics) to sum (in the 'components' column) and we provide the denominator separately in the 'denominator' column. So the corresponding characteristics sheet is![t4-framework2](assets/T4/t4_framework_2.png)We will also remove the 'Databook page' for the compartments on the compartments sheet, since we want to initialize the model using characteristics only. If we create a databook from the framework as usual, we will have updated data entry tables on the 'Stocks' sheet. We can then go ahead and fill them out with the initialization described above:![t4-databook1](assets/T4/t4_databook_1.png)The framework and databook are available in the Atomica repository under `atomica/docs/tutorial/assets/t4_framework_1.xlsx` and `atomica/docs/tutorial/assets/t4_databook_1.xlsx`, respectively. We can now load these files in and run a simulation:
###Code
import atomica as at
P = at.Project(framework='assets/T4/t4_framework_1.xlsx',databook='assets/T4/t4_databook_1.xlsx')
result = P.results[0]
###Output
_____no_output_____
###Markdown
We now want to check that the initialization has been performed correctly. In the `result` we can retrieve the variables for the compartment sizes and inspect their values at the first timestep
###Code
print('sus = %.2f' % (result.get_variable('sus')[0].vals[0]))
print('inf = %.2f' % (result.get_variable('inf')[0].vals[0]))
print('rec = %.2f' % (result.get_variable('rec')[0].vals[0]))
###Output
_____no_output_____
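These values can also be checked independently of Atomica: the three data values above uniquely determine the compartment sizes, as a small numpy sketch of the corresponding linear system shows:

```python
import numpy as np

# Unknowns: [sus, inf, rec]
# alive:         sus + inf + rec = 1000
# ever_inf:            inf + rec = 400
# prop_resolved: rec - 0.75*(inf + rec) = 0
A = np.array([[1.0, 1.0, 1.0],
              [0.0, 1.0, 1.0],
              [0.0, -0.75, 0.25]])
b = np.array([1000.0, 400.0, 0.0])
print(np.linalg.solve(A, b))   # [600. 100. 300.]
```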
###Markdown
So we have successfully used characteristics to have Atomica automatically convert from the aggregated data values to the underlying compartment values. Under the hood, we are solving a system of simultaneous equations. What happens if there are more unknowns than there are equations? This corresponds to the system being 'underdetermined'. For example, suppose we know that there are 1000 people in total, of whom 400 have ever been infected, but we don't know the proportion of people whose infections have been resolved. How do we then decide whether we have 100 infected and 300 recovered, or 300 infected and 100 recovered? Atomica uses the 'minimum norm' solution, which means that the known totals are distributed equally across the compartments they cover, and a compartment is initialized to zero if no information about it is available. We will see this with two examples. First, consider the case above where we only know the total population size and number ever infected. This corresponds to the framework and databook containing![t4-framework-3](assets/T4/t4_framework_3.png)![t4-databook-2](assets/T4/t4_databook_2.png)The minimum norm solution would see the 400 people uniformly distributed across `inf` and `rec`, so there will be 200 people in each compartment. If we run the model with these spreadsheets, we obtain
###Code
import atomica as at
P = at.Project(framework='assets/T4/t4_framework_2.xlsx',databook='assets/T4/t4_databook_2.xlsx')
result = P.results[0]
print('sus = %.2f' % (result.get_variable('sus')[0].vals[0]))
print('inf = %.2f' % (result.get_variable('inf')[0].vals[0]))
print('rec = %.2f' % (result.get_variable('rec')[0].vals[0]))
###Output
_____no_output_____
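The minimum-norm behaviour can be reproduced outside Atomica as well; for an underdetermined consistent system, `np.linalg.lstsq` returns the minimum-norm solution, which matches the equal split described above (a sketch only, not Atomica's actual routine):

```python
import numpy as np

# Unknowns: [sus, inf, rec]; only two pieces of information are available:
#   sus + inf + rec = 1000   and   inf + rec = 400
A = np.array([[1.0, 1.0, 1.0],
              [0.0, 1.0, 1.0]])
b = np.array([1000.0, 400.0])
x, *_ = np.linalg.lstsq(A, b, rcond=None)   # minimum-norm solution
print(x)   # approximately [600. 200. 200.]
```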
###Markdown
We also now recieve a warning that 'Initialization characteristics are underdetermined' which reflects the fact that we had to rely on the minimum norm solution to infer the value of some of the compartments. For compartments that are missing entirely, we can remove the 'alive' characteristic entirely, leaving us with:![t4-framework-4](assets/T4/t4_framework_4.png)![t4-databook-3](assets/T4/t4_databook_3.png)Now, we expect that the 400 people will be assigned to `inf` and `rec` in equal proportions, but since we have no information at all about `sus`, it will be initialized with a value of zero:
###Code
import atomica as at
P = at.Project(framework='assets/T4/t4_framework_3.xlsx',databook='assets/T4/t4_databook_3.xlsx')
result = P.results[0]
print('sus = %.2f' % (result.get_variable('sus')[0].vals[0]))
print('inf = %.2f' % (result.get_variable('inf')[0].vals[0]))
print('rec = %.2f' % (result.get_variable('rec')[0].vals[0]))
###Output
_____no_output_____ |
Simple2DRegression/Simple2DRegression.ipynb | ###Markdown
Inspecting calculate_z by plotting x and y projections
###Code
# z function
data_non_zero = np.random.rand(100) * 100
data_zero = np.zeros(100)
plt.plot(
data_non_zero,
calculate_z(data_non_zero, data_zero, smearing=True),
linestyle='none',
marker='o',
markersize=3
)
plt.plot(
data_non_zero,
calculate_z(data_zero, data_non_zero, smearing=True),
linestyle='none',
marker='o',
markersize=3
)
ax = plt.gca()
ylim = ax.get_ylim()
ax.set_ylim(0, ylim[1])
###Output
_____no_output_____
###Markdown
Creating a large dataset
###Code
def create_dataset(size):
data_x = np.random.rand(size) * 100
data_y = np.random.rand(size) * 100
data_z = calculate_z(data_x, data_y, smearing=True)
df = pd.DataFrame({'x': data_x, 'y': data_y, 'z': data_z})
return df
# Creating dataset for plotting
data = create_dataset(100_000)
data
###Output
_____no_output_____
###Markdown
Plotting data
###Code
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter3D(data['x'], data['y'], data['z'], c='g', s=0.001)
###Output
_____no_output_____
###Markdown
Scaling and splitting data
###Code
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Splitting data
features = np.array(data[['x', 'y']])
target = np.array(data[['z']])
# target = np.ravel(target)
features_train, features_test, target_train, target_test = train_test_split(
features,
target,
random_state=1
)
# Scaling data
scaler_features = StandardScaler()
scaler_features.fit(features_train)
features_train_scaled = scaler_features.transform(features_train)
features_test_scaled = scaler_features.transform(features_test)
scaler_target = StandardScaler()
scaler_target.fit(target_train)
target_train_scaled = scaler_target.transform(target_train)
###Output
_____no_output_____
###Markdown
MLP Regression
###Code
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import r2_score
reg = MLPRegressor(
hidden_layer_sizes=(6, 6),
activation="relu",
random_state=1,
max_iter=2000
).fit(features_train_scaled, np.ravel(target_train_scaled))
pred_test = reg.predict(features_test_scaled)
pred_test = scaler_target.inverse_transform(pred_test)
print(pred_test.shape)
abs_deviation = (pred_test - np.ravel(target_test))
rel_deviation = abs_deviation / np.ravel(target_test)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
ax1.hist(rel_deviation, bins=20)
ax2.hist(abs_deviation, bins=20)
mean_abs = np.mean(abs_deviation)
std_abs = np.std(abs_deviation)
print('Abs: {:.3} +/- {:.3}'.format(mean_abs, std_abs))
mean_rel = np.mean(rel_deviation)
std_rel = np.std(rel_deviation)
print('Rel: {:.3} +/- {:.3}'.format(mean_rel, std_rel))
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12, 8))
ms = 2
mm = 'o'
ax1.plot(
features_test[:,0],
rel_deviation,
linestyle='none',
marker=mm,
markersize=ms
)
ax1.set_ylabel('relative deviation')
ax1.set_xlabel('x')
ax2.plot(
features_test[:,1],
rel_deviation,
linestyle='none',
marker=mm,
markersize=ms
)
ax2.set_ylabel('relative deviation')
ax2.set_xlabel('y')
ax3.plot(
features_test[:,0],
abs_deviation,
linestyle='none',
marker=mm,
markersize=ms
)
ax3.set_ylabel('absolute deviation')
ax3.set_xlabel('x')
ax4.plot(
features_test[:,1],
abs_deviation,
linestyle='none',
marker=mm,
markersize=ms
)
ax4.set_ylabel('absolute deviation')
ax4.set_xlabel('y')
features_test[:,0]
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter3D(features_test[:,0], features_test[:,1], abs_deviation, c='g', s=0.001)
score = reg.score(features_test_scaled, np.ravel(scaler_target.transform(target_test)))  # score on the scaled data the model was trained with
score
###Output
_____no_output_____ |
notebooks/Seasonality_Trends_Week_1_Lesson_2.ipynb | ###Markdown
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Lesson 2In the screencast for this lesson I go through a few scenarios for time series. This notebook contains the code for that with a few little extras! :) Setup
###Code
!pip install -U tf-nightly-2.0-preview
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
def plot_series(time, series, format="-", start=0, end=None, label=None):
plt.plot(time[start:end], series[start:end], format, label=label)
plt.xlabel("Time")
plt.ylabel("Value")
if label:
plt.legend(fontsize=14)
plt.grid(True)
###Output
_____no_output_____
###Markdown
Trend and Seasonality
###Code
def trend(time, slope=0):
return slope * time
###Output
_____no_output_____
###Markdown
Let's create a time series that just trends upward:
###Code
time = np.arange(4 * 365 + 1)
baseline = 10
series = trend(time, 0.1)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
###Output
_____no_output_____
###Markdown
Now let's generate a time series with a seasonal pattern:
###Code
def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.4,
np.cos(season_time * 2 * np.pi),
1 / np.exp(3 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
"""Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
baseline = 10
amplitude = 40
series = seasonality(time, period=365, amplitude=amplitude)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
###Output
_____no_output_____
###Markdown
Now let's create a time series with both trend and seasonality:
###Code
slope = 0.05
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
###Output
_____no_output_____
###Markdown
Noise In practice few real-life time series have such a smooth signal. They usually have some noise, and the signal-to-noise ratio can sometimes be very low. Let's generate some white noise:
###Code
def white_noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
plt.figure(figsize=(10, 6))
plot_series(time, noise)
plt.show()
###Output
_____no_output_____
###Markdown
Now let's add this white noise to the time series:
###Code
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
###Output
_____no_output_____
###Markdown
All right, this looks realistic enough for now. Let's try to forecast it. We will split it into two periods: the training period and the validation period (in many cases, you would also want to have a test period). The split will be at time step 1000.
###Code
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
def autocorrelation(time, amplitude, seed=None):
rnd = np.random.RandomState(seed)
φ1 = 0.5
φ2 = -0.1
ar = rnd.randn(len(time) + 50)
ar[:50] = 100
for step in range(50, len(time) + 50):
ar[step] += φ1 * ar[step - 50]
ar[step] += φ2 * ar[step - 33]
return ar[50:] * amplitude
def autocorrelation(time, amplitude, seed=None):
rnd = np.random.RandomState(seed)
φ = 0.8
ar = rnd.randn(len(time) + 1)
for step in range(1, len(time) + 1):
ar[step] += φ * ar[step - 1]
return ar[1:] * amplitude
series = autocorrelation(time, 10, seed=42)
plot_series(time[:200], series[:200])
plt.show()
series = autocorrelation(time, 10, seed=42) + trend(time, 2)
plot_series(time[:200], series[:200])
plt.show()
series = autocorrelation(time, 10, seed=42) + seasonality(time, period=50, amplitude=150) + trend(time, 2)
plot_series(time[:200], series[:200])
plt.show()
series = autocorrelation(time, 10, seed=42) + seasonality(time, period=50, amplitude=150) + trend(time, 2)
series2 = autocorrelation(time, 5, seed=42) + seasonality(time, period=50, amplitude=2) + trend(time, -1) + 550
series[200:] = series2[200:]
#series += noise(time, 30)
plot_series(time[:300], series[:300])
plt.show()
def impulses(time, num_impulses, amplitude=1, seed=None):
rnd = np.random.RandomState(seed)
impulse_indices = rnd.randint(len(time), size=10)
series = np.zeros(len(time))
for index in impulse_indices:
series[index] += rnd.rand() * amplitude
return series
series = impulses(time, 10, seed=42)
plot_series(time, series)
plt.show()
def autocorrelation(source, φs):
ar = source.copy()
max_lag = len(φs)
for step, value in enumerate(source):
for lag, φ in φs.items():
if step - lag > 0:
ar[step] += φ * ar[step - lag]
return ar
signal = impulses(time, 10, seed=42)
series = autocorrelation(signal, {1: 0.99})
plot_series(time, series)
plt.plot(time, signal, "k-")
plt.show()
signal = impulses(time, 10, seed=42)
series = autocorrelation(signal, {1: 0.70, 50: 0.2})
plot_series(time, series)
plt.plot(time, signal, "k-")
plt.show()
series_diff1 = series[1:] - series[:-1]
plot_series(time[1:], series_diff1)
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(series)
from statsmodels.tsa.arima_model import ARIMA
model = ARIMA(series, order=(5, 1, 0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
df = pd.read_csv("sunspots.csv", parse_dates=["Date"], index_col="Date")
series = df["Monthly Mean Total Sunspot Number"].asfreq("1M")
series.head()
series.plot(figsize=(12, 5))
series["1995-01-01":].plot()
series.diff(1).plot()
plt.axis([0, 100, -50, 50])
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(series)
autocorrelation_plot(series.diff(1)[1:])
autocorrelation_plot(series.diff(1)[1:].diff(11 * 12)[11*12+1:])
plt.axis([0, 500, -0.1, 0.1])
autocorrelation_plot(series.diff(1)[1:])
plt.axis([0, 50, -0.1, 0.1])
116.7 - 104.3
[series.autocorr(lag) for lag in range(1, 50)]
from pandas.plotting import autocorrelation_plot
series_diff = series
for lag in range(50):
series_diff = series_diff[1:] - series_diff[:-1]
autocorrelation_plot(series_diff)
import pandas as pd
series_diff1 = pd.Series(series[1:] - series[:-1])
autocorrs = [series_diff1.autocorr(lag) for lag in range(1, 60)]
plt.plot(autocorrs)
plt.show()
###Output
_____no_output_____ |
exp/mnist-compare/notebooks/1011 - quantitative evaluation (barplot).ipynb | ###Markdown
Quantitative Evaluation Flipping the top pixels and seeing how much the log odds drop in terms of the top-1 classification error.
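For reference, the quantity tracked here is the log odds of the predicted class. The sketch below is only an illustration with made-up probabilities (`p_before` and `p_after` are hypothetical, not values from these experiments) of how a log-odds drop is computed.
```python
import numpy as np

def log_odds(p, eps=1e-12):
    # Log odds of a predicted class probability, clipped for numerical safety
    p = np.clip(p, eps, 1 - eps)
    return np.log(p / (1 - p))

p_before = 0.98   # hypothetical probability of the top-1 class before flipping pixels
p_after = 0.60    # hypothetical probability after flipping the top pixels
print(log_odds(p_before) - log_odds(p_after))  # bigger drop = the flipped pixels mattered more
```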
###Code
%matplotlib inline
import torch
import os
import pandas as pd
import seaborn as sns
###Output
_____no_output_____
###Markdown
Collect all the files that end with 'records.th'
###Code
def get_file_names(directory):
directory = os.path.join('../result', directory)
result = []
for filename in os.listdir(directory):
if filename.endswith("records.th"):
result.append(filename)
return result
arr = []
identifiers = ['1013-vbd_l1_opposite-0.1']
for directory in identifiers:
filenames = get_file_names(directory)
arr.append(filenames)
import matplotlib.pyplot as plt
import numpy as np
def plot_given_file(ax, filepath, name):
orig_log_odds, all_log_odds, unnormalized_img, imp_vector, rodds = \
torch.load(filepath)
x_flip = np.array([0] + all_log_odds.keys())
y_flip = np.array([orig_log_odds] + all_log_odds.values())
ax.plot(x_flip, y_flip, label='{} flip'.format(name))
if 'p_b' not in name:
x = [k for k, v in rodds]
y = np.array([v for k, v in rodds])
ax.scatter(x, y, label='{} random'.format(name))
return unnormalized_img
for idx in xrange(10):
fig, ax = plt.subplots()
for name in [
# '1013-vbd_l1_opposite-0.1/8_{}_records.th'.format(idx),
# '1013-vbd_l1_opposite-1E-3/8_{}_records.th',.format(idx)
# '1013-vbd_l1_opposite-1E-4', '1013-vbd_l1_opposite-1E-5',
# '1013-vbd_l1_opposite-1E-6/8_{}_records.th',.format(idx)
'1013-vbd_l1_opposite-0/8_{}_records.th'.format(idx),
'1018-vbd_l1-0/8_{}_records.th'.format(idx),
'1013-vbd_opposite-0.5-0.1/8_{}_records.th'.format(idx),
# '1013-vbd_opposite-0.5-1.0/8_{}_records.th'.format(idx),
'1013-p_b/8_3_{}_records.th'.format(idx)
]:
path = '../result/{}'.format(name)
thereal_name = name.split('/')[0]
unnormalized_img = plot_given_file(ax, path, name=thereal_name)
# plot_given_file(ax, '../imgs/val_benchmark/0927_ae_hole_p_b_val/{}'.format(arr[0][idx]), name='p_b')
plt.ylabel('Log odds')
plt.legend(bbox_to_anchor=(1, 1))
plt.show()
identifiers = [ '1005-p_b',
'1005-vbd-p0.5-0.001', '1005-vbd-p0.5-0.01', '1005-vbd-p0.5-0.1',
'1005-vbd-p0.999-1E-4', '1005-vbd-p0.999-1E-5', '1005-vbd-p0.999-1E-6',
'1005-vbdl1-1E-3', '1005-vbdl1-1E-4', '1005-vbdl1-1E-5',
]
arr = [get_file_names('../result/{}'.format(i)) for i in identifiers]
def prepare_pd_table(arr, identifiers):
result = []
for i in xrange(len(identifiers)):
identifier = identifiers[i]
for j in xrange(len(arr[i])):
orig_log_odds, all_log_odds_dict, unnormalized_img, imp_vector = \
torch.load(os.path.join('../result', '%s' % identifier, arr[i][j]))
for key in all_log_odds_dict:
log_odds_drop = orig_log_odds - all_log_odds_dict[key]
result.append([identifier + '(n = %d)' % (len(arr[i])), j, key, log_odds_drop])
result = pd.DataFrame(result)
result.columns = ['method', 'img_index', 'num_flippings', 'odds_diff']
return result
###Output
_____no_output_____
###Markdown
Old
###Code
orig_log_odds, all_log_odds, unnormalized_img, imp_vector, rodds = \
torch.load('../result/1007-vbd_l1_opposite-0.1/' + arr[0][0])
###Output
_____no_output_____
###Markdown
Compare between vbd, vbdl1, and vbd 0.999. Notice there are only 20 images here.
###Code
table = prepare_pd_table(arr, identifiers)
ax = sns.boxplot(x="num_flippings", y="odds_diff", hue="method", data=table)
ax.legend(bbox_to_anchor=(1, 1))
###Output
_____no_output_____ |
podcast-builder/podcast_builder.ipynb | ###Markdown
Podcast Builder In this example, we will be using the Amazon Polly text-to-speech service to convert plain text into an audio file. Installing pre-requisite libraries
###Code
%pip install boto3
###Output
_____no_output_____
###Markdown
Reading the text for your podcast script
###Code
f = open("script.txt", "r", encoding = "utf8")
text_to_convert = f.read()
print(text_to_convert)
f.close()
###Output
_____no_output_____
###Markdown
Setting up an account to access the Amazon Polly text-to-speech service Create a new user from the AWS console for programmatic access and assign the policy `AmazonPollyFullAccess` to this user. ``` json{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "polly:*" ], "Resource": [ "*" ] } ]}```Now download or copy the `access key ID` and `secret access key` for the newly created user. Store these credentials at this location (`~/.aws/credentials`) in INI format. ``` INI[api_user]aws_access_key_id = aws_secret_access_key = region = ``` Calling the Polly client to create an audio file for your text
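Before calling Polly, you can optionally confirm that the `api_user` profile authenticates. The snippet below is a convenience check assumed to work with the setup above (it uses the AWS STS `get_caller_identity` call, which requires no extra permissions); it is not part of the original walkthrough.
```python
import boto3

# Sanity check: load the api_user profile and print the identity it resolves to
session = boto3.Session(profile_name = 'api_user')
print(session.client('sts').get_caller_identity()['Arn'])
```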
###Code
import boto3
boto3_session = boto3.Session(profile_name = 'api_user')
polly_client = boto3_session.client('polly')
response = polly_client.synthesize_speech(
VoiceId = 'Brian',
OutputFormat = 'mp3',
Engine = "neural",
Text = text_to_convert)
file = open('podcast.mp3', 'wb')
file.write(response['AudioStream'].read())
file.close()
###Output
_____no_output_____ |
analysis/figure_nbs/FIGURE_bad_ranks.ipynb | ###Markdown
Select incongruous models
###Code
# Select observables and vamp scores
ranking_k = 2 # rank by this VAMp2 score
timescale_k = 2 # Select the timescale to judge the ranking by
plotting_k = 7 # plot up to this process
top_vamps = vamps.loc[(vamps.process==ranking_k) & (vamps.lag==lag), :].copy()
top_timescales = timescales.loc[(timescales.lag == lag), :].copy()
# rank models
rank_by_ix = top_performers(vamps, k=ranking_k, lag=lag)
# add ranks to observables/vamps
top_vamps[f'rank'] = top_vamps['hp_ix'].apply(lambda x: rank_by_ix.get(x))
top_timescales[f'rank'] = top_timescales['hp_ix'].apply(lambda x: rank_by_ix.get(x))
ts_df = top_timescales.sort_values(by=['rank'], inplace=False)
ts_df = ts_df.loc[ts_df.process == timescale_k, :]
bad_ix = []
for i, row1 in ts_df.iterrows():
ts = row1['median']
rank = row1['rank']
ix = row1['hp_ix']
tmp = []
for j, row2 in ts_df.loc[ts_df['rank']>rank, :].iterrows():
if row2['median'] > ts:
tmp.append(row2['hp_ix'])
ts = row2['median']
if len(tmp)>0:
tmp.append(ix)
tmp = [tmp[-1]] + tmp[:-1]
bad_ix.append(tuple(tmp))
sort_ix = np.argsort([len(x) for x in bad_ix])[::-1]
bad_ix_sorted = [bad_ix[i] for i in sort_ix]
with sns.plotting_context('paper', font_scale=1):
width =0.4
offset = 0.5
fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex='col', sharey='row')
cols = sns.color_palette('colorblind', timescales.process.max())
for col_ix, bad_set in enumerate([bad_ix[0], bad_ix_sorted[1]]):
use_ts = top_timescales.loc[top_timescales.hp_ix.isin(bad_set), :]
use_ts = use_ts.loc[use_ts.process < plotting_k, :]
use_vamps = top_vamps.loc[top_vamps.hp_ix.isin(bad_set), :]
# Plot VAMPS
vamp_ax = axes[0, col_ix]
plot_val_by_mod_proc(vamp_ax, use_vamps, color='k')
vamp_ax.yaxis.set_major_locator(mpl.ticker.MultipleLocator(0.01))
# Plot timescales
time_ax = axes[1, col_ix]
plot_val_by_mod_proc(time_ax, use_ts)
time_ax.set_yscale('log')
# MD timescle
xlim = time_ax.get_xlim()
time_ax.hlines(md_t2[protein], *xlim, color='k',ls='dashed', label='MD estimate')
time_ax.set_xlim(xlim)
# labels
if col_ix == 0:
time_ax.set_ylabel('Timescale (ns)')
vamp_ax.set_ylabel(f'VAMP-2(k={k})')
axes[-1, col_ix].set_xlabel(f'VAMP2(k={k}) rank.')
# Legend
h, l = axes[-1, -1].get_legend_handles_labels()
axes[-1, -1].legend(h, l, bbox_to_anchor=(1, 1), loc='upper left', title='Timescale')
# Grid
axes[0, col_ix].grid()
axes[1, col_ix].grid()
letters = list('abcd')
for i, ax in enumerate(axes.flatten()):
ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(1))
ax.annotate(text=f"({letters[i]})", xy=(0.05, 0.95),
xycoords='axes fraction', ha='left',va='top')
plt.tight_layout()
plt.savefig(f'{protein}/bad_vamp_ranks.pdf', bbox_inches='tight')
###Output
_____no_output_____ |
model_scoring/agg_vs_best_comparison.ipynb | ###Markdown
Greater than the sum of its parts?*How does the aggregate model compare to the best individual classification?*In this notebook we will optimize both the aggregate model for a galaxy and its best individual classification. We'll then compare the residuals and mean squared errors of the two, and see how they stack up!**Warning:** The fitting step here takes a long time (~15 minutes) to complete. Which sucks. First, define some useful magic commands and import needed modules
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os
import json
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import splprep, splev
import lib.galaxy_utilities as gu
import lib.python_model_renderer.parse_annotation as pa
import lib.python_model_renderer.render_galaxy as rg
from model_fitting import Model, ModelFitter
from sklearn.metrics import mean_squared_error
import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', category=AstropyWarning)
###Output
_____no_output_____
###Markdown
Define the subject id of the galaxy we'll be working on
###Code
subject_id = 20902040
###Output
_____no_output_____
###Markdown
Load all the required metadata for plotting etc...
###Code
gal, angle = gu.get_galaxy_and_angle(subject_id)
pic_array, deprojected_image = gu.get_image(gal, subject_id, angle)
psf = gu.get_psf(subject_id)
diff_data = gu.get_image_data(subject_id)
pixel_mask = 1 - np.array(diff_data['mask'])[::-1]
galaxy_data = np.array(diff_data['imageData'])[::-1]
size_diff = diff_data['width'] / diff_data['imageWidth']
# arcseconds per pixel for zooniverse image
pix_size = pic_array.shape[0] / (gal['PETRO_THETA'].iloc[0] * 4)
# arcseconds per pixel for galaxy data
pix_size2 = galaxy_data.shape[0] / (gal['PETRO_THETA'].iloc[0] * 4)
imshow_kwargs = {
'cmap': 'gray_r', 'origin': 'lower',
'extent': (
# left of image in arcseconds from centre
-pic_array.shape[0]/2 / pix_size,
pic_array.shape[0]/2 / pix_size, # right...
-pic_array.shape[1]/2 / pix_size, # bottom...
pic_array.shape[1]/2 / pix_size # top...
),
}
plt.imshow(pic_array, **imshow_kwargs)
###Output
_____no_output_____
###Markdown
Grab the aggregate model
###Code
with open(
'../component-clustering/cluster-output/{}.json'.format(subject_id)
) as f:
aggregate_model = json.load(f)
agg_model = pa.parse_aggregate_model(aggregate_model, size_diff=size_diff)
###Output
_____no_output_____
###Markdown
And the best individual classification
###Code
with open('lib/best-classifications.json') as f:
all_best_cls = json.load(f)
best_cls = gu.classifications[
gu.classifications.classification_id == all_best_cls.get(str(subject_id))
].iloc[0]
best_model = pa.parse_annotation(json.loads(best_cls['annotations']), size_diff)
###Output
_____no_output_____
###Markdown
Define a helper function that will perform the model optimization
###Code
def fit_model(model, n=100):
m = deepcopy(model)
m['spiral'] = []
mf = ModelFitter(m, galaxy_data, psf, pixel_mask)
new_model, res = mf.fit(options={'maxiter': n})
print('{}, {}, N steps: {}'.format(res['success'], str(res['message']), res['nit']))
return new_model
###Output
_____no_output_____
###Markdown
Perform the optimization, warning: this takes a while.
###Code
%time fitted_best_model = fit_model(best_model)
%time fitted_agg_model = fit_model(agg_model)
###Output
b'CONVERGENCE: NORM_OF_PROJECTED_GRADIENT_<=_PGTOL', N steps: 34
CPU times: user 2min 16s, sys: 4.56 s, total: 2min 21s
Wall time: 2min 21s
###Markdown
Define a helper function that will do the post-processing of the models for plotting
###Code
conv = lambda arr: rg.convolve2d(arr, psf, mode='same', boundary='symm')
###Output
_____no_output_____
###Markdown
Calculate the rendered models and residuals to be plotted
###Code
fitted_best_rendered = rg.calculate_model(fitted_best_model, diff_data['width'])
fitted_agg_rendered = rg.calculate_model(fitted_agg_model, diff_data['width'])
fitted_best_comparison = rg.compare_to_galaxy(fitted_best_rendered, psf, galaxy_data, pixel_mask=pixel_mask, stretch=False)
fitted_agg_comparison = rg.compare_to_galaxy(fitted_agg_rendered, psf, galaxy_data, pixel_mask=pixel_mask, stretch=False)
###Output
_____no_output_____
###Markdown
Grab a value to use for limits on the residuals plot
###Code
l = max(fitted_best_comparison.max(), fitted_agg_comparison.max())
from sklearn.metrics import mean_squared_error
def make_suptitle(arr, pre=None):
s = mean_squared_error(0.8 * galaxy_data, arr)
plt.suptitle((pre + ' ' if pre else '') + 'Mean Squared Error: {:.8f}'.format(s))
fig, ax = plt.subplots(ncols=3, sharey=True, figsize=(15, 6))
ax[0].imshow(0.8 * galaxy_data, **imshow_kwargs, vmin=(0.8 * galaxy_data).min(), vmax=(0.8 * galaxy_data).max())
ax[1].imshow(conv(fitted_best_rendered), **imshow_kwargs, vmin=(0.8 * galaxy_data).min(), vmax=(0.8 * galaxy_data).max())
ax[2].imshow(
fitted_best_comparison,
**{**imshow_kwargs, 'cmap': 'RdGy'},
vmin=-l, vmax=l
)
make_suptitle(fitted_best_comparison, 'Best individual model:')
plt.tight_layout()
fig, ax = plt.subplots(ncols=3, sharey=True, figsize=(15, 6))
ax[0].imshow(0.8 * galaxy_data, **imshow_kwargs, vmin=(0.8 * galaxy_data).min(), vmax=(0.8 * galaxy_data).max())
ax[1].imshow(conv(fitted_agg_rendered), **imshow_kwargs, vmin=(0.8 * galaxy_data).min(), vmax=(0.8 * galaxy_data).max())
ax[2].imshow(
fitted_agg_comparison,
**{**imshow_kwargs, 'cmap': 'RdGy'},
vmin=-l, vmax=l
)
make_suptitle(fitted_agg_comparison, 'Aggregate model:')
plt.tight_layout();
fig, ax = plt.subplots(ncols=3, sharey=True, figsize=(15, 6))
ax[0].imshow(0.8 * galaxy_data, **imshow_kwargs, vmin=(0.8 * galaxy_data).min(), vmax=(0.8 * galaxy_data).max())
ax[1].imshow(conv(fitted_agg_rendered), **imshow_kwargs, vmin=(0.8 * galaxy_data).min(), vmax=(0.8 * galaxy_data).max())
ax[2].imshow(
fitted_agg_comparison,
**{**imshow_kwargs, 'cmap': 'RdGy'},
vmin=-l, vmax=l
)
make_suptitle(fitted_agg_comparison)
plt.tight_layout();
Model(fitted_best_model, galaxy_data, psf, pixel_mask)
Model(fitted_agg_model, galaxy_data, psf, pixel_mask)
###Output
_____no_output_____ |
Synsets, wordnet and Yelp reviews.ipynb | ###Markdown
Synsets, wordnet and Yelp reviews Here we use the `en_core_web_sm` spacy language model. If you haven't already, install it by running `python -m spacy download en_core_web_sm` in a terminal. Also, run `nltk.download('sentiwordnet')` if you have never run it.
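If you prefer to do the setup from inside the notebook, a minimal sketch is shown below. Note that SentiWordNet also relies on the plain WordNet corpus, so both downloads are included; adjust to your environment as needed.
```python
import nltk

# One-time corpus downloads (safe to re-run; skipped if already present)
nltk.download('sentiwordnet')
nltk.download('wordnet')
```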
###Code
import sys
import json
import spacy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nltk
from tqdm import tqdm_notebook
from nltk.corpus import sentiwordnet as swn
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import classification_report, confusion_matrix
###Output
_____no_output_____
###Markdown
`ConfusionMatrixDisplay` requires `scikit-learn`'s version to be $>0.20$. You can check it by running `!conda list scikit-learn` in a cell below here. Otherwise, you need to update it by running `conda update scikit-learn` in a terminal. Be aware that if you have `textacy` version $0.8$ also installed in the same environment, then scikit-learn will not update. **IF YOU DO NOT HAVE THIS MODULE AND YOU DON'T WANT TO INSTALL IT, THEN DO NOT RUN THE CELL BELOW!** You'll just see the confusion matrix in textual format instead of as a plot.
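As an alternative to `!conda list scikit-learn`, here is a quick in-notebook version check (a small sketch, not part of the analysis itself):
```python
import sklearn

# Per the note above, the installed version must be > 0.20 for ConfusionMatrixDisplay
print(sklearn.__version__)
```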
###Code
from sklearn.metrics import ConfusionMatrixDisplay
###Output
_____no_output_____
###Markdown
Pre-processing class
###Code
class SpacyTokenizer(object):
def __init__(self, model='en_core_web_sm', lemma=True, pos_filter=None):
self.pos = pos_filter
self.lemma = lemma
self.nlp = spacy.load(model)
def tokenize(self, text):
tokens = []
for token in self.nlp(text):
if self.lemma:
tk = token.lemma_
else:
tk = token.text
if self.pos is None or token.pos_ in self.pos:
tokens.append((tk, token.pos_))
else:
pass
return tokens
###Output
_____no_output_____
###Markdown
Scoring class
###Code
class SentiWn(object):
def __init__(self, strategy='sum', use_pos=False):
self.strategy = strategy
self.pos = use_pos
self.pos_map = {
'NOUN': 'n',
'ADJ': 'a',
'VERB': 'v',
'ADV': 'r'
}
self.strategy_map = {
'sum': self._simple_sum,
'weighted_sum': self.weighted_sum,
'average_score': self.average_score,
'weighted_average': self.weighted_average}
# Simplest solution.
# Double-sum: we sum the score for each synset for each word
def _simple_sum(self, text):
s = np.zeros(3)
for token, pos in text:
if self.pos:
try:
synsets = list(swn.senti_synsets(token, self.pos_map[pos]))
except KeyError:
pass
else:
synsets = list(swn.senti_synsets(token))
for syn in synsets:
p, n, o = syn.pos_score(), syn.neg_score(), syn.obj_score()
s[0] += p
s[1] += n
s[2] += o
return s
# We weight the scores considering how many synsets each word has:
# the more syns a word has, the lower its importance.
def weighted_sum(self, text):
s = np.zeros(3)
all_s = []
if self.pos:
all_s = [list(swn.senti_synsets(token, self.pos_map[pos])) for token, pos in text]
else:
all_s = [list(swn.senti_synsets(token)) for token, pos in text]
for i, (token, pos) in enumerate(text):
try:
synsets = all_s[i]
sidf = np.log(max([len(l) for l in all_s]) / len(synsets))
for syn in synsets:
p, n, o = syn.pos_score(), syn.neg_score(), syn.obj_score()
s[0] += p * sidf
s[1] += n * sidf
s[2] += o * sidf # this is neutral
except ZeroDivisionError:
pass
return s
# We just average each score, so that we have an averaged positive, average negative
# and average neutral
def average_score(self, text):
counter = 0
s = np.zeros(3)
for token, pos in text:
if self.pos:
try:
synsets = list(swn.senti_synsets(token, self.pos_map[pos]))
except KeyError:
pass
else:
synsets = list(swn.senti_synsets(token))
for syn in synsets:
p, n, o = syn.pos_score(), syn.neg_score(), syn.obj_score()
s[0] += p
s[1] += n
s[2] += o
counter += 1
s[0] = s[0]/counter
s[1] = s[1]/counter
s[2] = s[2]/counter
return s
# We average the weighted sum
def weighted_average(self, text):
s = np.zeros(3)
all_s = []
if self.pos:
all_s = [list(swn.senti_synsets(token, self.pos_map[pos])) for token, pos in text]
else:
all_s = [list(swn.senti_synsets(token)) for token, pos in text]
counter = 0
for i, (token, pos) in enumerate(text):
try:
synsets = all_s[i]
sidf = np.log(max([len(l) for l in all_s]) / len(synsets))
for syn in synsets:
p, n, o = syn.pos_score(), syn.neg_score(), syn.obj_score()
s[0] += p * sidf
s[1] += n * sidf
s[2] += o * sidf # this is neutral
counter += sidf
except ZeroDivisionError:
pass
s[0] = s[0]/counter
s[1] = s[1]/counter
s[2] = s[2]/counter
return s
def predict(self, docs):
try:
score_function = self.strategy_map[self.strategy]
except KeyError:
raise Exception('{} strategy not yet available'.format(self.strategy))
self.doc_scores = np.array([score_function(doc) for doc in docs])
# we scale data bc the "objective" (=neutral) scores are always higher than pos and neg scores. Thus, if
# we just took the max, then every document would have been considered neutral
self.S = MinMaxScaler().fit_transform(self.doc_scores)
# returns the index of the column with the highest val for each row
# Thus: 0 = positive (first column), 1 = negative (second column), 2 = neutral
pred = self.S.argmax(axis=1)
y_pred = [1 if p == 0 else -1 if p == 1 else 0 for i, p in enumerate(pred)]
return y_pred
def custom_plots(self, y_true):
fig, ax = plt.subplots(figsize=(14, 4), nrows=2, ncols=2)
ax[0,0].boxplot(self.doc_scores)
ax[0,1].scatter(self.doc_scores[:,0], self.doc_scores[:,1], alpha=0.4, c=y_true)
ax[1,0].boxplot(self.S)
ax[1,1].scatter(self.S[:,0], self.S[:,1], alpha=0.4, c=y_true)
return plt
###Output
_____no_output_____
###Markdown
Pre-processing
###Code
yelp = pd.read_csv('data/yelp_example_1_small.tsv', sep='\t')
tokenizer = SpacyTokenizer(lemma=True, pos_filter=['NOUN', 'ADV', 'ADJ', 'VERB'])
tokenizer.tokenize(yelp.iloc[0].content)
docs, titles, scores = [], [], []
data = tqdm_notebook(list(yelp.iterrows()))
for i, row in data:
tokens = tokenizer.tokenize(row.content)
docs.append(tokens)
titles.append(row.business)
scores.append(row.score)
with open('data/yelp_example_1.json', 'w') as out:
json.dump({'docs': docs, 'titles': titles, 'scores': scores}, out)
###Output
_____no_output_____
###Markdown
Wordnet and synsets examples
###Code
synsets = list(swn.senti_synsets('happy'))
for syn in synsets:
print(syn)
for syn in synsets:
print(syn.synset.definition())
synsets = list(swn.senti_synsets('play', 'v'))
for syn in synsets:
print(syn)
for syn in synsets:
print(syn.synset.definition())
###Output
participate in games or sport
act or have an effect in a specified way or with a specific effect or outcome
play on an instrument
play a role or part
be at play; be engaged in playful activity; amuse oneself in a way characteristic of children
replay (as a melody)
perform music on (a musical instrument)
pretend to have certain qualities or state of mind
move or seem to move quickly, lightly, or irregularly
bet or wager (money)
engage in recreational activities rather than work; occupy oneself in a diversion
pretend to be somebody in the framework of a game or playful activity
emit recorded sound
perform on a certain location
put (a card or piece) into play during a game, or act strategically as if in a card game
engage in an activity as if it were a game rather than take it seriously
behave in a certain way
cause to emit recorded audio or video
manipulate manually or in one's mind or imagination
use to one's advantage
consider not very seriously
be received or accepted or interpreted in a specific way
behave carelessly or indifferently
cause to move or operate freely within a bounded space
perform on a stage or theater
be performed or presented for public viewing
cause to happen or to occur as a consequence
discharge or direct or be discharged or directed as if in a continuous stream
make bets
stake on the outcome of an issue
shoot or hit in a particular manner
use or move
employ in a game or in a specific position
contend against an opponent in a sport, game, or battle
exhaust by allowing to pull on the line
###Markdown
Application on the Yelp reviews
###Code
with open('data/yelp_example_1.json', 'r') as infile:
data = json.load(infile)
docs = data['docs']
titles = data['titles']
scores = data['scores']
''' The num argument indicates the star value of the review (e.g. 3 stars).
If the review has more than num stars, then it is positive (=1); otherwise, negative (=-1). 0 for neutral.
We can also get only positive and negative labels, without neutral, by setting the use_neutral argument to False
'''
def get_true_label_from_score(num, use_neutral = True):
if use_neutral:
return [1 if score > num else -1 if score < num else 0 for i, score in enumerate(scores)]
else:
return [1 if score >= num else -1 for i, score in enumerate(scores)]
y_true = get_true_label_from_score(3)
###Output
_____no_output_____
###Markdown
01. Simple sum
###Code
wn = SentiWn(strategy='sum', use_pos=True)
y_pred = wn.predict(docs)
wn.custom_plots(y_true).show()
def print_report_plot_cf(y_true, y_pred):
report = classification_report(y_true, y_pred)
cm = confusion_matrix(y_true, y_pred)
print(report)
if 'sklearn.metrics._plot.confusion_matrix' in sys.modules:
fig, ax = plt.subplots(figsize=(8, 8))
d = ConfusionMatrixDisplay(cm, [-1, 0, 1])
d.plot(cmap=plt.cm.Blues, ax=ax, values_format='10.0f')
plt.show()
else:
print(cm)
print_report_plot_cf(y_true, y_pred)
###Output
precision recall f1-score support
-1 0.35 0.39 0.37 1016
0 0.13 0.43 0.20 642
1 0.82 0.42 0.56 3342
accuracy 0.42 5000
macro avg 0.43 0.41 0.37 5000
weighted avg 0.63 0.42 0.47 5000
###Markdown
02. Weighted sum
###Code
wn_w = SentiWn(strategy='weighted_sum')
y_w_pred = wn_w.predict(docs)
wn_w.custom_plots(y_true).show()
print_report_plot_cf(y_true, y_w_pred)
###Output
precision recall f1-score support
-1 0.61 0.28 0.38 1016
0 0.14 0.27 0.19 642
1 0.78 0.77 0.77 3342
accuracy 0.60 5000
macro avg 0.51 0.44 0.45 5000
weighted avg 0.66 0.60 0.62 5000
###Markdown
03. Average score
###Code
wn_a = SentiWn(strategy='average_score')
y_a_pred = wn_a.predict(docs)
wn_a.custom_plots(y_true).show()
print_report_plot_cf(y_true, y_a_pred)
###Output
precision recall f1-score support
-1 0.56 0.01 0.03 1016
0 0.13 0.98 0.23 642
1 0.86 0.04 0.07 3342
accuracy 0.15 5000
macro avg 0.52 0.34 0.11 5000
weighted avg 0.70 0.15 0.08 5000
###Markdown
04. Weighted average
###Code
wn_wa = SentiWn(strategy='weighted_average')
y_wa_pred = wn_wa.predict(docs)
wn_wa.custom_plots(y_true).show()
print_report_plot_cf(y_true, y_wa_pred)
###Output
precision recall f1-score support
-1 0.61 0.01 0.02 1016
0 0.13 1.00 0.23 642
1 0.89 0.00 0.00 3342
accuracy 0.13 5000
macro avg 0.54 0.34 0.08 5000
weighted avg 0.73 0.13 0.04 5000
|
spot-oa/oa/proxy/ipynb_templates/Advanced_Mode_master.ipynb | ###Markdown
Apache Spot's IPython Advanced Mode Proxy This guide provides examples of how to request data, show data with some cool libraries like pandas, and more. **Import Libraries** The next cell will import the necessary libraries to execute the functions. Do not remove
###Code
import datetime
import pandas as pd
import numpy as np
import linecache, bisect
import os
import json  # needed below for json.dumps(results)
spath = os.getcwd()
path = spath.split("/")
date = path[len(path)-1]
###Output
_____no_output_____
###Markdown
**Request Data**In order to request data we are using Graphql (a query language for APIs, more info at: http://graphql.org/).We provide the function to make a data request, all you need is a query and variables
###Code
def makeGraphqlRequest(query, variables):
return GraphQLClient.request(query, variables)
###Output
_____no_output_____
###Markdown
Now that we have a function, we can run a query like this:*Note: There's no need to manually set the date for the query, by default the code will read the date from the current path
###Code
suspicious_query = """query($date:SpotDateType) {
proxy {
suspicious(date:$date)
{ clientIp
clientToServerBytes
datetime
duration
host
networkContext
referer
requestMethod
responseCode
responseCodeLabel
responseContentType
score
serverIp
serverToClientBytes
uri
uriPath
uriPort
uriQuery
uriRep
userAgent
username
webCategory
}
}
}"""
##If you want to use a different date for your query, switch the
##commented/uncommented following lines
variables={
'date': datetime.datetime.strptime(date, '%Y%m%d').strftime('%Y-%m-%d')
# 'date': "2016-10-08"
}
suspicious_request = makeGraphqlRequest(suspicious_query,variables)
##The variable suspicious_request will contain the resulting data from the query.
results = suspicious_request['data']['proxy']['suspicious']
###Output
_____no_output_____
###Markdown
Pandas Dataframes The following cell loads the results into a pandas dataframe. For more information on how to use pandas, you can learn more here: https://pandas.pydata.org/pandas-docs/stable/10min.html
###Code
df = pd.read_json(json.dumps(results))
##Printing only the selected column list from the dataframe
##Unless specified otherwise, all columns would be printed
print df[['clientIp','uriQuery','datetime','clientToServerBytes','serverToClientBytes', 'host']]
###Output
_____no_output_____
###Markdown
Additional operations Additional operations can be performed on the dataframe, like sorting the data, filtering it and grouping it. **Filtering the data**
###Code
##Filter results where the client IP is 10.173.202.136
##The resulting data will be stored in df2
df2 = df[df['clientIp'].isin(['10.173.202.136'])]
print df2[['clientIp','uriQuery','datetime','host']]
###Output
_____no_output_____
###Markdown
**Ordering the data**
###Code
srtd = df.sort_values(by="host")
print srtd[['host','clientIp','uriQuery','datetime']]
###Output
_____no_output_____
###Markdown
**Grouping the data**
###Code
## This command will group the results by pairs of client IP and host,
## summarizing all other columns
grpd = df.groupby(['clientIp','host']).sum()
## This will print the resulting dataframe displaying the input and output bytes columnns
print grpd[["clientToServerBytes","serverToClientBytes"]]
###Output
_____no_output_____
###Markdown
**Reset Scored Connections** Uncomment and execute the following cell to reset all scored connections for this day
###Code
# reset_scores = """mutation($date:SpotDateType!) {
# proxy{
# resetScoredConnections(date:$date){
# success
# }
# }
# }"""
# variables={
# 'date': datetime.datetime.strptime(date, '%Y%m%d').strftime('%Y-%m-%d')
# }
# request = makeGraphqlRequest(reset_scores,variables)
# print request['data']['proxy']['resetScoredConnections']['success']
###Output
_____no_output_____
###Markdown
Sandbox At this point you can perform your own analysis using the previously provided functions as a guide. Happy threat hunting!
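For example, one possible starting point (an illustrative sketch that reuses the dataframe `df` built earlier in this notebook) is to rank client IPs by the total bytes they transferred:
```python
## Total bytes per client IP, largest consumers of server-to-client traffic first
totals = df.groupby('clientIp')[['clientToServerBytes','serverToClientBytes']].sum()
print totals.sort_values(by='serverToClientBytes', ascending=False).head(10)
```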
###Code
#Your code here
###Output
_____no_output_____ |
_build/html/_sources/KDP-2_operands_aberrations.ipynb | ###Markdown
Aberrations
###Code
import pandas as pd
import itables
from itables import init_notebook_mode, show
import itables.options as opt
init_notebook_mode(all_interactive=True)
opt.lengthMenu = [50, 100, 200, 500]
#opt.classes = ["display", "cell-border"]
#opt.classes = ["display", "nowrap"]
opt.columnDefs = [{"className": "dt-left", "targets": "_all"}, {"width": "500px", "targets": 4}]
#opt.maxBytes = 0
#pd.get_option('display.max_columns')
#pd.get_option('display.max_rows')
#filename = r'C:\Work\Tools\OpticalDesign_Doku\KDP-2_optimization_operands.xlsx'
import os
cwd = os.getcwd()
filename = os.path.join(cwd, os.path.join('Excel','KDP-2_optimization_operands.xlsx'))
df_aberrations = pd.read_excel(filename, sheet_name = "aberrations", header = 1, index_col = 0)
df_aberrations = df_aberrations.dropna() # drop nan values
###Output
_____no_output_____
###Markdown
KDP-2 has operands for 3rd order, 5th order and 7th order aberrations: (click on "Order" to sort them in original order)
###Code
df_aberrations
###Output
_____no_output_____ |
surge_updates.ipynb | ###Markdown
Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) Kyle T. Mandli GeoClaw Storm Surge Modeling: Updates and New Features Thank You to Contributors Storm Object> This object contains a time series of time data that describe a particular storm. This includes the attributes below and the ability to read from multiple sources for data such as the U.S. National Hurricane Center (NHC), the Japanese Meterological Agency (JMA), and the Indian Meteorlogical Department (IMD). This class can then write out in any of these formats, construct the wind and pressure fields using a supported parameterized model, or output the GeoClaw supported storm format used for running storm surge simulations.**Important Point** - Python data container for time series description of a storm Core Capabilities - `read` in lots of storm formats (e.g. *ATCF*, *HURDAT*, *JMA*) - `write` out in some storm formats (e.g. *GeoClaw*, *ATCF*, *HURDAT*) - `plot` storm track and intensity - `category` of storm given a categorization scheme Utilities - `construct_fields` of a storm given a parameterization (e.g. Holland 1980, CLE 2015) - Others Example Use```python Storm parameters - Parameterized storm (Holland 1980)data.storm_specification_type = 'holland80' (type 1)data.storm_file = os.path.expandvars(os.path.join(os.getcwd(), 'ike.storm'))``````python Convert ATCF data to GeoClaw formatclawutil.data.get_remote_file( "http://ftp.nhc.noaa.gov/atcf/archive/2008/bal092008.dat.gz")atcf_path = os.path.join(scratch_dir, "bal092008.dat") Note that the get_remote_file function does not support gzip files which are not also tar files. The following code handles thiswith gzip.open(".".join((atcf_path, 'gz')), 'rb') as atcf_file, \ open(atcf_path, 'w') as atcf_unzipped_file: atcf_unzipped_file.write(atcf_file.read().decode('ascii'))``````python Read in unzipped fileike = Storm(path=atcf_path, file_format="ATCF")``````python Calculate landfall time - Need to specify as the file above does not include this info (9/13/2008 ~ 7 UTC)ike.time_offset = datetime.datetime(2008, 9, 13, 7)``````python Write storm data to GeoClaw formatike.write(data.storm_file, file_format='geoclaw')``` **GeoClaw Storm Format**```bash532008-09-13T07:00:00 -1.04040000e+06 -3.70000000e+01 1.72000000e+01 1.54333332e+01 1.66680000e+05 1.00600000e+05 4.63000000e+05 -1.01880000e+06 -3.84000000e+01 1.73000000e+01 1.80055554e+01 1.66680000e+05 1.00500000e+05 4.63000000e+05 -9.97200000e+05 -3.99000000e+01 1.75000000e+01 2.31499998e+01 1.66680000e+05 1.00300000e+05 4.63000000e+05 -9.75600000e+05 -4.13000000e+01 1.78000000e+01 2.31499998e+01 3.70400000e+04 1.00200000e+05 4.63000000e+05 -9.54000000e+05 -4.28000000e+01 1.82000000e+01 2.31499998e+01 3.70400000e+04 1.00000000e+05 5.09300000e+05 -9.32400000e+05 -4.43000000e+01 1.87000000e+01 2.57222220e+01 1.66680000e+05 9.99000000e+04 4.63000000e+05...```
###Code
import os
import gzip
import datetime
import matplotlib.pyplot as plt
from clawpack.geoclaw.surge.storm import Storm
import clawpack.clawutil as clawutil
# Scratch directory for storing topo and dtopo files:
scratch_dir = os.path.join(os.environ["CLAW"], 'geoclaw', 'scratch')
# Convert ATCF data to GeoClaw format
clawutil.data.get_remote_file("http://ftp.nhc.noaa.gov/atcf/archive/2008/bal092008.dat.gz")
atcf_path = os.path.join(scratch_dir, "bal092008.dat")
# Note that the get_remote_file function does not support gzip files which
# are not also tar files. The following code handles this
with gzip.open(".".join((atcf_path, 'gz')), 'rb') as atcf_file, \
open(atcf_path, 'w') as atcf_unzipped_file:
atcf_unzipped_file.write(atcf_file.read().decode('ascii'))
# Uncomment/comment out to use the old version of the Ike storm file
# ike = Storm(path="old_ike.storm", file_format="ATCF")
ike = Storm(path=atcf_path, file_format="ATCF")
# Calculate landfall time - Need to specify as the file above does not
# include this info (9/13/2008 ~ 7 UTC)
ike.time_offset = datetime.datetime(2008, 9, 13, 7)
# Plot - Incorporated into the head branch
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
category_color = {5: 'red',
4: 'yellow',
3: 'orange',
2: 'green',
1: 'blue',
0: 'gray'}
category = ike.category(categorization="NHC")
longitude = ike.eye_location[:, 0]
latitude = ike.eye_location[:, 1]
for i in range(len(longitude)):
color = category_color[category[i]]
axes.plot(longitude[i:i + 2], latitude[i:i + 2], color=color)
axes.set_title("Hurricane Ike Track")
axes.set_xlabel("Longitude")
axes.set_ylabel("Latitude")
plt.show()
###Output
_____no_output_____
###Markdown
Parameterized Wind Models **Parameterizations:** Holland 1980, Holland 2010, CLE, SLOSH, Rankine, Modified Rankine, deMaria, Willoughby. Added to both the Python and Fortran code.
###Code
import numpy
import matplotlib.pyplot as plt
# Parameters
N = 1000
radius = 100e3
Pn = 1005
Pc = 950
A = 23.0
B = 1.5
rho_air = 1.15
OMEGA = 7.2722052166430395e-5
THETA_0 = 0.52359877559829882
f = 2.0 * OMEGA * numpy.sin(THETA_0)
f = 0.0
# Evaluate profiles
x = numpy.concatenate((numpy.linspace(-radius, -0.01, N),
numpy.linspace(0.01, radius, N)), axis=0)
r = numpy.abs(x) * 1e-3
p = Pc + (Pn - Pc) * numpy.exp(-A/(r)**B)
C = 1e1**2 *A * B * (Pn - Pc) / (rho_air)
v = numpy.sqrt(C * numpy.exp(-A / r**B) / r**B + r**2 * f**2 / 4.0) - r * f / 2.0
fig = plt.figure()
fig.set_figwidth(fig.get_figwidth() * 2)
# Convert to kms
x /= 1e3
axes = fig.add_subplot(1, 2, 1)
axes.plot(x, v)
axes.set_title("Wind Velocity Profile")
axes.set_xlabel('km')
axes.set_ylabel('m/s')
axes.set_xlim([numpy.min(x), numpy.max(x)])
axes.set_ylim([0.0, numpy.max(v) + 5])
axes = fig.add_subplot(1, 2, 2)
axes.plot(x, p)
axes.set_title("Pressure Profile")
axes.set_xlabel('km')
axes.set_ylabel('mb')
axes.set_xlim([numpy.min(x), numpy.max(x)])
axes.set_ylim([Pc - 5, Pn + 5])
plt.show()
###Output
_____no_output_____ |
data_pulling/LinkExtraction.ipynb | ###Markdown
Link Extraction===Extracts links from journals, comments, and guestbook entries.Currently written up with pseudo-code to operate on the journals.This notebook is written with a workflow that made sense to Zach, but it can definitely be changed and updated: new functions can be written, old functions can be deleted or merged, etc.
###Code
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import os
import re
import pandas as pd
import numpy as np
from collections import Counter, defaultdict
import sqlite3
from html.parser import HTMLParser
from tqdm import tqdm, tqdm_notebook
from datetime import datetime
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Load the site data This provides the list of site URL 'names'.
###Code
site_metadata_working_dir = "/home/srivbane/shared/caringbridge/data/derived/site_metadata"
site_metadata_filepath = os.path.join(site_metadata_working_dir, "site_metadata_with_text.feather")
site_df = pd.read_feather(site_metadata_filepath)
len(site_df)
site_df.head()
# note there are a very small number of duplicate sites
duplicate_sites = site_df[site_df.duplicated(subset='name', keep=False)][['site_id', 'name', 'title', 'created_at', 'visits']]
len(duplicate_sites)
# read valid sites being included in this study
valid_site_ids = set()
data_selection_dir = "/home/srivbane/shared/caringbridge/data/projects/sna-social-support/data_selection"
with open(os.path.join(data_selection_dir, "valid_site_ids.txt"), 'r') as infile:
for line in infile:
site_id = line.strip()
if site_id == "":
continue
else:
valid_site_ids.add(int(site_id))
len(valid_site_ids)
# note that 5 of the 'duplicate name' site ids exist in the sample for this study
len(set(site_df.site_id[site_df.duplicated(subset='name', keep=False)]) & set(valid_site_ids))
# take a look at the duplicate sites, noting different creation dates and different titles
# in one case, we include one site but not the other in the SNA sample
# these sites need to be investigated more specifically
duplicate_sites['Human-readable creation date'] = duplicate_sites.created_at.apply(lambda created_at: str(datetime.utcfromtimestamp(created_at / 1000)))
duplicate_sites['In SNA sample?'] = duplicate_sites.site_id.apply(lambda site_id: site_id in valid_site_ids)
duplicate_sites.sort_values(by='name')
# random selection of CaringBridge site URLs
# these are what we need to match textual links to in order to build a network!
np.random.choice(site_df.name, 10, replace=False).tolist()
# 'site_names' is a set containing all of the valid CaringBridge site names
# notably including both sites in our sample and all other sites as well
site_names = set(site_df.name)
len(site_names)
###Output
_____no_output_____
###Markdown
Iterate through the journal data looking for links
###Code
# this returns the sqlite database connection that contains the journal update texts
def get_journal_text_db():
journal_text_filepath = "/home/srivbane/shared/caringbridge/data/projects/caringbridge_core/journal.sqlite"
db = sqlite3.connect(
journal_text_filepath,
detect_types=sqlite3.PARSE_DECLTYPES
)
db.row_factory = sqlite3.Row
return db
# this function connects to the database, makes a query, and passes the cursor to iterate_cursor
def connect_and_iterate():
try:
db = get_journal_text_db()
cursor = db.cursor()
    cursor.execute("SELECT * FROM journalText")  # no LIMIT clause, so this iterates over all journal texts
iterate_cursor(cursor)
finally: # always do this with these databases!!
db.close()
# given a database cursor, this function extracts the text and passes it to get_link_texts
def iterate_cursor(cursor, total=19137078):
for row in tqdm(cursor, total=total):
body_text = str(row['body'])
site_id = row['site_id']
journal_oid = row['journal_oid']
link_texts = get_link_texts(body_text)
for txt in link_texts:
# TODO From each text link, should extract the URL slug and verify if it is in the set of site_names
if 'caringbridge' not in txt: #every valid link we care about must be a caringbridge link
spam.append(txt)
continue
words = txt.split('/')
#Check if one slug of the URL contains a valid site name
for item in words:
if 'caringbridge' in item or 'visit' in item:
continue
if 'al' in item or 'www2' in item or 'europe' in item:
spam.append(txt)
break
if item in site_names:
name = item
link = {
'site_id': site_id,
'journal_oid': journal_oid,
'link': txt,
'site_name': name
}
links.append(link)
# this returns a list of string objects that correspond to the links in the text
# I'll note that what we really want is the site 'name' in the URL, but this implementation makes no attempt to extract that name
def get_link_texts(text):
# TODO Implement me to find all links in the text, whether they are explicit links or just link mentions
# e.g. should match both an HTML hyperlink or a plain-text copy-pasted link.
# The existing implementation is a very simple one, and likely misses lots of links.
extracted_links = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',text)
links = []
for item in extracted_links:
#strip href of formatting
if '>' in item:
item = item[:item.find('>')-1]
links.append(item)
return links
links = []
spam = []
connect_and_iterate()
print("Valid links: ",len(links))
print("Invalid links: ",len(spam))
print("Percentage of valid links: ",(len(links)/ (len(links) + len(spam))))
links[:10]
links_df = pd.DataFrame(links)
links_df.head()
# Write the resulting links dataframe to a CSV file
working_dir = "/home/srivbane/shared/caringbridge/data/projects/sna-social-support/data_pulling"
links_df.to_csv(os.path.join(working_dir, 'journal_intersite_links.csv'))
print("Finished.")
###Output
Finished.
|
ICCT_en/examples/04/SS-37-Quadrotor_longitudinal_velocity_control.ipynb | ###Markdown
Quadrotor longitudinal velocity controlThe quadrotor (or quadcopter) longitudinal velocity $v$ may be controlled by tilting the vehicle by the pitch angle. The angle $\theta$ is controlled by applying the torque $T$ using the propellers. The vehicle moment of inertia is $J= 1.3e-2$. When the vehicle is tilted by the angle $\theta$, the propellers produce a forward force approximately equal to $F_v = F\theta = mg\theta$ and the aerodynamic drag is $F_c=-cv=-0.9v$, where $m=2000$ g is vehicle mass, and $g = 9.81$ m/s^2 gravity acceleration. Maximum torque equals $5000$ mNm. Pitch angle $\theta$ must be limited to $\pm30$ degrees during all operations and maximum velocity to 2 m/s. The pitch angle is estimated by an appropriate sensor and velocity is measured with GPS. The design procedure follows the following two-step procedure:1. Write the system equations in state space form for the rotational dynamics (torque $T$ to pitch angle $\theta$) and the longitudinal dynamics (pitch angle to forward velocity $v$).2. Design a regulator for $v$ in order to satisfy the following specifications: - Settling time for 5% tolerance band of less than 2.5 seconds. - No Overshoot. - No steady-state error in response to a step velocity request. System equationsThe system equations are equal to:\begin{cases} m\dot{v} = F_v + F_c = mg\theta -cv \\ J\ddot{\theta} = T.\end{cases}By defining the state vector as $x = \begin{bmatrix} x_1 & x_2 & x_3 \end{bmatrix}^T = \begin{bmatrix} v & \theta & \dot{\theta} \end{bmatrix}^T$ and the input $u=T$, system equations in state space form become:\begin{cases}\dot{x} = \begin{bmatrix} -c/m & g & 0 \\ 0 & 0 & 1 \\ 0 & 0 & 0 \end{bmatrix}x + \begin{bmatrix} 0 \\ 0 \\ 1/J \end{bmatrix}u \\y = \begin{bmatrix} y_1 \\ y_2 \end{bmatrix} = \begin{bmatrix} 1 & 0 & 0 \\ 0 & 1 & 0 \end{bmatrix}x\end{cases}The dynamics from $u$ to $\theta$ is a double integrator while that from $\theta$ to $v$ is a first order dynamics with a pole in $-c/m$. The system has two outputs: velocity and tilt angle. The controllability matrix $\mathcal{C}$ is
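Before running the interactive cells, the short sketch below (an illustrative check that reuses the same `numpy` and `control` tools imported later) verifies the two design claims numerically: the state-feedback gain places the closed-loop poles at $-15$ and $-2.8\pm1.0i$, and observer gains $l_1=20$, $l_2=100$ put both observer eigenvalues at $-10$.
```python
import numpy
import control

# Plant matrices as defined above
A = numpy.matrix('-0.45 9.81 0; 0 0 1; 0 0 0')
B = numpy.matrix([[0], [0], [1/1.3E-02]])

# State feedback: place the closed-loop poles at -15 and -2.8 +/- 1.0i
K = control.acker(A, B, [-15, -2.8 + 1.0j, -2.8 - 1.0j])
print(numpy.linalg.eigvals(A - B*K))    # expected: -15, -2.8 +/- 1.0i

# Observer for the (theta, theta_dot) subsystem with l1 = 20, l2 = 100
Ar = numpy.matrix('0 1; 0 0')
Cr = numpy.matrix('1 0')
L = numpy.matrix([[20.], [100.]])
print(numpy.linalg.eigvals(Ar - L*Cr))  # expected: both eigenvalues at -10
```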
###Code
A = numpy.matrix('-0.45 9.81 0; 0 0 1; 0 0 0')
B = numpy.matrix([[0],[0],[1/1.3E-02]])
C = numpy.matrix('1 0 0; 0 1 0')
D = numpy.matrix('0; 0')
CM = control.ctrb(A,B)
display(Markdown(bmatrix(CM)))
# print(numpy.linalg.matrix_rank(CM))
###Output
_____no_output_____
###Markdown
and has rank equal to 3 so the system is controllable.The observability matrix $\mathcal{O}$ is
###Code
OM = control.obsv(A,C)
display(Markdown(bmatrix(OM)))
# print(numpy.linalg.matrix_rank(OM))
###Output
_____no_output_____
###Markdown
and has rank equal to 3 so the system is observable. Regulator design Observer designSince we have the direct measurements of $x_1$ and $x_2$ we are only interested in estimating $x_3$. If we look at the subsystem $(x_2, \, x_3)$ we note that it is observable, so it is possible to design an observer by considering only this subsystem. The structure of our estimator is therefore:$$\begin{bmatrix} \dot{\hat{x}_2} \\ \dot{\hat{x}_3} \end{bmatrix} = \begin{bmatrix} 0 & 1 \\ 0 & 0 \end{bmatrix}\begin{bmatrix} \hat{x}_2 \\ \hat{x}_3 \end{bmatrix} + \begin{bmatrix} 0 \\ 1/J \end{bmatrix}u + \begin{bmatrix} l_1 \\ l_2 \end{bmatrix}\left( y - C\begin{bmatrix} \hat{x}_2 \\ \hat{x}_3 \end{bmatrix} \right) = \begin{bmatrix} -l_1 & 1 \\ -l_2 & 0 \end{bmatrix}\begin{bmatrix} \hat{x}_2 \\ \hat{x}_3 \end{bmatrix} + \begin{bmatrix} 0 \\ 1/J \end{bmatrix}u + \begin{bmatrix} l_1 \\ l_2 \end{bmatrix}y$$applying the Laplace transform and solving for $\hat{x}_3(s)$ we arrive at$$\hat{x}_3(s) = \frac{l_2s}{s^2+l_1s+l_2}y_2(s) + \frac{s+l_1}{s^2+l_1s+l_2}\frac{u(s)}{J}.$$We now have a simple linear estimator for $x_3$ that is asymptotically stable for any $l_1>0$ and $l_2>0$. It is interesting to note that if $l_2\rightarrow \infty$, the estimator transfer function simplifies to $\hat{x}_3(s) = s y_2(s)$ and the result equals $\hat{x}_3 = \dot{\theta}$; it is obtained by differentiating the measured $y_2 = \theta$.Selecting $l_1 = 20$ and $l_2 = 100$ places both observer eigenvalues in $-10$. Controller designFor the requirement on settling time, the frequency of the poles must be greater than $3/T_S$ for real poles and greater than $3/\zeta Ts$ for complex poles, where $T_s$ is the settling time (5%) and $\zeta$ the damping. Good poles locations, in terms of response and input energy, for a double integrator system, should lay within a range of $\pm 45°$ w.r.t. the real negative axis. By first considering these facts and then proceeding iteratively, dominant poles were chosen in $-2.8\pm1.0i$, whereas the third pole was chosen at much higher frequency: $-15$.For the requirement of zero steady-state error the reference input is scaled by a gain equal to the inverse of the steady-state gain of the closed-loop system, yielding total closed-loop gain of $1.0$. How to use this notebook?- Verify the requested specifications in case of initial error in the estimate of $x_3$ for both positive and negative error.- Watch the changed response and, by having a physical system in your mind, try to understand why it changed the way it did.
###Code
# Preparatory cell
X0 = 0.0
K = numpy.matrix([8/15,-4.4,-4])
L = numpy.matrix([[66],[107/3]])
Aw = matrixWidget(3,3)
Aw.setM(A)
Bw = matrixWidget(3,1)
Bw.setM(B)
Cw = matrixWidget(1,3)
Cw.setM(C)
X0w = widgets.FloatText(
value=X0,
description='',
disabled=False
)
Kw = matrixWidget(1,3)
Kw.setM(K)
Lw = matrixWidget(2,1)
Lw.setM(L)
eig1c = matrixWidget(1,1)
eig2c = matrixWidget(2,1)
eig3c = matrixWidget(1,1)
eig1c.setM(numpy.matrix([-15.]))
eig2c.setM(numpy.matrix([[-2.8],[-1.0]]))
eig3c.setM(numpy.matrix([-15.]))
eig2o = matrixWidget(2,1)
eig3o = matrixWidget(1,1)
eig2o.setM(numpy.matrix([[-10.],[0.]]))
eig3o.setM(numpy.matrix([-10.]))
# Misc
#create dummy widget
DW = widgets.FloatText(layout=widgets.Layout(width='0px', height='0px'))
#create button widget
START = widgets.Button(
description='Test',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Test',
icon='check'
)
def on_start_button_clicked(b):
#This is a workaround to have intreactive_output call the callback:
# force the value of the dummy widget to change
if DW.value> 0 :
DW.value = -1
else:
DW.value = 1
pass
START.on_click(on_start_button_clicked)
# Define type of method
selm = widgets.Dropdown(
options= ['Set K and L', 'Set the eigenvalues'],
value= 'Set the eigenvalues',
description='',
disabled=False
)
# Define the number of complex eigenvalues
sele = widgets.Dropdown(
options= ['0 complex eigenvalues', '2 complex eigenvalues'],
value= '2 complex eigenvalues',
description='Complex eigenvalues:',
style = {'description_width': 'initial'},
disabled=False
)
#define type of ipout
selu = widgets.Dropdown(
options=['impulse', 'step', 'sinusoid', 'square wave'],
value='step',
description='Type of reference:',
style = {'description_width': 'initial'},
disabled=False
)
# Define the values of the input
u = widgets.FloatSlider(
value=2,
min=0,
max=4,
step=0.1,
description='Reference:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f',
)
period = widgets.FloatSlider(
value=0.5,
min=0.001,
max=10,
step=0.001,
description='Period: ',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f',
)
gain_w2 = widgets.FloatText(
value=1.,
description='',
disabled=True
)
simTime = widgets.FloatText(
value=3,
description='',
disabled=False
)
# Support functions
def eigen_choice(sele):
if sele == '0 complex eigenvalues':
eig1c.children[0].children[0].disabled = False
eig2c.children[1].children[0].disabled = True
eig2o.children[1].children[0].disabled = True
eig = 0
if sele == '2 complex eigenvalues':
eig1c.children[0].children[0].disabled = True
eig2c.children[1].children[0].disabled = False
eig2o.children[1].children[0].disabled = False
eig = 2
return eig
def method_choice(selm):
if selm == 'Set K and L':
method = 1
sele.disabled = True
if selm == 'Set the eigenvalues':
method = 2
sele.disabled = False
return method
# Reduced system
Ar = numpy.matrix('0 1; 0 0')
Br = numpy.matrix([[0],[1/1.3E-02]])
Cr = numpy.matrix('1 0')
Dr = numpy.matrix('0')
def main_callback2(Aw, Bw, X0w, K, L, eig1c, eig2c, eig3c, eig2o, eig3o, u, period, selm, sele, selu, simTime, DW):
eige = eigen_choice(sele)
method = method_choice(selm)
if method == 1:
solc = numpy.linalg.eig(A-B*K)
solo = numpy.linalg.eig(Ar-L*Cr)
if method == 2:
if eige == 0:
K = control.acker(A, B, [eig1c[0,0], eig2c[0,0], eig3c[0,0]])
Kw.setM(K)
L = control.acker(Ar.T, Cr.T, [eig2o[0,0], eig3o[0,0]]).T
Lw.setM(L)
if eige == 2:
K = control.acker(A, B, [eig3c[0,0],
numpy.complex(eig2c[0,0],eig2c[1,0]),
numpy.complex(eig2c[0,0],-eig2c[1,0])])
Kw.setM(K)
L = control.acker(Ar.T, Cr.T, [numpy.complex(eig2o[0,0],eig2o[1,0]),
numpy.complex(eig2o[0,0],-eig2o[1,0])]).T
Lw.setM(L)
sys = control.ss(A,B,numpy.vstack((C,numpy.zeros((B.shape[1],C.shape[1])))),numpy.vstack((D,numpy.eye(B.shape[1]))))
sysC = control.ss(numpy.zeros((1,1)),
numpy.zeros((1,numpy.shape(A)[0])),
numpy.zeros((numpy.shape(B)[1],1)),
-K)
sysE = control.ss(Ar-L*Cr,
numpy.hstack((L,Br-L*Dr)),
numpy.matrix('0 1'),
numpy.zeros((1,2)))
sys_append = control.append(sys, sysE, sysC, control.ss(A,B,numpy.eye(A.shape[0]),numpy.zeros((A.shape[0],B.shape[1]))))
Q = []
# y in ingresso a sysE
for i in range(1):
Q.append([B.shape[1]+i+1, i+2])
# u in ingresso a sysE
for i in range(B.shape[1]):
Q.append([B.shape[1]+1+i+1, C.shape[0]+i+1])
# u in ingresso a sys
for i in range(B.shape[1]):
Q.append([i+1, C.shape[0]+B.shape[1]+1+i+1])
# u in ingresso al sistema che ha come uscite gli stati reali
for i in range(B.shape[1]):
Q.append([2*B.shape[1]+1+A.shape[0]+i+1, C.shape[0]+i+1])
# xe in ingresso a sysC
Q.append([2*B.shape[1]+1+1, 1])
Q.append([2*B.shape[1]+1+1+1, 1+1])
Q.append([2*B.shape[1]+1+2+1, C.shape[0]+B.shape[1]+1])
inputv = [i+1 for i in range(B.shape[1])]
outputv = [i+1 for i in range(numpy.shape(sys_append.C)[0])]
sys_CL = control.connect(sys_append,
Q,
inputv,
outputv)
t = numpy.linspace(0, 100000, 2)
t, yout = control.step_response(sys_CL[0,0],T=t)
dcgain = yout[-1]
gain_w2.value = dcgain
if dcgain != 0:
u1 = u/gain_w2.value
else:
print('The feedforward gain is set to 0 and it changed to 1.')
u1 = u/1
print('The static gain of the closed-loop system (from the reference to the output) is: %.5f' %dcgain)
X0w1 = numpy.zeros((2*A.shape[0]+2,1))
X0w1[A.shape[0]+1,0] = X0w
if simTime != 0:
T = numpy.linspace(0, simTime, 10000)
else:
T = numpy.linspace(0, 1, 10000)
if selu == 'impulse': #selu
U = [0 for t in range(0,len(T))]
U[0] = u
U1 = [0 for t in range(0,len(T))]
U1[0] = u1
T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1)
if selu == 'step':
U = [u for t in range(0,len(T))]
U1 = [u1 for t in range(0,len(T))]
T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1)
if selu == 'sinusoid':
U = u*numpy.sin(2*numpy.pi/period*T)
U1 = u1*numpy.sin(2*numpy.pi/period*T)
T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1)
if selu == 'square wave':
U = u*numpy.sign(numpy.sin(2*numpy.pi/period*T))
U1 = u1*numpy.sign(numpy.sin(2*numpy.pi/period*T))
T, yout, xout = control.forced_response(sys_CL,T,U1,X0w1)
# N.B. i primi 3 stati di xout sono quelli del sistema, mentre gli ultimi 3 sono quelli dell'osservatore
step_info_dict = control.step_info(sys_CL[0,0],SettlingTimeThreshold=0.05,T=T)
print('Step info: \n\tRise time =',step_info_dict['RiseTime'],'\n\tSettling time (5%) =',step_info_dict['SettlingTime'],'\n\tOvershoot (%)=',step_info_dict['Overshoot'])
print('Max x2 value (%)=', max(abs(yout[C.shape[0]+2*B.shape[1]+1+1]))/(numpy.pi/180*30)*100)
fig = plt.figure(num='Simulation1', figsize=(14,12))
fig.add_subplot(221)
plt.title('Output response')
plt.ylabel('Output')
plt.plot(T,yout[0],T,U,'r--')
plt.xlabel('$t$ [s]')
plt.axvline(x=0,color='black',linewidth=0.8)
plt.axhline(y=0,color='black',linewidth=0.8)
plt.legend(['$y$','Reference'])
plt.grid()
fig.add_subplot(222)
plt.title('Input')
plt.ylabel('$u$')
plt.plot(T,yout[C.shape[0]])
plt.xlabel('$t$ [s]')
plt.axvline(x=0,color='black',linewidth=0.8)
plt.axhline(y=0,color='black',linewidth=0.8)
plt.grid()
fig.add_subplot(223)
plt.title('States response')
plt.ylabel('States')
plt.plot(T,yout[C.shape[0]+2*B.shape[1]+1],
T,yout[C.shape[0]+2*B.shape[1]+1+1],
T,yout[C.shape[0]+2*B.shape[1]+1+2],
T,[numpy.pi/180*30 for i in range(len(T))],'r--',
T,[-numpy.pi/180*30 for i in range(len(T))],'r--')
plt.xlabel('$t$ [s]')
plt.legend(['$x_{1}$','$x_{2}$','$x_{3}$','limit +$x_{2}$','limit -$x_{2}$'])
plt.axvline(x=0,color='black',linewidth=0.8)
plt.axhline(y=0,color='black',linewidth=0.8)
plt.grid()
fig.add_subplot(224)
plt.title('Estimation error')
plt.ylabel('Error')
plt.plot(T,yout[C.shape[0]+2*B.shape[1]+1+2]-yout[C.shape[0]+B.shape[1]])
plt.xlabel('$t$ [s]')
plt.legend(['$e_{3}$'])
plt.axvline(x=0,color='black',linewidth=0.8)
plt.axhline(y=0,color='black',linewidth=0.8)
plt.grid()
#plt.tight_layout()
alltogether2 = widgets.VBox([widgets.HBox([selm,
sele,
selu]),
widgets.Label(' ',border=3),
widgets.HBox([widgets.Label('K:',border=3), Kw,
widgets.Label(' ',border=3),
widgets.Label(' ',border=3),
widgets.Label('Eigenvalues:',border=3),
eig1c,
eig2c,
eig3c,
widgets.Label(' ',border=3),
widgets.Label(' ',border=3)]),
widgets.Label('X0 est.:',border=3), X0w,
widgets.Label(' ',border=3),
widgets.HBox([widgets.Label('L:',border=3), Lw,
widgets.Label(' ',border=3),
widgets.Label(' ',border=3),
widgets.Label('Eigenvalues:',border=3),
eig2o,
eig3o,
widgets.Label(' ',border=3),
widgets.VBox([widgets.Label('Inverse reference gain:',border=3),
widgets.Label('Simulation time (s):',border=3)]),
widgets.VBox([gain_w2,simTime])]),
widgets.Label(' ',border=3),
widgets.HBox([u,
period,
START])])
out2 = widgets.interactive_output(main_callback2, {'Aw':Aw, 'Bw':Bw, 'X0w':X0w, 'K':Kw, 'L':Lw,
'eig1c':eig1c, 'eig2c':eig2c, 'eig3c':eig3c, 'eig2o':eig2o, 'eig3o':eig3o,
'u':u, 'period':period, 'selm':selm, 'sele':sele, 'selu':selu, 'simTime':simTime, 'DW':DW})
out2.layout.height = '870px'
display(out2, alltogether2)
###Output
_____no_output_____ |
analysis/long-fixations/long-fixations.ipynb | ###Markdown
Calculate mean length of active state saccades
###Code
trange_active_len = np.zeros(len(traces))
for i in range(len(traces)):
trange, data, pe_start_index, displacement_index, release_index, step_pos = fitting_functions.importActiveData('../../data/active/fixed/'+traces[i]+'.mat')
trange_active_len[i] = trange[displacement_index]+0.5 # time 0 = 500 ms after saccade
np.mean(trange_active_len)
###Output
_____no_output_____
###Markdown
Compare extrapolations to "ground truth"
###Code
def importLongFixation(filename):
data_file = sio.loadmat(filename)
trange = data_file['trange'][0]
fixation = data_file['fixation'][0]
# fixation = fixation[::72]
return trange, fixation
###Output
_____no_output_____
###Markdown
% change in MSE as a function of number of components
###Code
trange_lens = np.zeros(len(traces))
for i in range(len(traces)):
trange, fixation = importLongFixation('../../data/long-fixations/fixed/'+traces[i]+'_long.mat')
trange_lens[i] = len(trange)
fit_file = sio.loadmat('results/'+traces[i]+'_long.mat')
# lls = fit_file['lls']
mse = fit_file['sses']/len(trange)
mse_best = np.min(mse, axis=1)
delta_mse = (mse_best[1:] - mse_best[:-1])/mse_best[:-1]
plt.subplot(3,3,i+1)
plt.plot(np.arange(5)+2, delta_mse*100,'.-')
plt.title(traces[i],fontsize=10)
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Above we plot the percent change in MSE when moving from $n-1$ to $n$ components (axis label is $n$). For all extrapolations, there was a negligible decrease in MSE after 2 components.
###Code
Tmin = int(np.min(trange_lens))
print(trange[Tmin])
###Output
15.26
###Markdown
The shortest long fixation recording we have ends at 15.26 s after saccade. We will use this time point as the end of a window over which we will evaluate the extrapolation quality, and as the point at which we will add to the cost functions for conservative extrapolations of real active state data.
###Code
eye_pos_averages = np.zeros(len(traces))
relative_errors = np.zeros(len(traces))
for i in range(len(traces)):
trace = traces[i]
trange, fixation = importLongFixation('../../data/long-fixations/fixed/'+trace+'_long.mat')
eye_pos_averages[i] = np.mean(fixation[Tmin-16:Tmin]) # Average calculated over a window of 16 elements = 230 ms
fit_file = sio.loadmat('results/'+trace+'_long.mat')
lls = fit_file['lls']
fits = fit_file['fits']
best_fit_ind = np.argmin(lls[1, :])
# Evaluate extrapolation at time corresponding to middle of window
model_val = fitting_functions.exponentialModel(trange[Tmin-8]-trange[0], fits[1,0][best_fit_ind, :])
relative_errors[i] = (eye_pos_averages[i]-model_val)/model_val
###Output
_____no_output_____
###Markdown
We use a window of 16 elements, from $t =$ 15.03 to 15.26 s after the saccade time, and calculate the average eye position. Then, we compare this to the value of the extrapolated eye position (using the first 0.5-4.8 s) at $t =$ 15.14 s, the middle of the window, by calculating the percent deviation between the extrapolation and the average eye position, with respect to the extrapolation.
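In symbols (notation introduced here for clarity, mirroring the code above): $$\text{relative error} = \frac{\bar{y}_{\text{window}} - \hat{y}(t_{\text{mid}})}{\hat{y}(t_{\text{mid}})},$$ where $\bar{y}_{\text{window}}$ is the eye position averaged over the 15.03–15.26 s window and $\hat{y}(t_{\text{mid}})$ is the extrapolated exponential fit evaluated at $t_{\text{mid}} = 15.14$ s.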
###Code
np.mean(relative_errors), np.std(relative_errors)
###Output
_____no_output_____
###Markdown
Figure 2D, right
###Code
plt.scatter(np.ones(len(traces)), relative_errors)
sio.savemat('all_relative_errors.mat', {'errs':relative_errors})
sio.savemat('relative_errors.mat', {'t':trange[Tmin-8], 'delta':np.min(relative_errors)})
###Output
_____no_output_____ |
examples/Dumbbells/BCC_Calculations/Fe-Mn/FeMn_simulation.ipynb | ###Markdown
Mn Thermodynamic data
###Code
# Jump rates and energy barriers set. Now, let's set the calculations up.
vu0 = 4.4447
vu2 = 5.9297
Dconv=1e-2
predb0, enedb0 = np.ones(1)*np.exp(0.050), np.array([E_f_pdb])
# We'll measure every formation energy relative to the solute formation energy.
preS, eneS = np.ones(1), np.array([0.0])
# Next, interaction or the excess energies and pre-factors for solutes and dumbbells.
preSdb, eneSdb = np.ones(onsagercalculator.thermo.mixedstartindex), \
np.zeros(onsagercalculator.thermo.mixedstartindex)
# Now, we go over the necessary stars and assign interaction energies
for (key, index) in name_to_themo_star.items():
eneSdb[index] = name_to_Ef[key] - E_f_pdb
predb2, enedb2 = np.ones(1), np.array([E_f_mdb])
# Transition state energies - For omega0, omega2 and omega43, the first type is the Johnson jump,
# and the second one is the Rigid jump.
# Omega0 TS eneriges
# taken directly from the paper
preT0, eneT0 = Dconv*vu0*np.ones(1), np.array([E_f_pdb + 0.33541396, E_f_pdb + 0.61091396, E_f_pdb + 0.784315123])
# Omega2 TS energies
Nj2 = len(onsagercalculator.jnet2)
preT2, eneT2 = Dconv*vu2*np.ones(Nj2), np.array([ef_ts_2, ef_ts_2_rigid, ef_ts_2_rot])
# Omega43 TS energies
preT43, eneT43 = Dconv*vu0*np.ones(1), np.array([ef_ts_43])
# Omega1 TS energies
preT1 = Dconv*vu0*np.ones(len(onsagercalculator.jnet1))
eneT1 = np.array([eneT0[i] for i in onsagercalculator.om1types])
# Now, we go over the jumps that are provided and make the necessary changes
for (key, index) in jmpdict.items():
eneT1[index] = Jname_2_ef_ts[key]
eneT1[0] = 0.0
# print(eneT1)
data_Mn = {"puredb_data":(predb0, enedb0), "mixed_db_data":(predb2, enedb2), "omega0_data":(preT0, eneT0),
"omega2_data":(preT2, eneT2),"omega43_data":(preT43, eneT43), "omega1_data":(preT1, eneT1),
"S-db_interaction_data":(preSdb, eneSdb)}
from tqdm import tqdm
# Then we calculate the transport coefficients
diff_aa_Mn = np.zeros(len(temp))
diff_ab_Mn = np.zeros(len(temp))
diff_bb = np.zeros(len(temp))
diff_bb_non_loc = np.zeros(len(temp))
start = time.time()
for i in tqdm(range(len(temp)), position=0, leave=True):
T = temp[i]
kT = kB*T
bFdb0, bFdb2, bFS, bFSdb, bFT0, bFT1, bFT2, bFT3, bFT4 = \
onsagercalculator.preene2betafree(kT, predb0, enedb0, preS, eneS, preSdb, eneSdb, predb2, enedb2,
preT0, eneT0, preT2, eneT2, preT1, eneT1, preT43, eneT43)
# get the probabilities and other data from L_ij
L0bb, (L_uc_aa,L_c_aa), (L_uc_bb,L_c_bb), (L_uc_ab,L_c_ab)=\
onsagercalculator.L_ij(bFdb0, bFT0, bFdb2, bFT2, bFS, bFSdb, bFT1, bFT3, bFT4)
L_aa = L_uc_aa + L_c_aa
L_bb = L_uc_bb + L_c_bb
L_ab = L_uc_ab + L_c_ab
diff_aa_Mn[i] = L_aa[0][0]
diff_ab_Mn[i] = L_ab[0][0]
diff_bb[i] = L_bb[0][0]
diff_bb_non_loc[i] = L0bb[0][0]
print(time.time() - start)
import h5py
with h5py.File("Mn_data.h5","w") as fl:
fl.create_dataset("diff_aa", data=diff_aa_Mn)
fl.create_dataset("diff_ab", data=diff_ab_Mn)
fl.create_dataset("diff_bb_nl", data=diff_bb_non_loc)
fl.create_dataset("diff_bb", data=diff_bb)
fl.create_dataset("Temp", data=temp)
# Now let's do the infinite temperature limit
kT = np.inf
bFdb0, bFdb2, bFS, bFSdb, bFT0, bFT1, bFT2, bFT3, bFT4 = \
onsagercalculator.preene2betafree(kT, predb0, enedb0, preS, eneS, preSdb, eneSdb, predb2, enedb2,
preT0, eneT0, preT2, eneT2, preT1, eneT1, preT43, eneT43)
# bFdicts[i] = [bFdb0, bFdb2, bFS, bFSdb, bFT0, bFT1, bFT2, bFT3, bFT4]
# get the probabilities and other data from L_ij
L0bb, (L_uc_aa,L_c_aa), (L_uc_bb,L_c_bb), (L_uc_ab,L_c_ab)=\
onsagercalculator.L_ij(bFdb0, bFT0, bFdb2, bFT2, bFS, bFSdb, bFT1, bFT3, bFT4)
L_aa = L_uc_aa + L_c_aa
L_bb = L_uc_bb + L_c_bb
L_ab = L_uc_ab + L_c_ab
L_ab[0][0]/L_aa[0][0]
###Output
_____no_output_____ |
src/KDD_Cup_Data_analysis.ipynb | ###Markdown
Get the data and do some preprocessing
###Code
import sys
sys.executable
import numpy as np
import pandas as pd # use version==1.2.5 in case you want to run pandas profiling
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
# Data is available at: https://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html
# description of column names at: https://kdd.ics.uci.edu/databases/kddcup99/kddcup.names
col_names = ['duration', 'protocol_type', 'service', 'flag', 'src_bytes', 'dst_bytes', 'land',
'wrong_fragment', 'urgent', 'hot', 'num_failed_logins', 'logged_in', 'num_compromised',
'root_shell', 'su_attempted', 'num_root', 'num_file_creations', 'num_shells', 'num_access_files',
'num_outbound_cmds', 'is_host_login', 'is_guest_login', 'count', 'srv_count', 'serror_rate',
'srv_serror_rate', 'rerror_rate', 'srv_rerror_rate', 'same_srv_rate', 'diff_srv_rate',
'srv_diff_host_rate', 'dst_host_count', 'dst_host_srv_count', 'dst_host_same_srv_rate',
'dst_host_diff_srv_rate', 'dst_host_same_src_port_rate', 'dst_host_srv_diff_host_rate',
'dst_host_serror_rate', 'dst_host_srv_serror_rate', 'dst_host_rerror_rate',
'dst_host_srv_rerror_rate']
num_col = ['duration', 'src_bytes', 'dst_bytes', 'wrong_fragment', 'urgent', 'hot',
'num_failed_logins', 'num_compromised', 'root_shell', 'su_attempted', 'num_root',
'num_file_creations', 'num_shells', 'num_access_files', 'num_outbound_cmds', 'count',
'srv_count', 'serror_rate', 'srv_serror_rate', 'rerror_rate', 'srv_rerror_rate',
'same_srv_rate', 'diff_srv_rate', 'srv_diff_host_rate', 'dst_host_count',
'dst_host_srv_count', 'dst_host_same_srv_rate', 'dst_host_diff_srv_rate',
'dst_host_same_src_port_rate', 'dst_host_srv_diff_host_rate', 'dst_host_serror_rate',
'dst_host_srv_serror_rate', 'dst_host_rerror_rate', 'dst_host_srv_rerror_rate']
data_path = "../data/kddcup_data"
df = pd.read_csv(data_path, names=col_names+["threat_type"]) # threat type is the target
# do some preprocessing
# print(' ')
df['threat_type'] = df['threat_type'].str.replace('.', '', regex=True)
df['threat_type'].unique()
df['threat_type'].value_counts()
indexNames = df[(df['threat_type'] == 'spy') | (df['threat_type'] == 'perl') | (df['threat_type'] == 'phf')
| (df['threat_type'] == 'multihop') | (df['threat_type'] == 'ftp_write') | (df['threat_type'] == 'loadmodule')
| (df['threat_type'] == 'rootkit') | (df['threat_type'] == 'imap') | (df['threat_type'] == 'warezmaster')
| (df['threat_type'] == 'land') | (df['threat_type'] == 'buffer_overflow') | (df['threat_type'] == 'guess_passwd')
| (df['threat_type'] == 'pod') | (df['threat_type'] == 'teardrop')| (df['threat_type'] == 'warezclient')
| (df['threat_type'] == 'back') | (df['threat_type'] == 'nmap')].index
df.drop(indexNames , inplace=True)
df['threat_type'].value_counts()
###Output
_____no_output_____
###Markdown
https://towardsdatascience.com/how-to-deal-with-imbalanced-multiclass-datasets-in-python-fe0bb3f2b669
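The threat-type counts above are heavily imbalanced, so a rebalancing step can help. A minimal sketch (not part of the original notebook) of one way to do this, assuming we resample every class to a common target count with `sklearn.utils.resample`:
###Code
# Illustrative only: rebalance a labelled dataframe so that every class in
# `target_col` ends up with `n_target` rows (minority classes are up-sampled
# with replacement, majority classes are down-sampled without replacement).
from sklearn.utils import resample
import pandas as pd

def rebalance(frame, target_col, n_target, seed=0):
    parts = []
    for _, group in frame.groupby(target_col):
        parts.append(resample(group,
                              replace=len(group) < n_target,
                              n_samples=int(n_target),
                              random_state=seed))
    return pd.concat(parts).sample(frac=1, random_state=seed)  # shuffle rows
###Output
_____no_output_____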
###Code
count = df['threat_type'].value_counts()
count.plot.bar()
plt.ylabel('Number of records')
plt.xlabel('Target Class')
plt.show()
n_samples = count.median()#.astype(np.int64)
n_samples
# 34 numerical columns are considered for training
num_df = df[num_col]
# Let's remove the numerical columns with constant values
X = num_df.loc[:, (num_df != num_df.iloc[0]).any()].values
# labelencode the target variable
threat_types = df["threat_type"].values
encoder = LabelEncoder()
# encoder = OneHotEncoder()
# use LabelEncoder to encode the threat types in numeric values
y = encoder.fit_transform(threat_types)
# print(' ')
# print("Shape of target vector is... : ",y.shape)
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=13, stratify=y)
scaler = StandardScaler()
a = scaler.fit(X_train)
X_train = a.transform(X_train)
X_test = scaler.transform(X_test)
# print(y_test)
print(len(np.unique(y_train)), len(np.unique(y_test)))
# unique, counts = np.unique(y_train, return_counts=True)
# unique1, counts1 = np.unique(y_test, return_counts=True)
unknwn1 = (np.array(np.unique(y_train, return_counts=True)).T)
unknwn2 = (np.array(np.unique(y_test, return_counts=True)).T)
print(unknwn1)
print(unknwn2)
# Export as a csv
#num_df.to_csv('processed_KDD_cup.csv', index=False)
###Output
_____no_output_____ |
docs/_static/notebooks/serialization.ipynb | ###Markdown
Serializing a TrialThis guide will explain the two different ways to save and reload your results from a Trial.**Note**: The easiest way to use this tutorial is as a Colab notebook, which allows you to dive in with no setup. Install TorchbearerFirst we install torchbearer if needed.
###Code
try:
import torchbearer
except:
!pip install -q torchbearer
import torchbearer
print(torchbearer.__version__)
###Output
0.4.0.dev
###Markdown
Setting up a Mock ExampleLet's assume we have a basic binary classification task where we have 100-dimensional samples as input and a binary label as output.Let's also assume that we would like to solve this problem with a 2-layer neural network.Finally, we also want to keep track of the sum of hidden outputs for some arbitrary reason. Therefore we use the state functionality of Torchbearer.We create a state key for the mock sum we wanted to track using state.
###Code
MOCK = torchbearer.state_key('mock')
###Output
_____no_output_____
###Markdown
Here is our basic 2-layer neural network.
###Code
import torch
import torch.nn as nn
class BasicModel(nn.Module):
def __init__(self):
super(BasicModel, self).__init__()
self.linear1 = nn.Linear(100, 25)
self.linear2 = nn.Linear(25, 1)
def forward(self, x, state):
x = self.linear1(x)
        # The following step is here to showcase a useless but simple example of a forward method that uses state
state[MOCK] = torch.sum(x)
x = self.linear2(x)
return torch.sigmoid(x)
###Output
_____no_output_____
###Markdown
We create a random training dataset and put it in a DataLoader.
###Code
from torch.utils.data import TensorDataset, DataLoader
n_sample = 100
X = torch.rand(n_sample, 100)
y = torch.randint(0, 2, [n_sample, 1]).float()
traingen = DataLoader(TensorDataset(X, y))
###Output
_____no_output_____
###Markdown
Let's say we would like to save the model every time we get a better training loss. Torchbearer's [`Best` checkpoint callback](https://torchbearer.readthedocs.io/en/latest/code/callbacks.html?highlight=best#torchbearer.callbacks.checkpointers.Best) is perfect for this job. We then run the model for 3 epochs.
###Code
import torch.optim as optim
import torch.nn.functional as F
from torchbearer import Trial
model = BasicModel()
# Create a checkpointer that track val_loss and saves a model.pt whenever we get a better loss
checkpointer = torchbearer.callbacks.checkpointers.Best(filepath='model.pt', monitor='loss')
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001)
torchbearer_trial = Trial(model, optimizer=optimizer, criterion=F.binary_cross_entropy, metrics=['loss'],
callbacks=[checkpointer])
torchbearer_trial.with_train_generator(traingen)
_ = torchbearer_trial.run(epochs=3)
###Output
_____no_output_____
###Markdown
Reloading the Trial for More EpochsGiven that we recreate the exact same Trial structure, we can easily resume our run from the last checkpoint. The following code block shows how it's done. Remember that the ``epochs`` parameter we pass to Trial is cumulative. In other words, the following run will bring the entire training to a total of 6 epochs.
###Code
state_dict = torch.load('model.pt')
model = BasicModel()
trial_reloaded = Trial(model, optimizer=optimizer, criterion=F.binary_cross_entropy, metrics=['loss'],
callbacks=[checkpointer])
trial_reloaded.load_state_dict(state_dict)
trial_reloaded.with_train_generator(traingen)
_ = trial_reloaded.run(epochs=6)
###Output
_____no_output_____
###Markdown
Trying to Reload to a PyTorch ModuleWe try to load the ``state_dict`` into a regular PyTorch Module, as described in PyTorch's own documentation [here](https://pytorch.org/docs/stable/notes/serialization.html).
###Code
model = BasicModel()
try:
model.load_state_dict(state_dict)
except AttributeError as e:
print("\n")
print(e)
###Output
'StateKey' object has no attribute 'startswith'
###Markdown
This gives an error. The reason is that the `state_dict` has Trial-related attributes that are unknown to a native PyTorch model. This is why we have the `save_model_params_only` option for our checkpointers. We try again with that option.
###Code
model = BasicModel()
checkpointer = torchbearer.callbacks.checkpointers.Best(filepath='model.pt', monitor='loss', save_model_params_only=True)
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001)
torchbearer_trial = Trial(model, optimizer=optimizer, criterion=F.binary_cross_entropy, metrics=['loss'],
callbacks=[checkpointer])
torchbearer_trial.with_train_generator(traingen)
torchbearer_trial.run(epochs=3)
# Try once again to load the module, forward another random sample for testing
state_dict = torch.load('model.pt')
model = BasicModel()
_ = model.load_state_dict(state_dict)
###Output
_____no_output_____
###Markdown
No errors this time, but we still have to test. Here is a test sample and we run it through the model.
###Code
X_test = torch.rand(5, 100)
try:
model(X_test)
except TypeError as e:
print("\n")
print(e)
###Output
forward() missing 1 required positional argument: 'state'
###Markdown
Now we get a different error, stating that we should also be passing ``state`` as an argument to the module's forward. This should not be a surprise, as we defined the ``state`` parameter in the forward method of ``BasicModel`` as a required argument. Robust Signature for ModuleWe define the model with a better signature this time, so it gracefully handles the problem above.
###Code
class BetterSignatureModel(nn.Module):
def __init__(self):
super(BetterSignatureModel, self).__init__()
self.linear1 = nn.Linear(100, 25)
self.linear2 = nn.Linear(25, 1)
def forward(self, x, **state):
x = self.linear1(x)
# Using kwargs instead of state is safer from a serialization perspective
if state is not None:
state = state
state[MOCK] = torch.sum(x)
x = self.linear2(x)
return torch.sigmoid(x)
###Output
_____no_output_____
###Markdown
Finally, we wrap it up once again to test the new definition of the model.
###Code
model = BetterSignatureModel()
checkpointer = torchbearer.callbacks.checkpointers.Best(filepath='model.pt', monitor='loss', save_model_params_only=True)
optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001)
torchbearer_trial = Trial(model, optimizer=optimizer, criterion=F.binary_cross_entropy, metrics=['loss'],
callbacks=[checkpointer])
torchbearer_trial.with_train_generator(traingen)
torchbearer_trial.run(epochs=3)
# This time, the forward function should work without the need for a state argument
state_dict = torch.load('model.pt')
model = BetterSignatureModel()
model.load_state_dict(state_dict)
X_test = torch.rand(5, 100)
model(X_test)
###Output
_____no_output_____ |
11_JSON/11_JSON.ipynb | ###Markdown
JSON Conversion from a Python Object to JSON Data
###Code
import json
person = {'name': 'John', 'age': 30, 'city': 'New York', 'hasChildren': False,
'titles': ['engineer', 'programmer']}
personJSON = json.dumps(person) # dumps, s stands for a string
print(personJSON)
personJSON = json.dumps(person, indent=4)
print(personJSON)
personJSON = json.dumps(person, indent=4, separators=('; ', '= ')) # not recommended using different separators
print(personJSON)
personJSON = json.dumps(person, indent=4, sort_keys=True)# our keys are sorted alphabetically
print(personJSON)
with open('person.json', 'w') as file:
json.dump(person, file) # not dumps because we want to open it in a file not a string
with open('person.json', 'w') as file:
json.dump(person, file, indent=4)
###Output
_____no_output_____
###Markdown
Conversion from JSON Data to a Python Object (deserialization/decoding)
###Code
personJSON = json.dumps(person, indent=4, sort_keys=True)
person = json.loads(personJSON)# loads, s stands for a string
print(person)
with open ('person.json', 'r') as file:
person = json.load(file)
print(person)
###Output
{'name': 'John', 'age': 30, 'city': 'New York', 'hasChildren': False, 'titles': ['engineer', 'programmer']}
###Markdown
Encode custom object
###Code
class User:
def __init__(self, name, age):
self.name = name
self.age = age
user = User('Szymon', 19)
# encode custom object
# custom encoding method
def encode_user(object):
if isinstance (object, User): # checks if our object is an instance of a class
return {'name': object.name, 'age': object.age, object.__class__.__name__: True}
else:
raise TypeError('Object of type User is not JSON serializable')
userJSON = json.dumps(user, default=encode_user)
print(userJSON)
# second way to do the thing above
from json import JSONEncoder
class UserEncoder(JSONEncoder):
def default(self, object):
if isinstance (object, User): # checks if our object is an instance of a class
return {'name': object.name, 'age': object.age, object.__class__.__name__: True}
        return JSONEncoder.default(self, object)
userJSON = json.dumps(user, cls=UserEncoder)
print(userJSON)
# third way to do the thing above
userJSON = UserEncoder().encode(user) # user was implemented 2 cells above
print(userJSON)
###Output
{"name": "Szymon", "age": 19, "User": true}
###Markdown
Decode custom object (decode object back)
###Code
user = json.loads(userJSON)
print(user) # it's not a user object
print(type(user))
user = json.loads(userJSON)
# custom decoding method
def decode_user(dictionarry):
if User.__name__ in dictionarry:
return User(name=dictionarry['name'], age=dictionarry['age'])
return dictionarry
user = json.loads(userJSON, object_hook=decode_user)
print(user.name)
print(user.age)
print(type(user))
###Output
Szymon
19
<class '__main__.User'>
|
Copy_of_Copy_of_LS_DSPT3_111_A_First_Look_at_Data.ipynb | ###Markdown
Lambda School Data Science - A First Look at Data Lecture - let's explore Python DS libraries and examples!The Python Data Science ecosystem is huge. You've seen some of the big pieces - pandas, scikit-learn, matplotlib. What parts do you want to see more of?
###Code
# Importing dependencies
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# create a list of 50 random integers between 0 and 19 (randint's upper bound is exclusive)
np.random.randint(0, 20, size=50)
#created two variables and printed them
test1 = np.random.randint(0, 20, size=10)
test2 = np.random.randint(0, 30, size=10)
print(test1 ,test2)
#plotting our two variables
plt.scatter(test1, test2)
plt.xlabel('test1')
plt.ylabel('test2')
plt.title('test1 vs test2')
plt.show()
###Output
_____no_output_____
###Markdown
Using the power of Matplotlib, I plotted a scatter graph of the two variables we created in the previous cell.
###Code
# creating a dataframe from scratch with Python lists and our random number generator
d = ["Hamilton", "Vettel", "Le Clerc", "Verstappen", "Raikonnen", "Bottas", "Ocon", "Hulkenberg", "Senna", "Alonso"]
f = ["Mercedes", "Ferrari", "Ferrari", "Red Bull", "Alfa", "Mercedes", "Renault", "Racing Point", "Mclaren", "Jordan"]
z = ["Monza", "Silverstone", "Yas Marina", "Hungaroring", "Baku", "Monaco", "Imola", "Spa", "Interlagos", "Albert Park" ]
w = np.random.randint(0, 15, size=10)
l = np.random.randint(0, 70, size=10)
df = pd.DataFrame({ "Driver": d,"F1 Team": f, "Track": z, "Laps-Led": l, "Wins": w})
df
#show the driver column
df['Driver']
#finding the number of columns and rows
df.shape
#finding the types of data we have in our columns
df.dtypes
#printing all the attributes of a particular row
df['Driver'].iloc[0]
df.iloc[0]
df
#using describe function to get some statistical view about our dataframe
df.describe()
###Output
_____no_output_____
###Markdown
This method gives us a general statistical view of our dataframe.
###Code
df
# tried to find the number of teams that have won more than 10 races
#Save a boolean into the team
df["F1 Team"] = df['Wins'] > 10
df
df['Laps-Led'] > 10
###Output
_____no_output_____
###Markdown
Assignment - now it's your turnPick at least one Python DS library, and using documentation/examples reproduce in this notebook something cool. It's OK if you don't fully understand it or get it 100% working, but do put in effort and look things up.
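A minimal sketch of one possible direction is shown below (illustrative only, assuming the seaborn library is available; it is not a required answer):
###Code
# Example: use seaborn, another popular DS library, to load one of its small
# built-in datasets and visualize every pairwise relationship at once.
import seaborn as sns
import matplotlib.pyplot as plt

iris = sns.load_dataset('iris')      # classic flower-measurement dataset bundled with seaborn
print(iris.describe())               # quick summary statistics
sns.pairplot(iris, hue='species')    # scatter-plot matrix coloured by species
plt.show()
###Output
_____no_output_____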
###Code
# TODO - your code here
# Use what we did live in lecture as an example
###Output
_____no_output_____ |
week 2/02-insurance-linear.ipynb | ###Markdown
Insurance cost prediction using linear regressionIn this assignment we're going to use information like a person's age, sex, BMI, no. of children and smoking habit to predict the price of yearly medical bills. This kind of model is useful for insurance companies to determine the yearly insurance premium for a person. The dataset for this problem is taken from: https://www.kaggle.com/mirichoi0218/insuranceWe will create a model with the following steps:1. Download and explore the dataset2. Prepare the dataset for training3. Create a linear regression model4. Train the model to fit the data5. Make predictions using the trained modelThis assignment builds upon the concepts from the first 2 lectures. It will help to review these Jupyter notebooks:- PyTorch basics: https://jovian.ml/aakashns/01-pytorch-basics- Linear Regression: https://jovian.ml/aakashns/02-linear-regression- Logistic Regression: https://jovian.ml/aakashns/03-logistic-regression- Linear regression (minimal): https://jovian.ml/aakashns/housing-linear-minimal- Logistic regression (minimal): https://jovian.ml/aakashns/mnist-logistic-minimalAs you go through this notebook, you will find a **???** in certain places. Your job is to replace the **???** with appropriate code or values, to ensure that the notebook runs properly end-to-end. In some cases, you'll be required to choose some hyperparameters (learning rate, batch size etc.). Try to experiment with the hyperparameters to get the lowest loss.
###Code
# Uncomment and run the commands below if imports fail
!conda install numpy pytorch torchvision cpuonly -c pytorch -y
!pip install matplotlib --upgrade --quiet
!pip install jovian --upgrade --quiet
import torch
import jovian
import torchvision
import torch.nn as nn
!pip install pandas
import pandas as pd
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torchvision.datasets.utils import download_url
from torch.utils.data import DataLoader, TensorDataset, random_split
project_name='02-insurance-linear-regression' # will be used by jovian.commit
###Output
_____no_output_____
###Markdown
Step 1: Download and explore the dataLet us begin by downloading the data. We'll use the `download_url` function from PyTorch to get the data as a CSV (comma-separated values) file.
###Code
DATASET_URL = "https://hub.jovian.ml/wp-content/uploads/2020/05/insurance.csv"
DATA_FILENAME = "insurance.csv"
download_url(DATASET_URL, '.')
###Output
Using downloaded and verified file: ./insurance.csv
###Markdown
To load the dataset into memory, we'll use the `read_csv` function from the `pandas` library. The data will be loaded as a Pandas dataframe. See this short tutorial to learn more: https://data36.com/pandas-tutorial-1-basics-reading-data-files-dataframes-data-selection/
###Code
dataframe_raw = pd.read_csv(DATA_FILENAME)
dataframe_raw.head()
###Output
_____no_output_____
###Markdown
We're going to do a slight customization of the data, so that every participant receives a slightly different version of the dataset. Fill in your name below as a string (enter at least 5 characters).
###Code
your_name = 'Anurag' # at least 5 characters
###Output
_____no_output_____
###Markdown
The `customize_dataset` function will customize the dataset slightly using your name as a source of random numbers.
###Code
def customize_dataset(dataframe_raw, rand_str):
dataframe = dataframe_raw.copy(deep=True)
# drop some rows
dataframe = dataframe.sample(int(0.95*len(dataframe)), random_state=int(ord(rand_str[0])))
# scale input
dataframe.bmi = dataframe.bmi * ord(rand_str[1])/100.
# scale target
dataframe.charges = dataframe.charges * ord(rand_str[2])/100.
# drop column
if ord(rand_str[3]) % 2 == 1:
dataframe = dataframe.drop(['region'], axis=1)
return dataframe
dataframe = customize_dataset(dataframe_raw, your_name)
dataframe.head()
###Output
_____no_output_____
###Markdown
Let us answer some basic questions about the dataset. **Q: How many rows does the dataset have?**
###Code
num_rows = dataframe.shape[0]
print(num_rows)
###Output
1271
###Markdown
**Q: How many columns does the dataset have?**
###Code
num_cols = dataframe.shape[1]
print(num_cols)
###Output
7
###Markdown
**Q: What are the column titles of the input variables?**
###Code
input_cols = ['age','sex','bmi','children','smoker','region','charges']
###Output
_____no_output_____
###Markdown
**Q: Which of the input columns are non-numeric or categorical variables?**Hint: `sex` is one of them. List the columns that are not numbers.
###Code
categorical_cols = ['sex','smoker','region']
###Output
_____no_output_____
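###Markdown
One way to double-check the answer above programmatically (a small sketch, not required): ask pandas for the columns it did not parse as numbers.
###Code
# Columns with a non-numeric dtype are the categorical candidates.
dataframe.select_dtypes(exclude='number').columns.tolist()
###Output
_____no_output_____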
###Markdown
**Q: What are the column titles of output/target variable(s)?**
###Code
output_cols = ['charges']
###Output
_____no_output_____
###Markdown
**Q: (Optional) What is the minimum, maximum and average value of the `charges` column? Can you show the distribution of values in a graph?**Use this data visualization cheatsheet for reference: https://jovian.ml/aakashns/dataviz-cheatsheet
###Code
# Write your answer here
#Min charges
MinC=dataframe['charges'].min()
#Max charge
MaxC=dataframe['charges'].max()
#Average charge
AvgC=dataframe['charges'].mean()
print('Min charges : ',MinC)
print('Max charges :',MaxC)
print('Average charges :',AvgC)
plt.hist(dataframe['charges'])
plt.xlabel('Charges')
plt.ylabel('Frequency')
plt.show()
###Output
Min charges : 1312.592463
Max charges : 74611.4007717
Average charges : 15553.094681738734
###Markdown
Remember to commit your notebook to Jovian after every step, so that you don't lose your work.
###Code
jovian.commit(project=project_name, environment=None)
###Output
_____no_output_____
###Markdown
Step 2: Prepare the dataset for trainingWe need to convert the data from the Pandas dataframe into PyTorch tensors for training. To do this, the first step is to convert it to numpy arrays. If you've filled out `input_cols`, `categorical_cols` and `output_cols` correctly, the following function will perform the conversion to numpy arrays.
###Code
def dataframe_to_arrays(dataframe):
# Make a copy of the original dataframe
dataframe1 = dataframe.copy(deep=True)
# Convert non-numeric categorical columns to numbers
for col in categorical_cols:
dataframe1[col] = dataframe1[col].astype('category').cat.codes
# Extract input & outupts as numpy arrays
inputs_array = dataframe1[input_cols].to_numpy()
targets_array = dataframe1[output_cols].to_numpy()
return inputs_array, targets_array
###Output
_____no_output_____
###Markdown
Read through the [Pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html) to understand how we're converting categorical variables into numbers.
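As a tiny illustration of that conversion (toy data made up here): each distinct string value is assigned an integer code.
###Code
# 'female' -> 0, 'male' -> 1 (codes follow the sorted order of the categories)
pd.Series(['male', 'female', 'female', 'male']).astype('category').cat.codes
###Output
_____no_output_____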
###Code
inputs_array, targets_array = dataframe_to_arrays(dataframe)
inputs_array, targets_array
###Output
_____no_output_____
###Markdown
**Q: Convert the numpy arrays `inputs_array` and `targets_array` into PyTorch tensors. Make sure that the data type is `torch.float32`.**
###Code
inputs = torch.from_numpy(inputs_array).type (torch.float32)
targets = torch.from_numpy(targets_array). type(torch.float32)
inputs.dtype, targets.dtype
###Output
_____no_output_____
###Markdown
Next, we need to create PyTorch datasets & data loaders for training & validation. We'll start by creating a `TensorDataset`.
###Code
dataset = TensorDataset(inputs, targets)
###Output
_____no_output_____
###Markdown
**Q: Pick a number between `0.1` and `0.2` to determine the fraction of data that will be used for creating the validation set. Then use `random_split` to create training & validation datasets.**
###Code
val_percent = 0.2 # between 0.1 and 0.2
val_size = int(num_rows * val_percent)
train_size = num_rows - val_size
train_ds, val_ds = random_split(dataset,[train_size,val_size])# Use the random_split function to split dataset into 2 parts of the desired length
###Output
_____no_output_____
###Markdown
Finally, we can create data loaders for training & validation.**Q: Pick a batch size for the data loader.**
###Code
batch_size = 32
train_loader = DataLoader(train_ds, batch_size, shuffle=True)
val_loader = DataLoader(val_ds, batch_size)
###Output
_____no_output_____
###Markdown
Let's look at a batch of data to verify everything is working fine so far.
###Code
for xb, yb in train_loader:
print("inputs:", xb)
print("targets:", yb)
break
###Output
inputs: tensor([[5.8000e+01, 1.0000e+00, 4.1800e+01, 0.0000e+00, 0.0000e+00, 3.0000e+00,
1.3298e+04],
[4.1000e+01, 0.0000e+00, 3.0855e+01, 1.0000e+00, 0.0000e+00, 2.0000e+00,
7.9211e+03],
[5.0000e+01, 1.0000e+00, 2.9051e+01, 0.0000e+00, 0.0000e+00, 1.0000e+00,
1.0328e+04],
[3.5000e+01, 0.0000e+00, 2.8737e+01, 0.0000e+00, 0.0000e+00, 0.0000e+00,
6.1167e+03],
[2.8000e+01, 0.0000e+00, 2.9161e+01, 2.0000e+00, 0.0000e+00, 2.0000e+00,
5.0783e+03],
[1.8000e+01, 0.0000e+00, 4.4286e+01, 0.0000e+00, 0.0000e+00, 2.0000e+00,
1.9125e+03],
[5.6000e+01, 0.0000e+00, 3.9380e+01, 1.0000e+00, 0.0000e+00, 3.0000e+00,
1.3659e+04],
[3.9000e+01, 0.0000e+00, 2.5603e+01, 3.0000e+00, 0.0000e+00, 0.0000e+00,
9.3442e+03],
[1.9000e+01, 0.0000e+00, 2.2660e+01, 0.0000e+00, 0.0000e+00, 3.0000e+00,
2.0261e+03],
[3.3000e+01, 0.0000e+00, 2.9365e+01, 0.0000e+00, 0.0000e+00, 1.0000e+00,
5.3486e+03],
[1.9000e+01, 1.0000e+00, 4.9368e+01, 0.0000e+00, 1.0000e+00, 2.0000e+00,
4.6476e+04],
[4.3000e+01, 1.0000e+00, 3.0096e+01, 3.0000e+00, 0.0000e+00, 0.0000e+00,
1.0069e+04],
[3.8000e+01, 1.0000e+00, 4.0755e+01, 1.0000e+00, 0.0000e+00, 0.0000e+00,
7.1132e+03],
[4.1000e+01, 1.0000e+00, 3.7631e+01, 1.0000e+00, 0.0000e+00, 2.0000e+00,
7.3590e+03],
[3.0000e+01, 0.0000e+00, 3.6663e+01, 1.0000e+00, 0.0000e+00, 2.0000e+00,
4.8567e+03],
[1.9000e+01, 0.0000e+00, 3.3022e+01, 0.0000e+00, 1.0000e+00, 1.0000e+00,
3.8970e+04],
[3.5000e+01, 1.0000e+00, 2.9810e+01, 1.0000e+00, 0.0000e+00, 3.0000e+00,
5.5532e+03],
[4.9000e+01, 0.0000e+00, 2.6229e+01, 3.0000e+00, 1.0000e+00, 0.0000e+00,
2.8205e+04],
[1.8000e+01, 0.0000e+00, 3.2082e+01, 0.0000e+00, 0.0000e+00, 0.0000e+00,
8.5688e+03],
[6.2000e+01, 0.0000e+00, 4.1904e+01, 2.0000e+00, 0.0000e+00, 0.0000e+00,
1.7819e+04],
[6.1000e+01, 1.0000e+00, 3.9930e+01, 1.0000e+00, 1.0000e+00, 3.0000e+00,
5.5463e+04],
[3.9000e+01, 0.0000e+00, 3.5112e+01, 2.0000e+00, 0.0000e+00, 1.0000e+00,
8.4351e+03],
[5.0000e+01, 0.0000e+00, 3.0976e+01, 3.0000e+00, 0.0000e+00, 2.0000e+00,
1.2522e+04],
[1.8000e+01, 0.0000e+00, 3.6471e+01, 0.0000e+00, 0.0000e+00, 0.0000e+00,
2.5830e+03],
[2.8000e+01, 0.0000e+00, 3.6421e+01, 0.0000e+00, 0.0000e+00, 2.0000e+00,
3.7108e+03],
[1.8000e+01, 1.0000e+00, 3.7510e+01, 0.0000e+00, 0.0000e+00, 2.0000e+00,
1.3303e+03],
[3.3000e+01, 1.0000e+00, 2.4976e+01, 0.0000e+00, 0.0000e+00, 1.0000e+00,
2.5722e+04],
[4.3000e+01, 1.0000e+00, 2.8072e+01, 5.0000e+00, 0.0000e+00, 2.0000e+00,
1.6940e+04],
[3.7000e+01, 0.0000e+00, 3.3880e+01, 2.0000e+00, 0.0000e+00, 2.0000e+00,
7.3871e+03],
[4.5000e+01, 1.0000e+00, 3.3544e+01, 2.0000e+00, 0.0000e+00, 1.0000e+00,
9.8438e+03],
[4.8000e+01, 1.0000e+00, 4.4621e+01, 2.0000e+00, 1.0000e+00, 1.0000e+00,
5.3471e+04],
[3.0000e+01, 0.0000e+00, 2.4139e+01, 1.0000e+00, 0.0000e+00, 0.0000e+00,
5.5203e+03]])
targets: tensor([[13298.1641],
[ 7921.1250],
[10327.8359],
[ 6116.7471],
[ 5078.3159],
[ 1912.4509],
[13658.7324],
[ 9344.1758],
[ 2026.0621],
[ 5348.5532],
[46475.6133],
[10069.2744],
[ 7113.2158],
[ 7359.0132],
[ 4856.7036],
[38969.8359],
[ 5553.2227],
[28205.0879],
[ 8568.7695],
[17819.4785],
[55462.5391],
[ 8435.1055],
[12522.0918],
[ 2583.0061],
[ 3710.7896],
[ 1330.3029],
[25721.8301],
[16939.6465],
[ 7387.0981],
[ 9843.7520],
[53471.3672],
[ 5520.2983]])
###Markdown
Let's save our work by committing to Jovian.
###Code
jovian.commit(project=project_name, environment=None)
###Output
_____no_output_____
###Markdown
Step 3: Create a Linear Regression ModelOur model itself is a fairly straightforward linear regression (we'll build more complex models in the next assignment).
###Code
input_size = len(input_cols)
output_size = len(output_cols)
###Output
_____no_output_____
###Markdown
**Q: Complete the class definition below by filling out the constructor (`__init__`), `forward`, `training_step` and `validation_step` methods.**Hint: Think carefully about picking a good loss function (it's not cross entropy). Maybe try 2-3 of them and see which one works best. See https://pytorch.org/docs/stable/nn.functional.html#loss-functions
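A quick way to compare a few candidate regression losses on dummy tensors is sketched below (illustrative only; pick whichever trains best on the real data):
###Code
# Compare three common regression losses on the same dummy predictions/targets.
import torch
import torch.nn.functional as F

preds = torch.tensor([10.0, 20.0, 30.0])
targets = torch.tensor([12.0, 18.0, 35.0])
print('L1 (MAE)  :', F.l1_loss(preds, targets).item())
print('MSE       :', F.mse_loss(preds, targets).item())
print('Smooth L1 :', F.smooth_l1_loss(preds, targets).item())  # less sensitive to outliers
###Output
_____no_output_____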
###Code
class InsuranceModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(input_size,output_size) # fill this (hint: use input_size & output_size defined above)
def forward(self, xb):
out = self.linear(xb) # fill this
return out
def training_step(self, batch):
inputs, targets = batch
# Generate predictions
out = self(inputs)
# Calcuate loss
loss = F.l1_loss(out,targets) # fill this
return loss
def validation_step(self, batch):
inputs, targets = batch
# Generate predictions
out = self(inputs)
# Calculate loss
loss = F.l1_loss(out,targets) # fill this
return {'val_loss': loss.detach()}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean() # Combine losses
return {'val_loss': epoch_loss.item()}
def epoch_end(self, epoch, result, num_epochs):
# Print result every 20th epoch
if (epoch+1) % 20 == 0 or epoch == num_epochs-1:
print("Epoch [{}], val_loss: {:.4f}".format(epoch+1, result['val_loss']))
###Output
_____no_output_____
###Markdown
Let us create a model using the `InsuranceModel` class. You may need to come back later and re-run the next cell to reinitialize the model, in case the loss becomes `nan` or `infinity`.
###Code
model = InsuranceModel()
###Output
_____no_output_____
###Markdown
Let's check out the weights and biases of the model using `model.parameters`.
###Code
list(model.parameters())
###Output
_____no_output_____
###Markdown
One final commit before we train the model.
###Code
jovian.commit(project=project_name, environment=None)
###Output
_____no_output_____
###Markdown
Step 4: Train the model to fit the dataTo train our model, we'll use the same `fit` function explained in the lecture. That's the benefit of defining a generic training loop - you can use it for any problem.
###Code
def evaluate(model, val_loader):
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
history = []
optimizer = opt_func(model.parameters(), lr)
for epoch in range(epochs):
# Training Phase
for batch in train_loader:
loss = model.training_step(batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Validation phase
result = evaluate(model, val_loader)
model.epoch_end(epoch, result, epochs)
history.append(result)
return history
###Output
_____no_output_____
###Markdown
**Q: Use the `evaluate` function to calculate the loss on the validation set before training.**
###Code
result = evaluate(model,val_loader) # Use the evaluate function
print(result)
###Output
{'val_loss': 19663.75390625}
###Markdown
We are now ready to train the model. You may need to run the training loop many times, for different number of epochs and with different learning rates, to get a good result. Also, if your loss becomes too large (or `nan`), you may have to re-initialize the model by running the cell `model = InsuranceModel()`. Experiment with this for a while, and try to get to as low a loss as possible. **Q: Train the model 4-5 times with different learning rates & for different number of epochs.**Hint: Vary learning rates by orders of 10 (e.g. `1e-2`, `1e-3`, `1e-4`, `1e-5`, `1e-6`) to figure out what works.
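A small sketch of how such an experiment could be automated (optional; each run re-initializes the model so the comparisons stay independent):
###Code
# Try several learning rates for a few epochs each and compare validation losses.
for lr in [1e-2, 1e-3, 1e-4, 1e-5, 1e-6]:
    model = InsuranceModel()
    history = fit(30, lr, model, train_loader, val_loader)
    print(f'lr={lr:.0e}  final val_loss={history[-1]["val_loss"]:.4f}')
###Output
_____no_output_____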
###Code
epochs =100
lr = 1e-5
history1 = fit(epochs, lr, model, train_loader, val_loader)
epochs = 50
lr = 1e-5
history2 = fit(epochs, lr, model, train_loader, val_loader)
epochs = 50
lr = 1e-4
history3 = fit(epochs, lr, model, train_loader, val_loader)
epochs = 150
lr = 1e-6
history4 = fit(epochs, lr, model, train_loader, val_loader)
epochs = 150
lr = 1e-5
history5 = fit(epochs, lr, model, train_loader, val_loader)
losses = [r['val_loss'] for r in [result] + history4]
plt.plot(losses, '-x')
plt.xlabel('epoch')
plt.ylabel('val_loss')
plt.title('val_loss vs. epochs');
###Output
_____no_output_____
###Markdown
**Q: What is the final validation loss of your model?**
###Code
val_loss = 91.2261
###Output
_____no_output_____
###Markdown
Let's log the final validation loss to Jovian and commit the notebook
###Code
jovian.log_metrics(val_loss=val_loss)
jovian.commit(project=project_name, environment=None)
###Output
_____no_output_____
###Markdown
Now scroll back up, re-initialize the model, and try different sets of values for batch size, number of epochs, learning rate etc. Commit each experiment and use the "Compare" and "View Diff" options on Jovian to compare the different results. Step 5: Make predictions using the trained model**Q: Complete the following function definition to make predictions on a single input**
###Code
def predict_single(input, target, model):
inputs = input.unsqueeze(0)
predictions = model(inputs) # fill this
prediction = predictions[0].detach()
print("Input:", input)
print("Target:", target)
print("Prediction:", prediction)
input, target = val_ds[0]
predict_single(input, target, model)
input, target = val_ds[10]
predict_single(input, target, model)
input, target = val_ds[23]
predict_single(input, target, model)
###Output
Input: tensor([5.9000e+01, 1.0000e+00, 3.2813e+01, 3.0000e+00, 1.0000e+00, 0.0000e+00,
3.5316e+04])
Target: tensor([35316.3750])
Prediction: tensor([41018.1133])
###Markdown
Are you happy with your model's predictions? Try to improve them further. (Optional) Step 6: Try another dataset & blog about itWhile this last step is optional for the submission of your assignment, we highly recommend that you do it. Try to clean up & replicate this notebook (or [this one](https://jovian.ml/aakashns/housing-linear-minimal), or [this one](https://jovian.ml/aakashns/mnist-logistic-minimal) ) for a different linear regression or logistic regression problem. This will help solidify your understanding, and give you a chance to differentiate the generic patterns in machine learning from problem-specific details.Here are some sources to find good datasets:- https://lionbridge.ai/datasets/10-open-datasets-for-linear-regression/- https://www.kaggle.com/rtatman/datasets-for-regression-analysis- https://archive.ics.uci.edu/ml/datasets.php?format=&task=reg&att=&area=&numAtt=&numIns=&type=&sort=nameUp&view=table- https://people.sc.fsu.edu/~jburkardt/datasets/regression/regression.html- https://archive.ics.uci.edu/ml/datasets/wine+quality- https://pytorch.org/docs/stable/torchvision/datasets.htmlWe also recommend that you write a blog about your approach to the problem. Here is a suggested structure for your post (feel free to experiment with it):- Interesting title & subtitle- Overview of what the blog covers (which dataset, linear regression or logistic regression, intro to PyTorch)- Downloading & exploring the data- Preparing the data for training- Creating a model using PyTorch- Training the model to fit the data- Your thoughts on how to experiment with different hyperparameters to reduce loss- Making predictions using the modelAs with the previous assignment, you can [embed Jupyter notebook cells & outputs from Jovian](https://medium.com/jovianml/share-and-embed-jupyter-notebooks-online-with-jovian-ml-df709a03064e) into your blog. Don't forget to share your work on the forum: https://jovian.ml/forum/t/share-your-work-here-assignment-2/4931
###Code
jovian.commit(project=project_name, environment=None)
jovian.commit(project=project_name, environment=None) # try again, kaggle fails sometimes
###Output
_____no_output_____ |
book/_build/jupyter_execute/pandas/06-Renaming Columns and Replace Value.ipynb | ###Markdown
Renaming Columns
###Code
import pandas as pd
# read a dataset of UFO reports into a DataFrame
ufo = pd.read_csv('http://bit.ly/uforeports')
# examine the first 5 rows
ufo.head()
# examine the column names
ufo.columns
# rename two of the columns by using the `rename` method
ufo.rename(columns={'Colors Reported': 'Colors_Reported', 'Shape Reported': 'Shape_Reported'}, inplace=True)
ufo.head()
# replace all of the column names by overwriting the 'columns' attribute
ufo_cols = ['city', 'colors reported', 'shape reported', 'state', 'time']
ufo.columns = ufo_cols
# see modified columns
ufo.columns
# replace the column names during the file reading process by using the 'names' parameter
ufo = pd.read_csv('http://bit.ly/uforeports', header=0, names=ufo_cols)  # header=0 replaces the existing header row instead of adding one
# examine the 5 rows
ufo.head()
# replace all spaces with underscores in the column names by using the 'str.replace' method
ufo.columns = ufo.columns.str.replace(' ', '_')
ufo.columns
# let's look at DataFrame
ufo.head()
###Output
_____no_output_____
###Markdown
Replace
###Code
# read another dataset
fm = pd.read_csv("../data/framingham.csv")
# examine first few rows
fm.head()
# first rename `male` to `sex`
fm.rename(columns={"male": "sex"}, inplace=True)
# Now take a look at dataset
fm.head()
###Output
_____no_output_____
###Markdown
Replace Value for Better Understanding of Dataset__sex__* 1 = Male * 0 = Female __diabetes__* 1 = Yes * 0 = No
###Code
# replace sex column value
fm['sex'].replace({1: "male", 0: "female"}, inplace=True)
# replace diabetes column value
fm['diabetes'].replace({1: "yes", 0: "no"}, inplace=True)
# Examine dataset
fm.head()
###Output
_____no_output_____ |
midterm/.ipynb_checkpoints/Midterm-checkpoint.ipynb | ###Markdown
Midterm 2 - MNIST Classification![front_page.png](attachment:front_page.png) Preparing environment and dataset:* pip install python-mnist* create folder midterm/data* download from https://drive.google.com/open?id=1AQwyy3xP7rjDWMPkWBW4kKOfpkIyAWt8 - 4 files* extract all files to ./data The error of your classifier on the test dataset must be better than 12.0% LeCun et al. 1998 Enter your error at https://goo.gl/forms/JRDKcotcXf5LZM3I3 Commit your code to github/bitbucket into folder midterm
###Code
from mnist import MNIST
import random
mndata = MNIST('.\\data')
trimages, trlabels = mndata.load_training()
teimages, telabels = mndata.load_testing()
index = random.randrange(0, len(trimages)) # choose an index ;-)
print('The amount of train images',len(trimages))
print('The amount of test images',len(teimages))
print('The label of random image',trlabels[index],'The random image is',mndata.display(trimages[index]))
print('Images are grayscale with 28*28 = ',len(trimages[index]))
# Imports assumed for the Keras model below; note that the `init=` argument used in
# the Dense layers is the older Keras 1 name (Keras 2 calls it `kernel_initializer`).
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
y = to_categorical(trlabels)
X = np.array(trimages)
model = Sequential()
model.add(Dense(128, input_dim=784, init='uniform', activation='relu'))
model.add(Dense(64, init='uniform', activation='relu'))
model.add(Dense(y.shape[1], init='uniform', activation='sigmoid'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
model.fit(X, y, epochs=100, batch_size=2000, verbose=1)
from sklearn.metrics import accuracy_score
teX = np.array(teimages)
accuracy_score(model.predict_classes(teX), np.array(telabels))
accuracy_score(model.predict_classes(X), np.array(trlabels))
###Output
_____no_output_____ |
Code/hw1.ipynb | ###Markdown
118 columns in total:+ 0: id+ 1~37: State+ 38~41: COVID-like illness (5 days)+ 42~49: Behavior Indicators (5 days)+ 50~52: Medical Health Indicators (5 days)+ 53: Tested Positive Cases (5 days)$$1 + 37 + 4\times 5 + 8\times 5 + 3\times 5 + 1\times 5 = 118$$ Dataset+ summary statistics+ missing values+ feature correlation+ feature scaling
###Code
# drop the 'id' column
coulmns = train_csv.columns
train_dataset = train_csv[coulmns[1:]]
coulmns = test_csv.columns
test_dataset = test_csv[coulmns[1:]]
# count the number of NaN values in each column
nacount = train_dataset.isna().sum()
print(f"Total number of NaN elements: {nacount.sum()}")
print(nacount)
import csv
def save_pred(preds, save_path):
with open(save_path, 'w') as f:
writer = csv.writer(f)
writer.writerow(['id', 'tested_positive'])
for i, p in enumerate(preds):
writer.writerow([i, p])
###Output
_____no_output_____
###Markdown
Baseline**All original features, linear regression**
###Code
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
def cross_val(model, x, y):
scores = cross_val_score(model, x, y, scoring='neg_mean_squared_error', cv=10)
print(np.mean(np.sqrt(-scores)))
lin_reg = LinearRegression()
# use all of the original features
cross_val(lin_reg, train_dataset.iloc[:, :-1], train_dataset.iloc[:, -1])
# save test result
lin_reg = LinearRegression()
lin_reg.fit(train_dataset.iloc[:, :-1], train_dataset.iloc[:, -1])
preds = lin_reg.predict(test_dataset)
save_pred(preds, './plain_line_reg.txt')
###Output
_____no_output_____
###Markdown
**All original features, decision tree**
###Code
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor()
# use all of the original features
cross_val(tree_reg, train_dataset.iloc[:, :-1], train_dataset.iloc[:, -1])
# save test results
tree_reg = DecisionTreeRegressor()
tree_reg.fit(train_dataset.iloc[:, :-1], train_dataset.iloc[:, -1])
preds = tree_reg.predict(test_dataset)
save_pred(preds, './plain_tree_reg.txt')
###Output
_____no_output_____
###Markdown
Feature Selection
###Code
# correlation matrix between the features
corr_matrix = train_dataset.corr()
# check the correlation between tested_positive.4 and the other features
positive4_coor = corr_matrix['tested_positive.4'].sort_values(ascending=False)
mask = positive4_coor > 0.5
print(f'Number of features with correlation above 0.5: {sum(mask)}')
# print(positive4_coor[mask])
###Output
Number of features with correlation above 0.5: 35
###Markdown
**Selected Original features, linear regression**
###Code
lin_reg = LinearRegression()
# use the original features most correlated with the target
selcted_columns = list(positive4_coor.index[mask])
cross_val(lin_reg, train_dataset[selcted_columns[1:]], train_dataset[selcted_columns[0]])
# save test result
lin_reg = LinearRegression()
lin_reg.fit(train_dataset[selcted_columns[1:]], train_dataset[selcted_columns[0]])
preds = lin_reg.predict(test_dataset[selcted_columns[1:]])
save_pred(preds, './plain_line_reg_with_feature_selction.txt')
###Output
_____no_output_____
###Markdown
**Selected Original features, Decision Tree**
###Code
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor()
# use the original features most correlated with the target
selcted_columns = list(positive4_coor.index[mask])
cross_val(tree_reg, train_dataset[selcted_columns[1:]], train_dataset[selcted_columns[0]])
# save test result
tree_reg = DecisionTreeRegressor()
tree_reg.fit(train_dataset[selcted_columns[1:]], train_dataset[selcted_columns[0]])
preds = tree_reg.predict(test_dataset[selcted_columns[1:]])
save_pred(preds, './plain_tree_reg_with_feature_selction.txt')
###Output
_____no_output_____
###Markdown
**Feature Scaling**
###Code
from sklearn.base import BaseEstimator, TransformerMixin
class NormalScaler(BaseEstimator, TransformerMixin):
def __init__(self, skip=None):
self.skip = skip
def fit(self, x, y=None):
return self
def transform(self, x):
for col in x.columns:
if self.skip not in col and x[col].max() > 1:
mean = x[col].mean()
std = x[col].std()
x[col] = x[col].map(lambda i: (i - mean) / std)
return x
class MaxminScaler(BaseEstimator, TransformerMixin):
def __init__(self, skip=None):
self.skip = skip
def fit(self, x, y=None):
return self
def transform(self, x):
for col in x.columns:
if self.skip not in col and x[col].max() > 1:
max_v = x[col].max()
min_v = x[col].min()
x[col] = x[col].map(lambda i: (i - min_v) / (max_v - min_v))
return x
from sklearn.pipeline import Pipeline
normal_scaled_pipeline = Pipeline([('std_scaler', NormalScaler('tested_positive'))])
train_dataset_normal_scaled = normal_scaled_pipeline.transform(train_dataset.copy())
test_dataset_normal_scaled = normal_scaled_pipeline.transform(test_dataset.copy())
maxmin_scaled_pipeline = Pipeline([('maxmin_scaler', MaxminScaler('tested_positive'))])
train_dataset_maxmin_scaled = maxmin_scaled_pipeline.transform(train_dataset.copy())
test_dataset_maxmin_scaled = maxmin_scaled_pipeline.transform(test_dataset.copy())
train_dataset_normal_scaled.head()
train_dataset_maxmin_scaled.head()
lin_reg = LinearRegression()
cross_val(lin_reg, train_dataset_normal_scaled.iloc[:, :-1], train_dataset_normal_scaled.iloc[:, -1])
lin_reg = LinearRegression()
cross_val(lin_reg, train_dataset_maxmin_scaled.iloc[:, :-1], train_dataset_maxmin_scaled.iloc[:, -1])
# save test result
lin_reg = LinearRegression()
lin_reg.fit(train_dataset_normal_scaled.iloc[:, :-1], train_dataset_normal_scaled.iloc[:, -1])
preds = lin_reg.predict(test_dataset_normal_scaled)
save_pred(preds, './line_reg_with_normal_scaled_features.txt')
# save test result
lin_reg = LinearRegression()
lin_reg.fit(train_dataset_maxmin_scaled.iloc[:, :-1], train_dataset_maxmin_scaled.iloc[:, -1])
preds = lin_reg.predict(test_dataset_maxmin_scaled)
save_pred(preds, './tree_reg_with_normal_scaled_features.txt')
# correlation matrix between the features
corr_matrix = train_dataset_normal_scaled.corr()
# check the correlation between tested_positive.4 and the other features
positive4_coor = corr_matrix['tested_positive.4'].sort_values(ascending=False)
scores = []
thrs = []
for thr in np.linspace(0.1, 0.9, num=100):
mask = positive4_coor > thr
    # print(f'Number of features with correlation above {thr}: {sum(mask)}')
thrs.append(thr)
lin_reg = LinearRegression()
    # use the original features most correlated with the target
selcted_columns = list(positive4_coor.index[mask])
x, y = train_dataset_normal_scaled[selcted_columns[1:]], train_dataset_normal_scaled[selcted_columns[0]]
score_list = cross_val_score(lin_reg, x, y, scoring='neg_mean_squared_error', cv=10)
scores.append(np.mean(np.sqrt(-score_list)))
print(f"minimal score: {min(scores)}, index {np.argmin(scores)}, thr: {thrs[np.argmin(scores)]}")
# save test result
# correlation matrix between the features
corr_matrix = train_dataset_normal_scaled.corr()
# check the correlation between tested_positive.4 and the other features
positive4_coor = corr_matrix['tested_positive.4'].sort_values(ascending=False)
mask = positive4_coor > thrs[np.argmin(scores)]
# use the original features most correlated with the target
selcted_columns = list(positive4_coor.index[mask])
x = train_dataset_normal_scaled[selcted_columns[1:]]
y = train_dataset_normal_scaled[selcted_columns[0]]
lin_reg = LinearRegression()
lin_reg.fit(x, y)
preds = lin_reg.predict(test_dataset_normal_scaled[selcted_columns[1:]])
save_pred(preds, './line_reg_with_selected_normal_scaled_features.txt')
###Output
_____no_output_____
###Markdown
Deep Learning
###Code
from torch.utils.data import Dataset, DataLoader, random_split
import numpy as np
import pandas as pd
import sklearn
import torch
from tqdm import tqdm
from pathlib import Path
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
def same_seed(seed):
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def train_valid_split(dataset, valid_ratio, seed):
valid_set_size = int(valid_ratio * len(dataset))
train_set_size = len(dataset) - valid_set_size
train_set, valid_set = random_split(dataset, [train_set_size, valid_set_size], generator=torch.Generator().manual_seed(seed))
return np.array(train_set), np.array(valid_set)
class COVID19Dataset(Dataset):
def __init__(self, x, y=None):
if y is not None:
self.y = torch.from_numpy(y)
else:
self.y = y
self.x = torch.from_numpy(x)
def __len__(self):
return len(self.x)
def __getitem__(self, item):
if self.y is None:
return self.x[item]
else:
return self.x[item], self.y[item]
class Model(torch.nn.Module):
def __init__(self, input_channel):
super(Model, self).__init__()
self.linear1 = torch.nn.Linear(input_channel, 32)
self.linear2 = torch.nn.Linear(32, 16)
self.act = torch.nn.ReLU(inplace=True)
self.linear3 = torch.nn.Linear(16, 1)
def forward(self, x):
x = self.act(self.linear1(x))
# x = self.dropout(x)
x = self.act(self.linear2(x))
x = self.linear3(x)
# x = self.linear4(x)
return x
###Output
_____no_output_____
###Markdown
Training
###Code
from sklearn.model_selection import train_test_split
same_seed(77)
dataset = pd.read_csv(train_data_path)
dataset = dataset[dataset.columns[1:]] # remove 'id' column
corr_matrix = dataset.corr()
target_coor = corr_matrix['tested_positive.4'].sort_values(ascending=False)
mask = target_coor > 0.5
print(f"selected features num: {np.sum(mask)}")
selected_feature_idx = list(target_coor.index[mask])
x_dataset = dataset[selected_feature_idx[1:]]
y_dataset = dataset.iloc[:, -1]
x_train, x_val, y_train, y_val = train_test_split(x_dataset, y_dataset, test_size=0.2, random_state=77)
print(x_train.shape, x_val.shape, y_train.shape, y_val.shape)
train_loader = DataLoader(COVID19Dataset(x_train.values, y_train.values),
batch_size=16,
shuffle=True,
num_workers=0,
drop_last=True)
val_loader = DataLoader(COVID19Dataset(x_val.values, y_val.values),
batch_size=8,
shuffle=False)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Model(input_channel=len(selected_feature_idx[1:])).to(device=device)
loss_fcn = torch.nn.MSELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.00001, momentum=0.9, weight_decay=1e-4, nesterov=True)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-4)  # note: this AdamW optimizer overrides the SGD optimizer on the previous line and is the one actually used
total_epoch = 600
for epoch in range(total_epoch):
with tqdm(train_loader, total=len(train_loader)) as tbar:
tbar.set_description(f"epoch {epoch+1}/{total_epoch}")
for i, (x, y) in enumerate(train_loader):
# print(x, y)
model.train()
x = x.float().to(device)
preds = model(x).squeeze(dim=1)
loss = loss_fcn(y.float().to(device), preds)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % len(train_loader) == 0:
tot_mse = []
for j, (x, y) in enumerate(val_loader):
model.eval()
preds = model(x.float().to(device)).squeeze(dim=1)
tot_mse.append(loss_fcn(y.float().to(device), preds).detach().cpu().numpy())
tbar.set_postfix_str(f'train loss {loss.item():.3f} ; val loss {np.mean(tot_mse):.3f}')
tbar.update(1)
# for j, (x, y) in enumerate(val_loader):
# model.eval()
# preds = model(x.float().to(device)).squeeze(dim=1)
# print(preds, y, sep='\n')
# print(f"{'=' * 80}")
test_dataset = pd.read_csv(test_data_path)
test_dataset = test_dataset[test_dataset.columns[1:]]
test_dataset = test_dataset[selected_feature_idx[1:]]
test_loader = DataLoader(COVID19Dataset(test_dataset.values, None), batch_size=16, shuffle=False)
model.eval()
preds_all = []
for x in test_loader:
x = x.float().to(device)
preds = model(x).detach().cpu().numpy().squeeze()
# print(preds)
preds_all.extend(preds)
save_pred(preds_all, "./dl_selected_original_feature_adamw.txt")
###Output
_____no_output_____
###Markdown
Train on all of the training data
###Code
same_seed(77)
dataset = pd.read_csv(train_data_path)
dataset = dataset[dataset.columns[1:]] # remove 'id' column
feature_process_pipeline = Pipeline([('maxmin_scaler', MaxminScaler('tested_positive.4'))])  # max-min normalize every feature value except the target column
dataset = feature_process_pipeline.transform(dataset.copy())
corr_matrix = dataset.corr()
target_coor = corr_matrix['tested_positive.4'].sort_values(ascending=False)
mask = target_coor > 0.5  # keep only the features whose correlation with the target exceeds 0.5
selected_feature_idx = list(target_coor.index[mask])
x_dataset = dataset[selected_feature_idx[1:]]
y_dataset = dataset.iloc[:, -1]
x_train, x_val, y_train, y_val = train_test_split(x_dataset, y_dataset, test_size=0.2, random_state=77)  # split into training and validation sets
print(x_train.shape, x_val.shape, y_train.shape, y_val.shape)
x_train.head()
train_loader = DataLoader(COVID19Dataset(x_dataset.values, y_dataset.values),  # use all of the training data
batch_size=16,
shuffle=True,
num_workers=0,
drop_last=True)
val_loader = DataLoader(COVID19Dataset(x_val.values, y_val.values),  # held-out rows for monitoring (note: these rows were also part of the training data above)
batch_size=8,
shuffle=False)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Model(input_channel=len(selected_feature_idx[1:])).to(device=device)
loss_fcn = torch.nn.MSELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.0001, momentum=0.9, weight_decay=1e-4, nesterov=True)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50, 100, 250, 300, 400], gamma=0.1)
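# Added note: with gamma=0.1 and milestones [50, 100, 250, 300, 400], the learning rate decays
# from 1e-4 to 1e-5, 1e-6, 1e-7, 1e-8 and finally 1e-9 at those epochs, which matches the
# lr values reported in the progress bars below.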
total_epoch = 500
for epoch in range(total_epoch):
with tqdm(train_loader, total=len(train_loader)) as tbar:
tbar.set_description(f"{epoch+1}/{total_epoch}")
for i, (x, y) in enumerate(train_loader):
# print(x, y)
model.train()
x = x.float().to(device)
preds = model(x).squeeze(dim=1)
loss = loss_fcn(y.float().to(device), preds)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 100 == 0:
tot_mse = []
for j, (x, y) in enumerate(val_loader):
model.eval()
preds = model(x.float().to(device)).squeeze(dim=1)
tot_mse.append(loss_fcn(y.float().to(device), preds).detach().cpu().numpy())
tbar.set_postfix_str(f'train loss {loss.item():.3f} ; val loss {np.mean(tot_mse):.3f}; lr {lr_scheduler.get_last_lr()[0]:.2e}')
tbar.update(1)
lr_scheduler.step()
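# Added note: no checkpoint is written here; the in-memory `model` is used directly by the
# "Testing" cell below. A torch.save(model.state_dict(), <path>) call could be added if the
# trained weights need to be reused later.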
###Output
1/500: 100%|██████████| 168/168 [00:00<00:00, 838.06it/s, train loss 7.519 ; val loss 9.416; lr 1.00e-04]
2/500: 100%|██████████| 168/168 [00:00<00:00, 1039.66it/s, train loss 2.924 ; val loss 5.910; lr 1.00e-04]
3/500: 100%|██████████| 168/168 [00:00<00:00, 1020.91it/s, train loss 9.121 ; val loss 5.261; lr 1.00e-04]
4/500: 100%|██████████| 168/168 [00:00<00:00, 1033.44it/s, train loss 1.112 ; val loss 4.151; lr 1.00e-04]
5/500: 100%|██████████| 168/168 [00:00<00:00, 979.52it/s, train loss 2.098 ; val loss 3.442; lr 1.00e-04]
6/500: 100%|██████████| 168/168 [00:00<00:00, 1039.81it/s, train loss 2.406 ; val loss 2.882; lr 1.00e-04]
7/500: 100%|██████████| 168/168 [00:00<00:00, 1046.28it/s, train loss 3.055 ; val loss 2.804; lr 1.00e-04]
8/500: 100%|██████████| 168/168 [00:00<00:00, 1008.86it/s, train loss 1.529 ; val loss 2.255; lr 1.00e-04]
9/500: 100%|██████████| 168/168 [00:00<00:00, 973.86it/s, train loss 1.171 ; val loss 2.098; lr 1.00e-04]
10/500: 100%|██████████| 168/168 [00:00<00:00, 1033.44it/s, train loss 2.313 ; val loss 1.987; lr 1.00e-04]
11/500: 100%|██████████| 168/168 [00:00<00:00, 1046.28it/s, train loss 1.487 ; val loss 1.914; lr 1.00e-04]
12/500: 100%|██████████| 168/168 [00:00<00:00, 1026.96it/s, train loss 1.320 ; val loss 1.829; lr 1.00e-04]
13/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 1.534 ; val loss 1.856; lr 1.00e-04]
14/500: 100%|██████████| 168/168 [00:00<00:00, 1027.14it/s, train loss 1.687 ; val loss 1.724; lr 1.00e-04]
15/500: 100%|██████████| 168/168 [00:00<00:00, 985.10it/s, train loss 2.093 ; val loss 1.676; lr 1.00e-04]
16/500: 100%|██████████| 168/168 [00:00<00:00, 910.40it/s, train loss 3.668 ; val loss 1.667; lr 1.00e-04]
17/500: 100%|██████████| 168/168 [00:00<00:00, 1039.83it/s, train loss 1.658 ; val loss 1.661; lr 1.00e-04]
18/500: 100%|██████████| 168/168 [00:00<00:00, 1020.96it/s, train loss 1.289 ; val loss 1.561; lr 1.00e-04]
19/500: 100%|██████████| 168/168 [00:00<00:00, 1033.69it/s, train loss 1.156 ; val loss 1.599; lr 1.00e-04]
20/500: 100%|██████████| 168/168 [00:00<00:00, 962.73it/s, train loss 2.639 ; val loss 1.490; lr 1.00e-04]
21/500: 100%|██████████| 168/168 [00:00<00:00, 979.19it/s, train loss 1.517 ; val loss 1.495; lr 1.00e-04]
22/500: 100%|██████████| 168/168 [00:00<00:00, 1021.04it/s, train loss 0.918 ; val loss 1.468; lr 1.00e-04]
23/500: 100%|██████████| 168/168 [00:00<00:00, 1027.14it/s, train loss 1.806 ; val loss 1.413; lr 1.00e-04]
24/500: 100%|██████████| 168/168 [00:00<00:00, 1052.80it/s, train loss 1.001 ; val loss 1.393; lr 1.00e-04]
25/500: 100%|██████████| 168/168 [00:00<00:00, 1008.87it/s, train loss 1.355 ; val loss 1.383; lr 1.00e-04]
26/500: 100%|██████████| 168/168 [00:00<00:00, 1002.69it/s, train loss 1.535 ; val loss 1.349; lr 1.00e-04]
27/500: 100%|██████████| 168/168 [00:00<00:00, 1033.44it/s, train loss 2.887 ; val loss 1.329; lr 1.00e-04]
28/500: 100%|██████████| 168/168 [00:00<00:00, 1020.73it/s, train loss 1.399 ; val loss 1.305; lr 1.00e-04]
29/500: 100%|██████████| 168/168 [00:00<00:00, 1033.22it/s, train loss 1.428 ; val loss 1.314; lr 1.00e-04]
30/500: 100%|██████████| 168/168 [00:00<00:00, 1039.74it/s, train loss 0.856 ; val loss 1.315; lr 1.00e-04]
31/500: 100%|██████████| 168/168 [00:00<00:00, 1066.19it/s, train loss 5.311 ; val loss 1.320; lr 1.00e-04]
32/500: 100%|██████████| 168/168 [00:00<00:00, 1033.44it/s, train loss 0.532 ; val loss 1.242; lr 1.00e-04]
33/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 0.770 ; val loss 1.398; lr 1.00e-04]
34/500: 100%|██████████| 168/168 [00:00<00:00, 973.70it/s, train loss 0.957 ; val loss 1.224; lr 1.00e-04]
35/500: 100%|██████████| 168/168 [00:00<00:00, 1046.09it/s, train loss 1.769 ; val loss 1.210; lr 1.00e-04]
36/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 1.728 ; val loss 1.213; lr 1.00e-04]
37/500: 100%|██████████| 168/168 [00:00<00:00, 1066.33it/s, train loss 3.686 ; val loss 1.212; lr 1.00e-04]
38/500: 100%|██████████| 168/168 [00:00<00:00, 1059.71it/s, train loss 0.601 ; val loss 1.178; lr 1.00e-04]
39/500: 100%|██████████| 168/168 [00:00<00:00, 1027.13it/s, train loss 1.844 ; val loss 1.198; lr 1.00e-04]
40/500: 100%|██████████| 168/168 [00:00<00:00, 996.75it/s, train loss 1.703 ; val loss 1.175; lr 1.00e-04]
41/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 1.267 ; val loss 1.185; lr 1.00e-04]
42/500: 100%|██████████| 168/168 [00:00<00:00, 1079.59it/s, train loss 1.554 ; val loss 1.155; lr 1.00e-04]
43/500: 100%|██████████| 168/168 [00:00<00:00, 1079.81it/s, train loss 1.409 ; val loss 1.154; lr 1.00e-04]
44/500: 100%|██████████| 168/168 [00:00<00:00, 1039.81it/s, train loss 0.402 ; val loss 1.169; lr 1.00e-04]
45/500: 100%|██████████| 168/168 [00:00<00:00, 1033.44it/s, train loss 1.932 ; val loss 1.143; lr 1.00e-04]
46/500: 100%|██████████| 168/168 [00:00<00:00, 1046.09it/s, train loss 0.365 ; val loss 1.134; lr 1.00e-04]
47/500: 100%|██████████| 168/168 [00:00<00:00, 1039.83it/s, train loss 0.503 ; val loss 1.214; lr 1.00e-04]
48/500: 100%|██████████| 168/168 [00:00<00:00, 1066.24it/s, train loss 2.356 ; val loss 1.145; lr 1.00e-04]
49/500: 100%|██████████| 168/168 [00:00<00:00, 1059.44it/s, train loss 0.623 ; val loss 1.156; lr 1.00e-04]
50/500: 100%|██████████| 168/168 [00:00<00:00, 1073.13it/s, train loss 1.483 ; val loss 1.180; lr 1.00e-04]
51/500: 100%|██████████| 168/168 [00:00<00:00, 1046.28it/s, train loss 0.413 ; val loss 1.134; lr 1.00e-05]
52/500: 100%|██████████| 168/168 [00:00<00:00, 1046.29it/s, train loss 1.589 ; val loss 1.129; lr 1.00e-05]
53/500: 100%|██████████| 168/168 [00:00<00:00, 1053.00it/s, train loss 1.684 ; val loss 1.137; lr 1.00e-05]
54/500: 100%|██████████| 168/168 [00:00<00:00, 973.90it/s, train loss 1.027 ; val loss 1.131; lr 1.00e-05]
55/500: 100%|██████████| 168/168 [00:00<00:00, 1052.78it/s, train loss 1.757 ; val loss 1.129; lr 1.00e-05]
56/500: 100%|██████████| 168/168 [00:00<00:00, 1066.16it/s, train loss 0.483 ; val loss 1.135; lr 1.00e-05]
57/500: 100%|██████████| 168/168 [00:00<00:00, 1059.44it/s, train loss 1.074 ; val loss 1.139; lr 1.00e-05]
58/500: 100%|██████████| 168/168 [00:00<00:00, 1046.28it/s, train loss 1.611 ; val loss 1.134; lr 1.00e-05]
59/500: 100%|██████████| 168/168 [00:00<00:00, 1046.29it/s, train loss 1.148 ; val loss 1.130; lr 1.00e-05]
60/500: 100%|██████████| 168/168 [00:00<00:00, 1027.15it/s, train loss 1.103 ; val loss 1.127; lr 1.00e-05]
61/500: 100%|██████████| 168/168 [00:00<00:00, 1065.94it/s, train loss 2.395 ; val loss 1.126; lr 1.00e-05]
62/500: 100%|██████████| 168/168 [00:00<00:00, 1066.10it/s, train loss 3.181 ; val loss 1.127; lr 1.00e-05]
63/500: 100%|██████████| 168/168 [00:00<00:00, 1052.97it/s, train loss 0.554 ; val loss 1.128; lr 1.00e-05]
64/500: 100%|██████████| 168/168 [00:00<00:00, 1059.63it/s, train loss 1.110 ; val loss 1.131; lr 1.00e-05]
65/500: 100%|██████████| 168/168 [00:00<00:00, 1059.63it/s, train loss 0.655 ; val loss 1.136; lr 1.00e-05]
66/500: 100%|██████████| 168/168 [00:00<00:00, 1046.05it/s, train loss 0.460 ; val loss 1.128; lr 1.00e-05]
67/500: 100%|██████████| 168/168 [00:00<00:00, 1033.27it/s, train loss 1.695 ; val loss 1.130; lr 1.00e-05]
68/500: 100%|██████████| 168/168 [00:00<00:00, 1072.97it/s, train loss 0.743 ; val loss 1.126; lr 1.00e-05]
69/500: 100%|██████████| 168/168 [00:00<00:00, 1066.14it/s, train loss 0.684 ; val loss 1.126; lr 1.00e-05]
70/500: 100%|██████████| 168/168 [00:00<00:00, 1046.48it/s, train loss 0.747 ; val loss 1.129; lr 1.00e-05]
71/500: 100%|██████████| 168/168 [00:00<00:00, 1072.73it/s, train loss 1.287 ; val loss 1.130; lr 1.00e-05]
72/500: 100%|██████████| 168/168 [00:00<00:00, 1059.45it/s, train loss 0.853 ; val loss 1.128; lr 1.00e-05]
73/500: 100%|██████████| 168/168 [00:00<00:00, 1059.43it/s, train loss 0.722 ; val loss 1.126; lr 1.00e-05]
74/500: 100%|██████████| 168/168 [00:00<00:00, 1046.27it/s, train loss 0.745 ; val loss 1.134; lr 1.00e-05]
75/500: 100%|██████████| 168/168 [00:00<00:00, 1039.81it/s, train loss 1.248 ; val loss 1.124; lr 1.00e-05]
76/500: 100%|██████████| 168/168 [00:00<00:00, 1052.80it/s, train loss 1.923 ; val loss 1.127; lr 1.00e-05]
77/500: 100%|██████████| 168/168 [00:00<00:00, 1046.28it/s, train loss 0.888 ; val loss 1.125; lr 1.00e-05]
78/500: 100%|██████████| 168/168 [00:00<00:00, 1046.09it/s, train loss 2.015 ; val loss 1.124; lr 1.00e-05]
79/500: 100%|██████████| 168/168 [00:00<00:00, 1086.77it/s, train loss 1.849 ; val loss 1.124; lr 1.00e-05]
80/500: 100%|██████████| 168/168 [00:00<00:00, 957.10it/s, train loss 2.059 ; val loss 1.125; lr 1.00e-05]
81/500: 100%|██████████| 168/168 [00:00<00:00, 915.50it/s, train loss 0.609 ; val loss 1.124; lr 1.00e-05]
82/500: 100%|██████████| 168/168 [00:00<00:00, 1014.76it/s, train loss 2.854 ; val loss 1.127; lr 1.00e-05]
83/500: 100%|██████████| 168/168 [00:00<00:00, 1066.29it/s, train loss 1.323 ; val loss 1.134; lr 1.00e-05]
84/500: 100%|██████████| 168/168 [00:00<00:00, 1066.17it/s, train loss 1.589 ; val loss 1.125; lr 1.00e-05]
85/500: 100%|██████████| 168/168 [00:00<00:00, 896.15it/s, train loss 0.801 ; val loss 1.124; lr 1.00e-05]
86/500: 100%|██████████| 168/168 [00:00<00:00, 1052.62it/s, train loss 0.912 ; val loss 1.123; lr 1.00e-05]
87/500: 100%|██████████| 168/168 [00:00<00:00, 1026.96it/s, train loss 0.842 ; val loss 1.123; lr 1.00e-05]
88/500: 100%|██████████| 168/168 [00:00<00:00, 1053.02it/s, train loss 0.760 ; val loss 1.122; lr 1.00e-05]
89/500: 100%|██████████| 168/168 [00:00<00:00, 1072.72it/s, train loss 1.342 ; val loss 1.123; lr 1.00e-05]
90/500: 100%|██████████| 168/168 [00:00<00:00, 1059.42it/s, train loss 1.337 ; val loss 1.123; lr 1.00e-05]
91/500: 100%|██████████| 168/168 [00:00<00:00, 1052.63it/s, train loss 0.815 ; val loss 1.129; lr 1.00e-05]
92/500: 100%|██████████| 168/168 [00:00<00:00, 1046.10it/s, train loss 1.393 ; val loss 1.121; lr 1.00e-05]
93/500: 100%|██████████| 168/168 [00:00<00:00, 1039.81it/s, train loss 1.744 ; val loss 1.129; lr 1.00e-05]
94/500: 100%|██████████| 168/168 [00:00<00:00, 1014.58it/s, train loss 0.997 ; val loss 1.130; lr 1.00e-05]
95/500: 100%|██████████| 168/168 [00:00<00:00, 1046.23it/s, train loss 0.716 ; val loss 1.127; lr 1.00e-05]
96/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 0.622 ; val loss 1.121; lr 1.00e-05]
97/500: 100%|██████████| 168/168 [00:00<00:00, 1046.47it/s, train loss 0.947 ; val loss 1.121; lr 1.00e-05]
98/500: 100%|██████████| 168/168 [00:00<00:00, 1046.28it/s, train loss 2.001 ; val loss 1.124; lr 1.00e-05]
99/500: 100%|██████████| 168/168 [00:00<00:00, 1059.44it/s, train loss 2.089 ; val loss 1.120; lr 1.00e-05]
100/500: 100%|██████████| 168/168 [00:00<00:00, 1020.91it/s, train loss 1.154 ; val loss 1.125; lr 1.00e-05]
101/500: 100%|██████████| 168/168 [00:00<00:00, 967.95it/s, train loss 0.688 ; val loss 1.120; lr 1.00e-06]
102/500: 100%|██████████| 168/168 [00:00<00:00, 1066.15it/s, train loss 0.604 ; val loss 1.121; lr 1.00e-06]
103/500: 100%|██████████| 168/168 [00:00<00:00, 1059.25it/s, train loss 0.724 ; val loss 1.120; lr 1.00e-06]
104/500: 100%|██████████| 168/168 [00:00<00:00, 1072.99it/s, train loss 0.795 ; val loss 1.121; lr 1.00e-06]
105/500: 100%|██████████| 168/168 [00:00<00:00, 1059.25it/s, train loss 0.733 ; val loss 1.120; lr 1.00e-06]
106/500: 100%|██████████| 168/168 [00:00<00:00, 1066.14it/s, train loss 1.738 ; val loss 1.120; lr 1.00e-06]
107/500: 100%|██████████| 168/168 [00:00<00:00, 1046.09it/s, train loss 0.408 ; val loss 1.121; lr 1.00e-06]
108/500: 100%|██████████| 168/168 [00:00<00:00, 1059.24it/s, train loss 1.127 ; val loss 1.120; lr 1.00e-06]
109/500: 100%|██████████| 168/168 [00:00<00:00, 1052.83it/s, train loss 1.139 ; val loss 1.120; lr 1.00e-06]
110/500: 100%|██████████| 168/168 [00:00<00:00, 1059.45it/s, train loss 1.203 ; val loss 1.121; lr 1.00e-06]
111/500: 100%|██████████| 168/168 [00:00<00:00, 1072.99it/s, train loss 1.314 ; val loss 1.120; lr 1.00e-06]
112/500: 100%|██████████| 168/168 [00:00<00:00, 1066.14it/s, train loss 1.605 ; val loss 1.121; lr 1.00e-06]
113/500: 100%|██████████| 168/168 [00:00<00:00, 1046.10it/s, train loss 0.952 ; val loss 1.120; lr 1.00e-06]
114/500: 100%|██████████| 168/168 [00:00<00:00, 1039.83it/s, train loss 1.404 ; val loss 1.121; lr 1.00e-06]
115/500: 100%|██████████| 168/168 [00:00<00:00, 905.64it/s, train loss 0.668 ; val loss 1.121; lr 1.00e-06]
116/500: 100%|██████████| 168/168 [00:00<00:00, 1039.64it/s, train loss 2.154 ; val loss 1.120; lr 1.00e-06]
117/500: 100%|██████████| 168/168 [00:00<00:00, 1052.85it/s, train loss 0.952 ; val loss 1.120; lr 1.00e-06]
118/500: 100%|██████████| 168/168 [00:00<00:00, 1072.72it/s, train loss 0.456 ; val loss 1.121; lr 1.00e-06]
119/500: 100%|██████████| 168/168 [00:00<00:00, 1033.26it/s, train loss 1.846 ; val loss 1.120; lr 1.00e-06]
120/500: 100%|██████████| 168/168 [00:00<00:00, 1059.63it/s, train loss 0.808 ; val loss 1.120; lr 1.00e-06]
121/500: 100%|██████████| 168/168 [00:00<00:00, 1046.27it/s, train loss 0.849 ; val loss 1.120; lr 1.00e-06]
122/500: 100%|██████████| 168/168 [00:00<00:00, 1039.85it/s, train loss 0.992 ; val loss 1.121; lr 1.00e-06]
123/500: 100%|██████████| 168/168 [00:00<00:00, 1052.99it/s, train loss 0.407 ; val loss 1.120; lr 1.00e-06]
124/500: 100%|██████████| 168/168 [00:00<00:00, 1072.72it/s, train loss 0.530 ; val loss 1.120; lr 1.00e-06]
125/500: 100%|██████████| 168/168 [00:00<00:00, 1052.63it/s, train loss 0.775 ; val loss 1.120; lr 1.00e-06]
126/500: 100%|██████████| 168/168 [00:00<00:00, 1065.89it/s, train loss 0.919 ; val loss 1.121; lr 1.00e-06]
127/500: 100%|██████████| 168/168 [00:00<00:00, 1066.14it/s, train loss 0.772 ; val loss 1.120; lr 1.00e-06]
128/500: 100%|██████████| 168/168 [00:00<00:00, 1014.76it/s, train loss 1.036 ; val loss 1.120; lr 1.00e-06]
129/500: 100%|██████████| 168/168 [00:00<00:00, 1039.82it/s, train loss 0.743 ; val loss 1.120; lr 1.00e-06]
130/500: 100%|██████████| 168/168 [00:00<00:00, 1039.81it/s, train loss 0.485 ; val loss 1.120; lr 1.00e-06]
131/500: 100%|██████████| 168/168 [00:00<00:00, 1014.97it/s, train loss 0.546 ; val loss 1.120; lr 1.00e-06]
132/500: 100%|██████████| 168/168 [00:00<00:00, 1008.68it/s, train loss 0.868 ; val loss 1.120; lr 1.00e-06]
133/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 1.445 ; val loss 1.120; lr 1.00e-06]
134/500: 100%|██████████| 168/168 [00:00<00:00, 962.74it/s, train loss 0.522 ; val loss 1.120; lr 1.00e-06]
135/500: 100%|██████████| 168/168 [00:00<00:00, 973.71it/s, train loss 1.930 ; val loss 1.120; lr 1.00e-06]
136/500: 100%|██████████| 168/168 [00:00<00:00, 1008.69it/s, train loss 1.226 ; val loss 1.120; lr 1.00e-06]
137/500: 100%|██████████| 168/168 [00:00<00:00, 962.57it/s, train loss 0.694 ; val loss 1.120; lr 1.00e-06]
138/500: 100%|██████████| 168/168 [00:00<00:00, 1046.28it/s, train loss 1.159 ; val loss 1.120; lr 1.00e-06]
139/500: 100%|██████████| 168/168 [00:00<00:00, 968.11it/s, train loss 1.368 ; val loss 1.120; lr 1.00e-06]
140/500: 100%|██████████| 168/168 [00:00<00:00, 979.36it/s, train loss 0.690 ; val loss 1.120; lr 1.00e-06]
141/500: 100%|██████████| 168/168 [00:00<00:00, 1027.14it/s, train loss 1.174 ; val loss 1.120; lr 1.00e-06]
142/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 1.315 ; val loss 1.120; lr 1.00e-06]
143/500: 100%|██████████| 168/168 [00:00<00:00, 1033.45it/s, train loss 0.612 ; val loss 1.120; lr 1.00e-06]
144/500: 100%|██████████| 168/168 [00:00<00:00, 957.10it/s, train loss 0.904 ; val loss 1.121; lr 1.00e-06]
145/500: 100%|██████████| 168/168 [00:00<00:00, 1033.45it/s, train loss 0.593 ; val loss 1.120; lr 1.00e-06]
146/500: 100%|██████████| 168/168 [00:00<00:00, 1020.91it/s, train loss 1.028 ; val loss 1.120; lr 1.00e-06]
147/500: 100%|██████████| 168/168 [00:00<00:00, 1039.81it/s, train loss 0.692 ; val loss 1.120; lr 1.00e-06]
148/500: 100%|██████████| 168/168 [00:00<00:00, 1033.44it/s, train loss 0.721 ; val loss 1.120; lr 1.00e-06]
149/500: 100%|██████████| 168/168 [00:00<00:00, 1065.95it/s, train loss 1.313 ; val loss 1.120; lr 1.00e-06]
150/500: 100%|██████████| 168/168 [00:00<00:00, 1066.14it/s, train loss 0.945 ; val loss 1.120; lr 1.00e-06]
151/500: 100%|██████████| 168/168 [00:00<00:00, 1073.12it/s, train loss 1.776 ; val loss 1.120; lr 1.00e-06]
152/500: 100%|██████████| 168/168 [00:00<00:00, 1033.44it/s, train loss 1.566 ; val loss 1.120; lr 1.00e-06]
153/500: 100%|██████████| 168/168 [00:00<00:00, 1065.93it/s, train loss 1.788 ; val loss 1.120; lr 1.00e-06]
154/500: 100%|██████████| 168/168 [00:00<00:00, 1086.78it/s, train loss 0.296 ; val loss 1.120; lr 1.00e-06]
155/500: 100%|██████████| 168/168 [00:00<00:00, 1033.44it/s, train loss 1.199 ; val loss 1.119; lr 1.00e-06]
156/500: 100%|██████████| 168/168 [00:00<00:00, 1079.59it/s, train loss 1.339 ; val loss 1.120; lr 1.00e-06]
157/500: 100%|██████████| 168/168 [00:00<00:00, 1066.16it/s, train loss 0.170 ; val loss 1.120; lr 1.00e-06]
158/500: 100%|██████████| 168/168 [00:00<00:00, 1059.25it/s, train loss 1.396 ; val loss 1.120; lr 1.00e-06]
159/500: 100%|██████████| 168/168 [00:00<00:00, 1073.15it/s, train loss 0.858 ; val loss 1.120; lr 1.00e-06]
160/500: 100%|██████████| 168/168 [00:00<00:00, 1066.06it/s, train loss 2.059 ; val loss 1.120; lr 1.00e-06]
161/500: 100%|██████████| 168/168 [00:00<00:00, 1066.33it/s, train loss 0.920 ; val loss 1.120; lr 1.00e-06]
162/500: 100%|██████████| 168/168 [00:00<00:00, 1020.91it/s, train loss 0.723 ; val loss 1.119; lr 1.00e-06]
163/500: 100%|██████████| 168/168 [00:00<00:00, 1072.79it/s, train loss 0.630 ; val loss 1.120; lr 1.00e-06]
164/500: 100%|██████████| 168/168 [00:00<00:00, 1072.93it/s, train loss 2.161 ; val loss 1.119; lr 1.00e-06]
165/500: 100%|██████████| 168/168 [00:00<00:00, 1065.95it/s, train loss 1.006 ; val loss 1.120; lr 1.00e-06]
166/500: 100%|██████████| 168/168 [00:00<00:00, 1079.61it/s, train loss 1.243 ; val loss 1.119; lr 1.00e-06]
167/500: 100%|██████████| 168/168 [00:00<00:00, 1072.72it/s, train loss 1.056 ; val loss 1.120; lr 1.00e-06]
168/500: 100%|██████████| 168/168 [00:00<00:00, 1052.81it/s, train loss 1.081 ; val loss 1.120; lr 1.00e-06]
169/500: 100%|██████████| 168/168 [00:00<00:00, 1059.23it/s, train loss 0.393 ; val loss 1.120; lr 1.00e-06]
170/500: 100%|██████████| 168/168 [00:00<00:00, 1066.34it/s, train loss 1.364 ; val loss 1.120; lr 1.00e-06]
171/500: 100%|██████████| 168/168 [00:00<00:00, 1072.75it/s, train loss 0.387 ; val loss 1.120; lr 1.00e-06]
172/500: 100%|██████████| 168/168 [00:00<00:00, 1065.95it/s, train loss 0.536 ; val loss 1.119; lr 1.00e-06]
173/500: 100%|██████████| 168/168 [00:00<00:00, 1072.93it/s, train loss 2.274 ; val loss 1.120; lr 1.00e-06]
174/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 2.897 ; val loss 1.119; lr 1.00e-06]
175/500: 100%|██████████| 168/168 [00:00<00:00, 1046.09it/s, train loss 1.433 ; val loss 1.119; lr 1.00e-06]
176/500: 100%|██████████| 168/168 [00:00<00:00, 1066.22it/s, train loss 1.394 ; val loss 1.120; lr 1.00e-06]
177/500: 100%|██████████| 168/168 [00:00<00:00, 1072.99it/s, train loss 1.445 ; val loss 1.119; lr 1.00e-06]
178/500: 100%|██████████| 168/168 [00:00<00:00, 1059.44it/s, train loss 0.975 ; val loss 1.120; lr 1.00e-06]
179/500: 100%|██████████| 168/168 [00:00<00:00, 1079.51it/s, train loss 2.924 ; val loss 1.120; lr 1.00e-06]
180/500: 100%|██████████| 168/168 [00:00<00:00, 1059.25it/s, train loss 0.841 ; val loss 1.119; lr 1.00e-06]
181/500: 100%|██████████| 168/168 [00:00<00:00, 1066.13it/s, train loss 0.318 ; val loss 1.119; lr 1.00e-06]
182/500: 100%|██████████| 168/168 [00:00<00:00, 996.75it/s, train loss 0.997 ; val loss 1.119; lr 1.00e-06]
183/500: 100%|██████████| 168/168 [00:00<00:00, 1065.95it/s, train loss 0.716 ; val loss 1.119; lr 1.00e-06]
184/500: 100%|██████████| 168/168 [00:00<00:00, 1072.94it/s, train loss 1.190 ; val loss 1.119; lr 1.00e-06]
185/500: 100%|██████████| 168/168 [00:00<00:00, 1072.93it/s, train loss 1.260 ; val loss 1.119; lr 1.00e-06]
186/500: 100%|██████████| 168/168 [00:00<00:00, 1073.18it/s, train loss 0.924 ; val loss 1.121; lr 1.00e-06]
187/500: 100%|██████████| 168/168 [00:00<00:00, 1072.93it/s, train loss 1.646 ; val loss 1.119; lr 1.00e-06]
188/500: 100%|██████████| 168/168 [00:00<00:00, 1066.35it/s, train loss 1.291 ; val loss 1.119; lr 1.00e-06]
189/500: 100%|██████████| 168/168 [00:00<00:00, 1052.81it/s, train loss 0.445 ; val loss 1.119; lr 1.00e-06]
190/500: 100%|██████████| 168/168 [00:00<00:00, 1066.14it/s, train loss 1.815 ; val loss 1.119; lr 1.00e-06]
191/500: 100%|██████████| 168/168 [00:00<00:00, 1065.94it/s, train loss 0.895 ; val loss 1.120; lr 1.00e-06]
192/500: 100%|██████████| 168/168 [00:00<00:00, 1065.95it/s, train loss 1.103 ; val loss 1.119; lr 1.00e-06]
193/500: 100%|██████████| 168/168 [00:00<00:00, 1046.56it/s, train loss 1.217 ; val loss 1.119; lr 1.00e-06]
194/500: 100%|██████████| 168/168 [00:00<00:00, 1072.93it/s, train loss 2.194 ; val loss 1.119; lr 1.00e-06]
195/500: 100%|██████████| 168/168 [00:00<00:00, 1079.60it/s, train loss 1.587 ; val loss 1.119; lr 1.00e-06]
196/500: 100%|██████████| 168/168 [00:00<00:00, 1066.14it/s, train loss 0.642 ; val loss 1.120; lr 1.00e-06]
197/500: 100%|██████████| 168/168 [00:00<00:00, 1040.01it/s, train loss 1.214 ; val loss 1.119; lr 1.00e-06]
198/500: 100%|██████████| 168/168 [00:00<00:00, 1066.38it/s, train loss 2.006 ; val loss 1.119; lr 1.00e-06]
199/500: 100%|██████████| 168/168 [00:00<00:00, 1053.01it/s, train loss 0.873 ; val loss 1.120; lr 1.00e-06]
200/500: 100%|██████████| 168/168 [00:00<00:00, 1052.62it/s, train loss 1.203 ; val loss 1.119; lr 1.00e-06]
201/500: 100%|██████████| 168/168 [00:00<00:00, 1066.35it/s, train loss 0.545 ; val loss 1.119; lr 1.00e-06]
202/500: 100%|██████████| 168/168 [00:00<00:00, 1072.93it/s, train loss 1.647 ; val loss 1.120; lr 1.00e-06]
203/500: 100%|██████████| 168/168 [00:00<00:00, 1027.32it/s, train loss 2.205 ; val loss 1.119; lr 1.00e-06]
204/500: 100%|██████████| 168/168 [00:00<00:00, 1020.76it/s, train loss 0.819 ; val loss 1.119; lr 1.00e-06]
205/500: 100%|██████████| 168/168 [00:00<00:00, 1073.14it/s, train loss 1.714 ; val loss 1.120; lr 1.00e-06]
206/500: 100%|██████████| 168/168 [00:00<00:00, 1039.82it/s, train loss 0.698 ; val loss 1.119; lr 1.00e-06]
207/500: 100%|██████████| 168/168 [00:00<00:00, 1052.81it/s, train loss 0.925 ; val loss 1.120; lr 1.00e-06]
208/500: 100%|██████████| 168/168 [00:00<00:00, 1059.48it/s, train loss 1.204 ; val loss 1.119; lr 1.00e-06]
209/500: 100%|██████████| 168/168 [00:00<00:00, 1046.09it/s, train loss 0.600 ; val loss 1.119; lr 1.00e-06]
210/500: 100%|██████████| 168/168 [00:00<00:00, 1040.01it/s, train loss 1.593 ; val loss 1.119; lr 1.00e-06]
211/500: 100%|██████████| 168/168 [00:00<00:00, 1039.82it/s, train loss 1.334 ; val loss 1.119; lr 1.00e-06]
212/500: 100%|██████████| 168/168 [00:00<00:00, 1046.09it/s, train loss 0.481 ; val loss 1.119; lr 1.00e-06]
213/500: 100%|██████████| 168/168 [00:00<00:00, 1046.27it/s, train loss 1.061 ; val loss 1.119; lr 1.00e-06]
214/500: 100%|██████████| 168/168 [00:00<00:00, 951.70it/s, train loss 0.762 ; val loss 1.119; lr 1.00e-06]
215/500: 100%|██████████| 168/168 [00:00<00:00, 1059.44it/s, train loss 1.434 ; val loss 1.119; lr 1.00e-06]
216/500: 100%|██████████| 168/168 [00:00<00:00, 1039.82it/s, train loss 0.680 ; val loss 1.119; lr 1.00e-06]
217/500: 100%|██████████| 168/168 [00:00<00:00, 1033.44it/s, train loss 2.090 ; val loss 1.119; lr 1.00e-06]
218/500: 100%|██████████| 168/168 [00:00<00:00, 1033.44it/s, train loss 1.149 ; val loss 1.119; lr 1.00e-06]
219/500: 100%|██████████| 168/168 [00:00<00:00, 1040.04it/s, train loss 0.313 ; val loss 1.119; lr 1.00e-06]
220/500: 100%|██████████| 168/168 [00:00<00:00, 1052.81it/s, train loss 0.501 ; val loss 1.119; lr 1.00e-06]
221/500: 100%|██████████| 168/168 [00:00<00:00, 1046.09it/s, train loss 0.960 ; val loss 1.119; lr 1.00e-06]
222/500: 100%|██████████| 168/168 [00:00<00:00, 1066.15it/s, train loss 1.152 ; val loss 1.119; lr 1.00e-06]
223/500: 100%|██████████| 168/168 [00:00<00:00, 1046.09it/s, train loss 0.920 ; val loss 1.119; lr 1.00e-06]
224/500: 100%|██████████| 168/168 [00:00<00:00, 1033.66it/s, train loss 1.518 ; val loss 1.119; lr 1.00e-06]
225/500: 100%|██████████| 168/168 [00:00<00:00, 1065.93it/s, train loss 1.465 ; val loss 1.119; lr 1.00e-06]
226/500: 100%|██████████| 168/168 [00:00<00:00, 1046.52it/s, train loss 0.326 ; val loss 1.119; lr 1.00e-06]
227/500: 100%|██████████| 168/168 [00:00<00:00, 1059.25it/s, train loss 1.099 ; val loss 1.120; lr 1.00e-06]
228/500: 100%|██████████| 168/168 [00:00<00:00, 1053.02it/s, train loss 1.143 ; val loss 1.119; lr 1.00e-06]
229/500: 100%|██████████| 168/168 [00:00<00:00, 1027.13it/s, train loss 0.606 ; val loss 1.119; lr 1.00e-06]
230/500: 100%|██████████| 168/168 [00:00<00:00, 1059.44it/s, train loss 1.000 ; val loss 1.119; lr 1.00e-06]
231/500: 100%|██████████| 168/168 [00:00<00:00, 1002.68it/s, train loss 1.695 ; val loss 1.119; lr 1.00e-06]
232/500: 100%|██████████| 168/168 [00:00<00:00, 1072.74it/s, train loss 1.169 ; val loss 1.120; lr 1.00e-06]
233/500: 100%|██████████| 168/168 [00:00<00:00, 1066.35it/s, train loss 1.140 ; val loss 1.119; lr 1.00e-06]
234/500: 100%|██████████| 168/168 [00:00<00:00, 1072.73it/s, train loss 3.537 ; val loss 1.119; lr 1.00e-06]
235/500: 100%|██████████| 168/168 [00:00<00:00, 1059.62it/s, train loss 0.435 ; val loss 1.119; lr 1.00e-06]
236/500: 100%|██████████| 168/168 [00:00<00:00, 1066.23it/s, train loss 0.922 ; val loss 1.119; lr 1.00e-06]
237/500: 100%|██████████| 168/168 [00:00<00:00, 1046.11it/s, train loss 0.809 ; val loss 1.119; lr 1.00e-06]
238/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 1.882 ; val loss 1.119; lr 1.00e-06]
239/500: 100%|██████████| 168/168 [00:00<00:00, 1079.61it/s, train loss 0.738 ; val loss 1.119; lr 1.00e-06]
240/500: 100%|██████████| 168/168 [00:00<00:00, 1079.60it/s, train loss 0.877 ; val loss 1.119; lr 1.00e-06]
241/500: 100%|██████████| 168/168 [00:00<00:00, 1014.76it/s, train loss 1.014 ; val loss 1.119; lr 1.00e-06]
242/500: 100%|██████████| 168/168 [00:00<00:00, 941.06it/s, train loss 0.771 ; val loss 1.119; lr 1.00e-06]
243/500: 100%|██████████| 168/168 [00:00<00:00, 941.06it/s, train loss 1.279 ; val loss 1.119; lr 1.00e-06]
244/500: 100%|██████████| 168/168 [00:00<00:00, 1014.76it/s, train loss 0.650 ; val loss 1.120; lr 1.00e-06]
245/500: 100%|██████████| 168/168 [00:00<00:00, 990.89it/s, train loss 0.856 ; val loss 1.118; lr 1.00e-06]
246/500: 100%|██████████| 168/168 [00:00<00:00, 1066.14it/s, train loss 1.377 ; val loss 1.119; lr 1.00e-06]
247/500: 100%|██████████| 168/168 [00:00<00:00, 1072.72it/s, train loss 0.645 ; val loss 1.119; lr 1.00e-06]
248/500: 100%|██████████| 168/168 [00:00<00:00, 1052.61it/s, train loss 1.047 ; val loss 1.119; lr 1.00e-06]
249/500: 100%|██████████| 168/168 [00:00<00:00, 1059.43it/s, train loss 0.915 ; val loss 1.119; lr 1.00e-06]
250/500: 100%|██████████| 168/168 [00:00<00:00, 1066.17it/s, train loss 0.964 ; val loss 1.119; lr 1.00e-06]
251/500: 100%|██████████| 168/168 [00:00<00:00, 1059.63it/s, train loss 1.385 ; val loss 1.119; lr 1.00e-07]
252/500: 100%|██████████| 168/168 [00:00<00:00, 1072.74it/s, train loss 1.005 ; val loss 1.119; lr 1.00e-07]
253/500: 100%|██████████| 168/168 [00:00<00:00, 1065.95it/s, train loss 0.733 ; val loss 1.118; lr 1.00e-07]
254/500: 100%|██████████| 168/168 [00:00<00:00, 1059.26it/s, train loss 0.948 ; val loss 1.119; lr 1.00e-07]
255/500: 100%|██████████| 168/168 [00:00<00:00, 1059.44it/s, train loss 1.246 ; val loss 1.118; lr 1.00e-07]
256/500: 100%|██████████| 168/168 [00:00<00:00, 1072.94it/s, train loss 1.026 ; val loss 1.119; lr 1.00e-07]
257/500: 100%|██████████| 168/168 [00:00<00:00, 1086.78it/s, train loss 1.251 ; val loss 1.119; lr 1.00e-07]
258/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 1.299 ; val loss 1.119; lr 1.00e-07]
259/500: 100%|██████████| 168/168 [00:00<00:00, 1059.44it/s, train loss 0.831 ; val loss 1.119; lr 1.00e-07]
260/500: 100%|██████████| 168/168 [00:00<00:00, 1072.72it/s, train loss 0.582 ; val loss 1.119; lr 1.00e-07]
261/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 1.308 ; val loss 1.119; lr 1.00e-07]
262/500: 100%|██████████| 168/168 [00:00<00:00, 1065.95it/s, train loss 1.619 ; val loss 1.119; lr 1.00e-07]
263/500: 100%|██████████| 168/168 [00:00<00:00, 1059.69it/s, train loss 1.132 ; val loss 1.119; lr 1.00e-07]
264/500: 100%|██████████| 168/168 [00:00<00:00, 1072.74it/s, train loss 1.373 ; val loss 1.119; lr 1.00e-07]
265/500: 100%|██████████| 168/168 [00:00<00:00, 1046.51it/s, train loss 1.783 ; val loss 1.119; lr 1.00e-07]
266/500: 100%|██████████| 168/168 [00:00<00:00, 1059.25it/s, train loss 0.836 ; val loss 1.119; lr 1.00e-07]
267/500: 100%|██████████| 168/168 [00:00<00:00, 1072.94it/s, train loss 0.806 ; val loss 1.119; lr 1.00e-07]
268/500: 100%|██████████| 168/168 [00:00<00:00, 1059.25it/s, train loss 0.460 ; val loss 1.119; lr 1.00e-07]
269/500: 100%|██████████| 168/168 [00:00<00:00, 1080.10it/s, train loss 1.897 ; val loss 1.119; lr 1.00e-07]
270/500: 100%|██████████| 168/168 [00:00<00:00, 1066.11it/s, train loss 0.601 ; val loss 1.119; lr 1.00e-07]
271/500: 100%|██████████| 168/168 [00:00<00:00, 1072.95it/s, train loss 1.583 ; val loss 1.118; lr 1.00e-07]
272/500: 100%|██████████| 168/168 [00:00<00:00, 1059.26it/s, train loss 0.811 ; val loss 1.119; lr 1.00e-07]
273/500: 100%|██████████| 168/168 [00:00<00:00, 1066.35it/s, train loss 1.564 ; val loss 1.119; lr 1.00e-07]
274/500: 100%|██████████| 168/168 [00:00<00:00, 1059.24it/s, train loss 1.591 ; val loss 1.119; lr 1.00e-07]
275/500: 100%|██████████| 168/168 [00:00<00:00, 1072.94it/s, train loss 3.323 ; val loss 1.119; lr 1.00e-07]
276/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 1.089 ; val loss 1.119; lr 1.00e-07]
277/500: 100%|██████████| 168/168 [00:00<00:00, 1079.81it/s, train loss 1.225 ; val loss 1.119; lr 1.00e-07]
278/500: 100%|██████████| 168/168 [00:00<00:00, 1059.44it/s, train loss 0.823 ; val loss 1.119; lr 1.00e-07]
279/500: 100%|██████████| 168/168 [00:00<00:00, 1039.81it/s, train loss 1.183 ; val loss 1.119; lr 1.00e-07]
280/500: 100%|██████████| 168/168 [00:00<00:00, 1059.63it/s, train loss 2.012 ; val loss 1.119; lr 1.00e-07]
281/500: 100%|██████████| 168/168 [00:00<00:00, 905.64it/s, train loss 0.515 ; val loss 1.119; lr 1.00e-07]
282/500: 100%|██████████| 168/168 [00:00<00:00, 1065.95it/s, train loss 0.755 ; val loss 1.119; lr 1.00e-07]
283/500: 100%|██████████| 168/168 [00:00<00:00, 1053.00it/s, train loss 1.214 ; val loss 1.119; lr 1.00e-07]
284/500: 100%|██████████| 168/168 [00:00<00:00, 1065.95it/s, train loss 1.343 ; val loss 1.118; lr 1.00e-07]
285/500: 100%|██████████| 168/168 [00:00<00:00, 1079.61it/s, train loss 0.676 ; val loss 1.119; lr 1.00e-07]
286/500: 100%|██████████| 168/168 [00:00<00:00, 1027.15it/s, train loss 1.344 ; val loss 1.119; lr 1.00e-07]
287/500: 100%|██████████| 168/168 [00:00<00:00, 1039.64it/s, train loss 0.422 ; val loss 1.119; lr 1.00e-07]
288/500: 100%|██████████| 168/168 [00:00<00:00, 1039.64it/s, train loss 2.647 ; val loss 1.118; lr 1.00e-07]
289/500: 100%|██████████| 168/168 [00:00<00:00, 1066.16it/s, train loss 0.428 ; val loss 1.119; lr 1.00e-07]
290/500: 100%|██████████| 168/168 [00:00<00:00, 1072.74it/s, train loss 0.393 ; val loss 1.119; lr 1.00e-07]
291/500: 100%|██████████| 168/168 [00:00<00:00, 1079.82it/s, train loss 2.518 ; val loss 1.119; lr 1.00e-07]
292/500: 100%|██████████| 168/168 [00:00<00:00, 1046.09it/s, train loss 0.979 ; val loss 1.119; lr 1.00e-07]
293/500: 100%|██████████| 168/168 [00:00<00:00, 1033.45it/s, train loss 1.049 ; val loss 1.119; lr 1.00e-07]
294/500: 100%|██████████| 168/168 [00:00<00:00, 1072.73it/s, train loss 1.930 ; val loss 1.119; lr 1.00e-07]
295/500: 100%|██████████| 168/168 [00:00<00:00, 1072.74it/s, train loss 1.325 ; val loss 1.119; lr 1.00e-07]
296/500: 100%|██████████| 168/168 [00:00<00:00, 1072.75it/s, train loss 0.393 ; val loss 1.119; lr 1.00e-07]
297/500: 100%|██████████| 168/168 [00:00<00:00, 1066.15it/s, train loss 0.961 ; val loss 1.118; lr 1.00e-07]
298/500: 100%|██████████| 168/168 [00:00<00:00, 1066.15it/s, train loss 0.825 ; val loss 1.118; lr 1.00e-07]
299/500: 100%|██████████| 168/168 [00:00<00:00, 1039.64it/s, train loss 1.140 ; val loss 1.118; lr 1.00e-07]
300/500: 100%|██████████| 168/168 [00:00<00:00, 1046.48it/s, train loss 0.741 ; val loss 1.119; lr 1.00e-07]
301/500: 100%|██████████| 168/168 [00:00<00:00, 1072.74it/s, train loss 1.126 ; val loss 1.118; lr 1.00e-08]
302/500: 100%|██████████| 168/168 [00:00<00:00, 1059.25it/s, train loss 3.281 ; val loss 1.118; lr 1.00e-08]
303/500: 100%|██████████| 168/168 [00:00<00:00, 1066.35it/s, train loss 1.027 ; val loss 1.118; lr 1.00e-08]
304/500: 100%|██████████| 168/168 [00:00<00:00, 1059.51it/s, train loss 0.849 ; val loss 1.118; lr 1.00e-08]
305/500: 100%|██████████| 168/168 [00:00<00:00, 1039.82it/s, train loss 1.064 ; val loss 1.118; lr 1.00e-08]
306/500: 100%|██████████| 168/168 [00:00<00:00, 1066.12it/s, train loss 0.818 ; val loss 1.118; lr 1.00e-08]
307/500: 100%|██████████| 168/168 [00:00<00:00, 1046.10it/s, train loss 0.626 ; val loss 1.118; lr 1.00e-08]
308/500: 100%|██████████| 168/168 [00:00<00:00, 1046.28it/s, train loss 1.236 ; val loss 1.118; lr 1.00e-08]
309/500: 100%|██████████| 168/168 [00:00<00:00, 1072.95it/s, train loss 0.708 ; val loss 1.118; lr 1.00e-08]
310/500: 100%|██████████| 168/168 [00:00<00:00, 1066.16it/s, train loss 0.645 ; val loss 1.118; lr 1.00e-08]
311/500: 100%|██████████| 168/168 [00:00<00:00, 1066.14it/s, train loss 0.445 ; val loss 1.118; lr 1.00e-08]
312/500: 100%|██████████| 168/168 [00:00<00:00, 1059.25it/s, train loss 0.967 ; val loss 1.118; lr 1.00e-08]
313/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 0.909 ; val loss 1.118; lr 1.00e-08]
314/500: 100%|██████████| 168/168 [00:00<00:00, 1046.10it/s, train loss 0.569 ; val loss 1.118; lr 1.00e-08]
315/500: 100%|██████████| 168/168 [00:00<00:00, 1053.05it/s, train loss 2.166 ; val loss 1.118; lr 1.00e-08]
316/500: 100%|██████████| 168/168 [00:00<00:00, 1072.72it/s, train loss 0.725 ; val loss 1.118; lr 1.00e-08]
317/500: 100%|██████████| 168/168 [00:00<00:00, 1014.94it/s, train loss 1.236 ; val loss 1.118; lr 1.00e-08]
318/500: 100%|██████████| 168/168 [00:00<00:00, 1033.26it/s, train loss 0.938 ; val loss 1.118; lr 1.00e-08]
319/500: 100%|██████████| 168/168 [00:00<00:00, 891.14it/s, train loss 0.512 ; val loss 1.118; lr 1.00e-08]
320/500: 100%|██████████| 168/168 [00:00<00:00, 1059.43it/s, train loss 1.135 ; val loss 1.118; lr 1.00e-08]
321/500: 100%|██████████| 168/168 [00:00<00:00, 1066.14it/s, train loss 1.003 ; val loss 1.118; lr 1.00e-08]
322/500: 100%|██████████| 168/168 [00:00<00:00, 1059.25it/s, train loss 0.593 ; val loss 1.118; lr 1.00e-08]
323/500: 100%|██████████| 168/168 [00:00<00:00, 1065.93it/s, train loss 1.023 ; val loss 1.118; lr 1.00e-08]
324/500: 100%|██████████| 168/168 [00:00<00:00, 1066.38it/s, train loss 0.717 ; val loss 1.118; lr 1.00e-08]
325/500: 100%|██████████| 168/168 [00:00<00:00, 1093.84it/s, train loss 3.444 ; val loss 1.118; lr 1.00e-08]
326/500: 100%|██████████| 168/168 [00:00<00:00, 1052.63it/s, train loss 0.823 ; val loss 1.118; lr 1.00e-08]
327/500: 100%|██████████| 168/168 [00:00<00:00, 1059.44it/s, train loss 0.759 ; val loss 1.118; lr 1.00e-08]
328/500: 100%|██████████| 168/168 [00:00<00:00, 1072.74it/s, train loss 0.241 ; val loss 1.118; lr 1.00e-08]
329/500: 100%|██████████| 168/168 [00:00<00:00, 1066.35it/s, train loss 0.548 ; val loss 1.118; lr 1.00e-08]
330/500: 100%|██████████| 168/168 [00:00<00:00, 1052.63it/s, train loss 0.843 ; val loss 1.118; lr 1.00e-08]
331/500: 100%|██████████| 168/168 [00:00<00:00, 1059.64it/s, train loss 1.762 ; val loss 1.118; lr 1.00e-08]
332/500: 100%|██████████| 168/168 [00:00<00:00, 1065.94it/s, train loss 0.683 ; val loss 1.118; lr 1.00e-08]
333/500: 100%|██████████| 168/168 [00:00<00:00, 1072.94it/s, train loss 0.637 ; val loss 1.118; lr 1.00e-08]
334/500: 100%|██████████| 168/168 [00:00<00:00, 1053.00it/s, train loss 1.549 ; val loss 1.118; lr 1.00e-08]
335/500: 100%|██████████| 168/168 [00:00<00:00, 1065.96it/s, train loss 1.201 ; val loss 1.118; lr 1.00e-08]
336/500: 100%|██████████| 168/168 [00:00<00:00, 1066.14it/s, train loss 1.057 ; val loss 1.118; lr 1.00e-08]
337/500: 100%|██████████| 168/168 [00:00<00:00, 1065.98it/s, train loss 0.774 ; val loss 1.118; lr 1.00e-08]
338/500: 100%|██████████| 168/168 [00:00<00:00, 1080.03it/s, train loss 0.622 ; val loss 1.118; lr 1.00e-08]
339/500: 100%|██████████| 168/168 [00:00<00:00, 1066.35it/s, train loss 0.908 ; val loss 1.118; lr 1.00e-08]
340/500: 100%|██████████| 168/168 [00:00<00:00, 1086.58it/s, train loss 1.526 ; val loss 1.118; lr 1.00e-08]
341/500: 100%|██████████| 168/168 [00:00<00:00, 1046.29it/s, train loss 0.882 ; val loss 1.118; lr 1.00e-08]
342/500: 100%|██████████| 168/168 [00:00<00:00, 1072.93it/s, train loss 3.584 ; val loss 1.118; lr 1.00e-08]
343/500: 100%|██████████| 168/168 [00:00<00:00, 1046.09it/s, train loss 0.809 ; val loss 1.118; lr 1.00e-08]
344/500: 100%|██████████| 168/168 [00:00<00:00, 1072.92it/s, train loss 0.731 ; val loss 1.118; lr 1.00e-08]
345/500: 100%|██████████| 168/168 [00:00<00:00, 1086.58it/s, train loss 1.040 ; val loss 1.118; lr 1.00e-08]
346/500: 100%|██████████| 168/168 [00:00<00:00, 1079.81it/s, train loss 0.681 ; val loss 1.118; lr 1.00e-08]
347/500: 100%|██████████| 168/168 [00:00<00:00, 1046.28it/s, train loss 1.399 ; val loss 1.118; lr 1.00e-08]
348/500: 100%|██████████| 168/168 [00:00<00:00, 1027.14it/s, train loss 1.515 ; val loss 1.118; lr 1.00e-08]
349/500: 100%|██████████| 168/168 [00:00<00:00, 1059.42it/s, train loss 0.801 ; val loss 1.118; lr 1.00e-08]
350/500: 100%|██████████| 168/168 [00:00<00:00, 1072.75it/s, train loss 1.517 ; val loss 1.118; lr 1.00e-08]
351/500: 100%|██████████| 168/168 [00:00<00:00, 1079.83it/s, train loss 2.065 ; val loss 1.118; lr 1.00e-08]
352/500: 100%|██████████| 168/168 [00:00<00:00, 1072.93it/s, train loss 0.446 ; val loss 1.118; lr 1.00e-08]
353/500: 100%|██████████| 168/168 [00:00<00:00, 1072.72it/s, train loss 0.514 ; val loss 1.118; lr 1.00e-08]
354/500: 100%|██████████| 168/168 [00:00<00:00, 872.95it/s, train loss 1.603 ; val loss 1.118; lr 1.00e-08]
355/500: 100%|██████████| 168/168 [00:00<00:00, 920.49it/s, train loss 1.895 ; val loss 1.118; lr 1.00e-08]
356/500: 100%|██████████| 168/168 [00:00<00:00, 1002.68it/s, train loss 0.643 ; val loss 1.118; lr 1.00e-08]
357/500: 100%|██████████| 168/168 [00:00<00:00, 996.57it/s, train loss 1.155 ; val loss 1.118; lr 1.00e-08]
358/500: 100%|██████████| 168/168 [00:00<00:00, 979.20it/s, train loss 0.623 ; val loss 1.118; lr 1.00e-08]
359/500: 100%|██████████| 168/168 [00:00<00:00, 951.89it/s, train loss 1.522 ; val loss 1.118; lr 1.00e-08]
360/500: 100%|██████████| 168/168 [00:00<00:00, 1039.82it/s, train loss 1.112 ; val loss 1.118; lr 1.00e-08]
361/500: 100%|██████████| 168/168 [00:00<00:00, 1033.43it/s, train loss 0.508 ; val loss 1.118; lr 1.00e-08]
362/500: 100%|██████████| 168/168 [00:00<00:00, 765.69it/s, train loss 0.831 ; val loss 1.118; lr 1.00e-08]
363/500: 100%|██████████| 168/168 [00:00<00:00, 957.11it/s, train loss 1.090 ; val loss 1.118; lr 1.00e-08]
364/500: 100%|██████████| 168/168 [00:00<00:00, 990.89it/s, train loss 1.418 ; val loss 1.118; lr 1.00e-08]
365/500: 100%|██████████| 168/168 [00:00<00:00, 1065.95it/s, train loss 0.653 ; val loss 1.118; lr 1.00e-08]
366/500: 100%|██████████| 168/168 [00:00<00:00, 1039.83it/s, train loss 1.886 ; val loss 1.118; lr 1.00e-08]
367/500: 100%|██████████| 168/168 [00:00<00:00, 956.75it/s, train loss 1.268 ; val loss 1.118; lr 1.00e-08]
368/500: 100%|██████████| 168/168 [00:00<00:00, 1002.65it/s, train loss 0.897 ; val loss 1.118; lr 1.00e-08]
369/500: 100%|██████████| 168/168 [00:00<00:00, 924.64it/s, train loss 1.249 ; val loss 1.118; lr 1.00e-08]
370/500: 100%|██████████| 168/168 [00:00<00:00, 1035.79it/s, train loss 0.434 ; val loss 1.118; lr 1.00e-08]
371/500: 100%|██████████| 168/168 [00:00<00:00, 1053.01it/s, train loss 1.027 ; val loss 1.118; lr 1.00e-08]
372/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 0.316 ; val loss 1.118; lr 1.00e-08]
373/500: 100%|██████████| 168/168 [00:00<00:00, 853.76it/s, train loss 0.879 ; val loss 1.118; lr 1.00e-08]
374/500: 100%|██████████| 168/168 [00:00<00:00, 634.19it/s, train loss 0.793 ; val loss 1.118; lr 1.00e-08]
375/500: 100%|██████████| 168/168 [00:00<00:00, 1032.36it/s, train loss 1.373 ; val loss 1.118; lr 1.00e-08]
376/500: 100%|██████████| 168/168 [00:00<00:00, 1027.17it/s, train loss 1.175 ; val loss 1.118; lr 1.00e-08]
377/500: 100%|██████████| 168/168 [00:00<00:00, 1066.36it/s, train loss 0.870 ; val loss 1.118; lr 1.00e-08]
378/500: 100%|██████████| 168/168 [00:00<00:00, 1072.74it/s, train loss 0.812 ; val loss 1.118; lr 1.00e-08]
379/500: 100%|██████████| 168/168 [00:00<00:00, 1100.95it/s, train loss 0.742 ; val loss 1.118; lr 1.00e-08]
380/500: 100%|██████████| 168/168 [00:00<00:00, 996.93it/s, train loss 1.447 ; val loss 1.118; lr 1.00e-08]
381/500: 100%|██████████| 168/168 [00:00<00:00, 1079.62it/s, train loss 0.728 ; val loss 1.118; lr 1.00e-08]
382/500: 100%|██████████| 168/168 [00:00<00:00, 1059.25it/s, train loss 0.794 ; val loss 1.118; lr 1.00e-08]
383/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 0.969 ; val loss 1.118; lr 1.00e-08]
384/500: 100%|██████████| 168/168 [00:00<00:00, 1079.81it/s, train loss 2.862 ; val loss 1.118; lr 1.00e-08]
385/500: 100%|██████████| 168/168 [00:00<00:00, 1072.74it/s, train loss 1.419 ; val loss 1.118; lr 1.00e-08]
386/500: 100%|██████████| 168/168 [00:00<00:00, 1039.95it/s, train loss 0.282 ; val loss 1.118; lr 1.00e-08]
387/500: 100%|██████████| 168/168 [00:00<00:00, 1086.77it/s, train loss 1.187 ; val loss 1.118; lr 1.00e-08]
388/500: 100%|██████████| 168/168 [00:00<00:00, 1079.79it/s, train loss 0.925 ; val loss 1.118; lr 1.00e-08]
389/500: 100%|██████████| 168/168 [00:00<00:00, 1039.88it/s, train loss 1.632 ; val loss 1.118; lr 1.00e-08]
390/500: 100%|██████████| 168/168 [00:00<00:00, 1014.59it/s, train loss 2.953 ; val loss 1.118; lr 1.00e-08]
391/500: 100%|██████████| 168/168 [00:00<00:00, 1065.95it/s, train loss 1.268 ; val loss 1.118; lr 1.00e-08]
392/500: 100%|██████████| 168/168 [00:00<00:00, 1100.78it/s, train loss 0.629 ; val loss 1.118; lr 1.00e-08]
393/500: 100%|██████████| 168/168 [00:00<00:00, 1093.84it/s, train loss 0.761 ; val loss 1.118; lr 1.00e-08]
394/500: 100%|██████████| 168/168 [00:00<00:00, 1101.20it/s, train loss 0.890 ; val loss 1.118; lr 1.00e-08]
395/500: 100%|██████████| 168/168 [00:00<00:00, 1066.12it/s, train loss 1.595 ; val loss 1.118; lr 1.00e-08]
396/500: 100%|██████████| 168/168 [00:00<00:00, 1027.36it/s, train loss 1.024 ; val loss 1.118; lr 1.00e-08]
397/500: 100%|██████████| 168/168 [00:00<00:00, 1086.78it/s, train loss 0.753 ; val loss 1.118; lr 1.00e-08]
398/500: 100%|██████████| 168/168 [00:00<00:00, 1072.74it/s, train loss 1.287 ; val loss 1.118; lr 1.00e-08]
399/500: 100%|██████████| 168/168 [00:00<00:00, 1100.78it/s, train loss 0.733 ; val loss 1.118; lr 1.00e-08]
400/500: 100%|██████████| 168/168 [00:00<00:00, 1115.35it/s, train loss 0.874 ; val loss 1.118; lr 1.00e-08]
401/500: 100%|██████████| 168/168 [00:00<00:00, 1093.61it/s, train loss 1.655 ; val loss 1.118; lr 1.00e-09]
402/500: 100%|██████████| 168/168 [00:00<00:00, 1086.78it/s, train loss 1.020 ; val loss 1.118; lr 1.00e-09]
403/500: 100%|██████████| 168/168 [00:00<00:00, 1066.34it/s, train loss 0.484 ; val loss 1.118; lr 1.00e-09]
404/500: 100%|██████████| 168/168 [00:00<00:00, 1072.93it/s, train loss 1.317 ; val loss 1.118; lr 1.00e-09]
405/500: 100%|██████████| 168/168 [00:00<00:00, 1100.98it/s, train loss 0.501 ; val loss 1.118; lr 1.00e-09]
406/500: 100%|██████████| 168/168 [00:00<00:00, 1100.78it/s, train loss 1.020 ; val loss 1.118; lr 1.00e-09]
407/500: 100%|██████████| 168/168 [00:00<00:00, 1073.17it/s, train loss 0.751 ; val loss 1.118; lr 1.00e-09]
408/500: 100%|██████████| 168/168 [00:00<00:00, 1100.99it/s, train loss 1.314 ; val loss 1.118; lr 1.00e-09]
409/500: 100%|██████████| 168/168 [00:00<00:00, 1046.37it/s, train loss 0.938 ; val loss 1.118; lr 1.00e-09]
410/500: 100%|██████████| 168/168 [00:00<00:00, 1052.82it/s, train loss 1.758 ; val loss 1.118; lr 1.00e-09]
411/500: 100%|██████████| 168/168 [00:00<00:00, 1033.44it/s, train loss 0.469 ; val loss 1.118; lr 1.00e-09]
412/500: 100%|██████████| 168/168 [00:00<00:00, 1086.77it/s, train loss 0.898 ; val loss 1.118; lr 1.00e-09]
413/500: 100%|██████████| 168/168 [00:00<00:00, 1086.55it/s, train loss 0.618 ; val loss 1.118; lr 1.00e-09]
414/500: 100%|██████████| 168/168 [00:00<00:00, 1093.64it/s, train loss 1.185 ; val loss 1.118; lr 1.00e-09]
415/500: 100%|██████████| 168/168 [00:00<00:00, 1108.23it/s, train loss 0.712 ; val loss 1.118; lr 1.00e-09]
416/500: 100%|██████████| 168/168 [00:00<00:00, 1059.43it/s, train loss 1.075 ; val loss 1.118; lr 1.00e-09]
417/500: 100%|██████████| 168/168 [00:00<00:00, 1094.04it/s, train loss 0.864 ; val loss 1.118; lr 1.00e-09]
418/500: 100%|██████████| 168/168 [00:00<00:00, 1100.78it/s, train loss 1.811 ; val loss 1.118; lr 1.00e-09]
419/500: 100%|██████████| 168/168 [00:00<00:00, 1039.64it/s, train loss 0.468 ; val loss 1.118; lr 1.00e-09]
420/500: 100%|██████████| 168/168 [00:00<00:00, 1014.77it/s, train loss 0.824 ; val loss 1.118; lr 1.00e-09]
421/500: 100%|██████████| 168/168 [00:00<00:00, 1100.78it/s, train loss 0.924 ; val loss 1.118; lr 1.00e-09]
422/500: 100%|██████████| 168/168 [00:00<00:00, 1093.64it/s, train loss 1.315 ; val loss 1.118; lr 1.00e-09]
423/500: 100%|██████████| 168/168 [00:00<00:00, 1086.78it/s, train loss 2.346 ; val loss 1.118; lr 1.00e-09]
424/500: 100%|██████████| 168/168 [00:00<00:00, 1093.63it/s, train loss 0.635 ; val loss 1.118; lr 1.00e-09]
425/500: 100%|██████████| 168/168 [00:00<00:00, 1086.77it/s, train loss 1.348 ; val loss 1.118; lr 1.00e-09]
426/500: 100%|██████████| 168/168 [00:00<00:00, 1066.16it/s, train loss 0.662 ; val loss 1.118; lr 1.00e-09]
427/500: 100%|██████████| 168/168 [00:00<00:00, 1079.62it/s, train loss 0.578 ; val loss 1.118; lr 1.00e-09]
428/500: 100%|██████████| 168/168 [00:00<00:00, 1065.95it/s, train loss 0.773 ; val loss 1.118; lr 1.00e-09]
429/500: 100%|██████████| 168/168 [00:00<00:00, 1080.01it/s, train loss 0.494 ; val loss 1.118; lr 1.00e-09]
430/500: 100%|██████████| 168/168 [00:00<00:00, 1046.31it/s, train loss 1.609 ; val loss 1.118; lr 1.00e-09]
431/500: 100%|██████████| 168/168 [00:00<00:00, 1100.79it/s, train loss 1.516 ; val loss 1.118; lr 1.00e-09]
432/500: 100%|██████████| 168/168 [00:00<00:00, 1108.26it/s, train loss 1.455 ; val loss 1.118; lr 1.00e-09]
433/500: 100%|██████████| 168/168 [00:00<00:00, 1108.23it/s, train loss 1.888 ; val loss 1.118; lr 1.00e-09]
434/500: 100%|██████████| 168/168 [00:00<00:00, 1093.69it/s, train loss 0.832 ; val loss 1.118; lr 1.00e-09]
435/500: 100%|██████████| 168/168 [00:00<00:00, 1100.98it/s, train loss 1.104 ; val loss 1.118; lr 1.00e-09]
436/500: 100%|██████████| 168/168 [00:00<00:00, 1080.01it/s, train loss 0.735 ; val loss 1.118; lr 1.00e-09]
437/500: 100%|██████████| 168/168 [00:00<00:00, 1059.43it/s, train loss 0.382 ; val loss 1.118; lr 1.00e-09]
438/500: 100%|██████████| 168/168 [00:00<00:00, 1072.74it/s, train loss 0.684 ; val loss 1.118; lr 1.00e-09]
439/500: 100%|██████████| 168/168 [00:00<00:00, 1086.81it/s, train loss 1.310 ; val loss 1.118; lr 1.00e-09]
440/500: 100%|██████████| 168/168 [00:00<00:00, 1093.63it/s, train loss 1.597 ; val loss 1.118; lr 1.00e-09]
441/500: 100%|██████████| 168/168 [00:00<00:00, 1059.69it/s, train loss 0.746 ; val loss 1.118; lr 1.00e-09]
442/500: 100%|██████████| 168/168 [00:00<00:00, 1100.98it/s, train loss 0.563 ; val loss 1.118; lr 1.00e-09]
443/500: 100%|██████████| 168/168 [00:00<00:00, 1066.33it/s, train loss 1.315 ; val loss 1.118; lr 1.00e-09]
444/500: 100%|██████████| 168/168 [00:00<00:00, 1093.62it/s, train loss 1.328 ; val loss 1.118; lr 1.00e-09]
445/500: 100%|██████████| 168/168 [00:00<00:00, 1093.72it/s, train loss 0.932 ; val loss 1.118; lr 1.00e-09]
446/500: 100%|██████████| 168/168 [00:00<00:00, 1086.85it/s, train loss 1.554 ; val loss 1.118; lr 1.00e-09]
447/500: 100%|██████████| 168/168 [00:00<00:00, 1093.83it/s, train loss 1.116 ; val loss 1.118; lr 1.00e-09]
448/500: 100%|██████████| 168/168 [00:00<00:00, 1086.78it/s, train loss 1.375 ; val loss 1.118; lr 1.00e-09]
449/500: 100%|██████████| 168/168 [00:00<00:00, 1014.76it/s, train loss 0.939 ; val loss 1.118; lr 1.00e-09]
450/500: 100%|██████████| 168/168 [00:00<00:00, 1033.26it/s, train loss 0.911 ; val loss 1.118; lr 1.00e-09]
451/500: 100%|██████████| 168/168 [00:00<00:00, 1002.68it/s, train loss 1.935 ; val loss 1.118; lr 1.00e-09]
452/500: 100%|██████████| 168/168 [00:00<00:00, 1086.57it/s, train loss 0.674 ; val loss 1.118; lr 1.00e-09]
453/500: 100%|██████████| 168/168 [00:00<00:00, 1086.99it/s, train loss 1.045 ; val loss 1.118; lr 1.00e-09]
454/500: 100%|██████████| 168/168 [00:00<00:00, 979.36it/s, train loss 1.306 ; val loss 1.118; lr 1.00e-09]
455/500: 100%|██████████| 168/168 [00:00<00:00, 1079.62it/s, train loss 0.511 ; val loss 1.118; lr 1.00e-09]
456/500: 100%|██████████| 168/168 [00:00<00:00, 1079.81it/s, train loss 0.432 ; val loss 1.118; lr 1.00e-09]
457/500: 100%|██████████| 168/168 [00:00<00:00, 1072.94it/s, train loss 0.943 ; val loss 1.118; lr 1.00e-09]
458/500: 100%|██████████| 168/168 [00:00<00:00, 1059.25it/s, train loss 0.923 ; val loss 1.118; lr 1.00e-09]
459/500: 100%|██████████| 168/168 [00:00<00:00, 1059.44it/s, train loss 1.164 ; val loss 1.118; lr 1.00e-09]
460/500: 100%|██████████| 168/168 [00:00<00:00, 1021.12it/s, train loss 1.862 ; val loss 1.118; lr 1.00e-09]
461/500: 100%|██████████| 168/168 [00:00<00:00, 1046.10it/s, train loss 1.972 ; val loss 1.118; lr 1.00e-09]
462/500: 100%|██████████| 168/168 [00:00<00:00, 1039.82it/s, train loss 0.966 ; val loss 1.118; lr 1.00e-09]
463/500: 100%|██████████| 168/168 [00:00<00:00, 1093.64it/s, train loss 3.646 ; val loss 1.118; lr 1.00e-09]
464/500: 100%|██████████| 168/168 [00:00<00:00, 1086.58it/s, train loss 1.100 ; val loss 1.118; lr 1.00e-09]
465/500: 100%|██████████| 168/168 [00:00<00:00, 1093.83it/s, train loss 0.469 ; val loss 1.118; lr 1.00e-09]
466/500: 100%|██████████| 168/168 [00:00<00:00, 1080.01it/s, train loss 0.655 ; val loss 1.118; lr 1.00e-09]
467/500: 100%|██████████| 168/168 [00:00<00:00, 1079.62it/s, train loss 1.105 ; val loss 1.118; lr 1.00e-09]
468/500: 100%|██████████| 168/168 [00:00<00:00, 1086.58it/s, train loss 0.285 ; val loss 1.118; lr 1.00e-09]
469/500: 100%|██████████| 168/168 [00:00<00:00, 1065.94it/s, train loss 0.760 ; val loss 1.118; lr 1.00e-09]
470/500: 100%|██████████| 168/168 [00:00<00:00, 1079.62it/s, train loss 1.269 ; val loss 1.118; lr 1.00e-09]
471/500: 100%|██████████| 168/168 [00:00<00:00, 1059.43it/s, train loss 1.044 ; val loss 1.118; lr 1.00e-09]
472/500: 100%|██████████| 168/168 [00:00<00:00, 1039.83it/s, train loss 1.008 ; val loss 1.118; lr 1.00e-09]
473/500: 100%|██████████| 168/168 [00:00<00:00, 1072.93it/s, train loss 1.140 ; val loss 1.118; lr 1.00e-09]
474/500: 100%|██████████| 168/168 [00:00<00:00, 1093.83it/s, train loss 1.439 ; val loss 1.118; lr 1.00e-09]
475/500: 100%|██████████| 168/168 [00:00<00:00, 1108.45it/s, train loss 1.405 ; val loss 1.118; lr 1.00e-09]
476/500: 100%|██████████| 168/168 [00:00<00:00, 1108.02it/s, train loss 1.051 ; val loss 1.118; lr 1.00e-09]
477/500: 100%|██████████| 168/168 [00:00<00:00, 1101.02it/s, train loss 1.042 ; val loss 1.118; lr 1.00e-09]
478/500: 100%|██████████| 168/168 [00:00<00:00, 1108.23it/s, train loss 1.073 ; val loss 1.118; lr 1.00e-09]
479/500: 100%|██████████| 168/168 [00:00<00:00, 1052.62it/s, train loss 3.545 ; val loss 1.118; lr 1.00e-09]
480/500: 100%|██████████| 168/168 [00:00<00:00, 1100.98it/s, train loss 0.645 ; val loss 1.118; lr 1.00e-09]
481/500: 100%|██████████| 168/168 [00:00<00:00, 1093.64it/s, train loss 0.803 ; val loss 1.118; lr 1.00e-09]
482/500: 100%|██████████| 168/168 [00:00<00:00, 1100.78it/s, train loss 0.653 ; val loss 1.118; lr 1.00e-09]
483/500: 100%|██████████| 168/168 [00:00<00:00, 1115.56it/s, train loss 0.492 ; val loss 1.118; lr 1.00e-09]
484/500: 100%|██████████| 168/168 [00:00<00:00, 1093.62it/s, train loss 0.341 ; val loss 1.118; lr 1.00e-09]
485/500: 100%|██████████| 168/168 [00:00<00:00, 1115.57it/s, train loss 0.854 ; val loss 1.118; lr 1.00e-09]
486/500: 100%|██████████| 168/168 [00:00<00:00, 1086.57it/s, train loss 0.811 ; val loss 1.118; lr 1.00e-09]
487/500: 100%|██████████| 168/168 [00:00<00:00, 1094.03it/s, train loss 0.789 ; val loss 1.118; lr 1.00e-09]
488/500: 100%|██████████| 168/168 [00:00<00:00, 1072.93it/s, train loss 0.436 ; val loss 1.118; lr 1.00e-09]
489/500: 100%|██████████| 168/168 [00:00<00:00, 1086.58it/s, train loss 0.661 ; val loss 1.118; lr 1.00e-09]
490/500: 100%|██████████| 168/168 [00:00<00:00, 1052.84it/s, train loss 0.891 ; val loss 1.118; lr 1.00e-09]
491/500: 100%|██████████| 168/168 [00:00<00:00, 1046.28it/s, train loss 1.715 ; val loss 1.118; lr 1.00e-09]
492/500: 100%|██████████| 168/168 [00:00<00:00, 1059.51it/s, train loss 1.187 ; val loss 1.118; lr 1.00e-09]
493/500: 100%|██████████| 168/168 [00:00<00:00, 1052.90it/s, train loss 0.863 ; val loss 1.118; lr 1.00e-09]
494/500: 100%|██████████| 168/168 [00:00<00:00, 1080.01it/s, train loss 0.900 ; val loss 1.118; lr 1.00e-09]
495/500: 100%|██████████| 168/168 [00:00<00:00, 1108.23it/s, train loss 1.039 ; val loss 1.118; lr 1.00e-09]
496/500: 100%|██████████| 168/168 [00:00<00:00, 1073.01it/s, train loss 1.252 ; val loss 1.118; lr 1.00e-09]
497/500: 100%|██████████| 168/168 [00:00<00:00, 1086.77it/s, train loss 1.066 ; val loss 1.118; lr 1.00e-09]
498/500: 100%|██████████| 168/168 [00:00<00:00, 1080.08it/s, train loss 1.151 ; val loss 1.118; lr 1.00e-09]
499/500: 100%|██████████| 168/168 [00:00<00:00, 1072.74it/s, train loss 0.700 ; val loss 1.118; lr 1.00e-09]
500/500: 100%|██████████| 168/168 [00:00<00:00, 1072.81it/s, train loss 1.337 ; val loss 1.118; lr 1.00e-09]
###Markdown
Testing
###Code
# for j, (x, y) in enumerate(val_loader):
# model.eval()
# preds = model(x.float().to(device)).squeeze(dim=1)
# print(preds, y, sep='\n')
# print(f"{'=' * 80}")
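# Preprocess the test split the same way as training: drop the first (id) column,
# apply the already-fitted feature pipeline, and keep the previously selected features
# before batching the rows for inference.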
test_dataset = pd.read_csv(test_data_path)
test_dataset = test_dataset[test_dataset.columns[1:]]
test_dataset = feature_process_pipeline.transform(test_dataset.copy())
test_dataset = test_dataset[selected_feature_idx[1:]]
test_loader = DataLoader(COVID19Dataset(test_dataset.values, None), batch_size=16, shuffle=False)
model.eval()
preds_all = []
for x in test_loader:
x = x.float().to(device)
preds = model(x).detach().cpu().numpy().squeeze()
preds_all.extend(preds)
save_pred(preds_all, "./dl_selected_maxmin_normalized_feature.txt")
###Output
_____no_output_____ |
Big-Data-Clusters/CU9/public/content/monitor-k8s/tsg098-get-replicasets.ipynb | ###Markdown
TSG098 - Get BDC replicasets (Kubernetes) Description Steps Common functions Define helper functions used in this notebook.
###Code
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
# To aid supportability, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
# Display an install HINT, so the user can click on a SOP to install the missing binary
#
if which_binary == None:
print(f"The path used to search for '{cmd_actual[0]}' was:")
print(sys.path)
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
# Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if wait:
p.wait()
except FileNotFoundError as e:
if install_hint is not None:
display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
# Hints for tool retry (on transient fault), known errors and install guide
#
retry_hints = {'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use', 'Login timeout expired (0) (SQLDriverConnect)', 'SSPI Provider: No Kerberos credentials available', ], 'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond', ], 'python': [ ], }
error_hints = {'azdata': [['Please run \'azdata login\' to first authenticate', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Can\'t open lib \'ODBC Driver 17 for SQL Server', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb'], ['NameError: name \'azdata_login_secret_name\' is not defined', 'SOP013 - Create secret for azdata login (inside cluster)', '../common/sop013-create-secret-for-azdata-login.ipynb'], ['ERROR: No credentials were supplied, or the credentials were unavailable or inaccessible.', 'TSG124 - \'No credentials were supplied\' error from azdata login', '../repair/tsg124-no-credentials-were-supplied.ipynb'], ['Please accept the license terms to use this product through', 'TSG126 - azdata fails with \'accept the license terms to use this product\'', '../repair/tsg126-accept-license-terms.ipynb'], ], 'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb'], ], 'python': [['Library not loaded: /usr/local/opt/unixodbc', 'SOP012 - Install unixodbc for Mac', '../install/sop012-brew-install-odbc-for-sql-server.ipynb'], ['WARNING: You are using pip version', 'SOP040 - Upgrade pip in ADS Python sandbox', '../install/sop040-upgrade-pip.ipynb'], ], }
install_hint = {'azdata': [ 'SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb' ], 'kubectl': [ 'SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb' ], }
print('Common functions defined successfully.')
###Output
_____no_output_____
###Markdown
Get the Kubernetes namespace for the big data cluster Get the namespace of the Big Data Cluster using the kubectl command line interface. **NOTE:** If there is more than one Big Data Cluster in the target Kubernetes cluster, then either: - set \[0\] to the correct value for the big data cluster, or - set the environment variable AZDATA_NAMESPACE before starting Azure Data Studio.
###Code
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True)
except:
from IPython.display import Markdown
print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.")
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}')
###Output
_____no_output_____
###Markdown
Run kubectl to display the replica sets
###Code
run(f"kubectl get replicaset -n {namespace} -o wide")
print("Notebook execution is complete.")
###Output
_____no_output_____ |
Practical_NLP_in_PyTorch-master/allennlp/elmo_text_classification.ipynb | ###Markdown
Set random seed manually to replicate results
###Code
torch.manual_seed(config.seed)
###Output
_____no_output_____
###Markdown
Load Data
###Code
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.dataset_readers import DatasetReader
###Output
_____no_output_____
###Markdown
Prepare dataset
###Code
label_cols = ["toxic", "severe_toxic", "obscene",
"threat", "insult", "identity_hate"]
from allennlp.data.fields import TextField, MetadataField, ArrayField
class JigsawDatasetReader(DatasetReader):
def __init__(self, tokenizer: Callable[[str], List[str]]=lambda x: x.split(),
token_indexers: Dict[str, TokenIndexer] = None,
max_seq_len: Optional[int]=config.max_seq_len) -> None:
super().__init__(lazy=False)
self.tokenizer = tokenizer
self.token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self.max_seq_len = max_seq_len
@overrides
def text_to_instance(self, tokens: List[Token], id: str=None,
labels: np.ndarray=None) -> Instance:
sentence_field = TextField(tokens, self.token_indexers)
fields = {"tokens": sentence_field}
id_field = MetadataField(id)
fields["id"] = id_field
if labels is None:
labels = np.zeros(len(label_cols))
label_field = ArrayField(array=labels)
fields["label"] = label_field
return Instance(fields)
@overrides
def _read(self, file_path: str) -> Iterator[Instance]:
df = pd.read_csv(file_path)
if config.testing: df = df.head(1000)
for i, row in df.iterrows():
yield self.text_to_instance(
[Token(x) for x in self.tokenizer(row["comment_text"])],
row["id"], row[label_cols].values,
)
###Output
_____no_output_____
###Markdown
Prepare token handlers We will use the spacy tokenizer here
###Code
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from allennlp.data.token_indexers.elmo_indexer import ELMoCharacterMapper, ELMoTokenCharactersIndexer
# the token indexer is responsible for mapping tokens to integers
token_indexer = ELMoTokenCharactersIndexer()
def tokenizer(x: str):
return [w.text for w in
SpacyWordSplitter(language='en_core_web_sm',
pos_tags=False).split_words(x)[:config.max_seq_len]]
reader = JigsawDatasetReader(
tokenizer=tokenizer,
token_indexers={"tokens": token_indexer}
)
train_ds, test_ds = (reader.read(DATA_ROOT / fname) for fname in ["train.csv", "test_proced.csv"])
val_ds = None
len(train_ds)
train_ds[:10]
vars(train_ds[0].fields["tokens"])
###Output
_____no_output_____
###Markdown
Prepare vocabulary We don't need to build the vocab: all that is handled by the token indexer
###Code
vocab = Vocabulary()
###Output
_____no_output_____
###Markdown
Prepare iterator The iterator is responsible for batching the data and preparing it for input into the model. We'll use the BucketIterator that batches text sequences of similar lengths together.
###Code
from allennlp.data.iterators import BucketIterator
iterator = BucketIterator(batch_size=config.batch_size,
sorting_keys=[("tokens", "num_tokens")],
)
###Output
_____no_output_____
###Markdown
We need to tell the iterator how to numericalize the text data. We do this by passing the vocabulary to the iterator. This step is easy to forget so be careful!
###Code
iterator.index_with(vocab)
###Output
_____no_output_____
###Markdown
Read sample
###Code
batch = next(iter(iterator(train_ds)))
batch
batch["tokens"]["tokens"]
batch["tokens"]["tokens"].shape
###Output
_____no_output_____
###Markdown
Prepare Model
###Code
import torch
import torch.nn as nn
import torch.optim as optim
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder, PytorchSeq2VecWrapper
from allennlp.nn.util import get_text_field_mask
from allennlp.models import Model
from allennlp.modules.text_field_embedders import TextFieldEmbedder
class BaselineModel(Model):
def __init__(self, word_embeddings: TextFieldEmbedder,
encoder: Seq2VecEncoder,
out_sz: int=len(label_cols)):
super().__init__(vocab)
self.word_embeddings = word_embeddings
self.encoder = encoder
self.projection = nn.Linear(self.encoder.get_output_dim(), out_sz)
self.loss = nn.BCEWithLogitsLoss()
def forward(self, tokens: Dict[str, torch.Tensor],
id: Any, label: torch.Tensor) -> torch.Tensor:
mask = get_text_field_mask(tokens)
embeddings = self.word_embeddings(tokens)
state = self.encoder(embeddings, mask)
class_logits = self.projection(state)
output = {"class_logits": class_logits}
output["loss"] = self.loss(class_logits, label)
return output
###Output
_____no_output_____
###Markdown
Prepare embeddings
###Code
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import ElmoTokenEmbedder
options_file = 'https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_options.json'
weight_file = 'https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5'
elmo_embedder = ElmoTokenEmbedder(options_file, weight_file)
word_embeddings = BasicTextFieldEmbedder({"tokens": elmo_embedder})
from allennlp.modules.seq2vec_encoders import PytorchSeq2VecWrapper
encoder: Seq2VecEncoder = PytorchSeq2VecWrapper(nn.LSTM(word_embeddings.get_output_dim(), config.hidden_sz, bidirectional=True, batch_first=True))
###Output
_____no_output_____
###Markdown
Notice how simple and modular the code for initializing the model is. All the complexity is delegated to each component.
###Code
model = BaselineModel(
word_embeddings,
encoder,
)
if USE_GPU: model.cuda()
else: model
###Output
_____no_output_____
###Markdown
Basic sanity checks
###Code
batch = nn_util.move_to_device(batch, 0 if USE_GPU else -1)
tokens = batch["tokens"]
labels = batch["label"]
tokens
mask = get_text_field_mask(tokens)
mask
embeddings = model.word_embeddings(tokens)
state = model.encoder(embeddings, mask)
class_logits = model.projection(state)
class_logits
model(**batch)
loss = model(**batch)["loss"]
loss
loss.backward()
[x.grad for x in list(model.encoder.parameters())]
###Output
_____no_output_____
###Markdown
Train
###Code
optimizer = optim.Adam(model.parameters(), lr=config.lr)
from allennlp.training.trainer import Trainer
trainer = Trainer(
model=model,
optimizer=optimizer,
iterator=iterator,
train_dataset=train_ds,
cuda_device=0 if USE_GPU else -1,
num_epochs=config.epochs,
)
metrics = trainer.train()
###Output
02/07/2019 17:36:24 - INFO - allennlp.training.trainer - Beginning training.
02/07/2019 17:36:24 - INFO - allennlp.training.trainer - Epoch 0/1
02/07/2019 17:36:24 - INFO - allennlp.training.trainer - Peak CPU memory usage MB: 1255.145472
02/07/2019 17:36:24 - INFO - allennlp.training.trainer - Training
loss: 0.6855 ||: 100%|██████████| 5/5 [00:36<00:00, 8.01s/it]
02/07/2019 17:37:01 - INFO - allennlp.training.trainer - Training | Validation
02/07/2019 17:37:01 - INFO - allennlp.training.trainer - loss | 0.686 | N/A
02/07/2019 17:37:01 - INFO - allennlp.training.trainer - cpu_memory_MB | 1255.145 | N/A
02/07/2019 17:37:01 - INFO - allennlp.training.trainer - Epoch duration: 00:00:36
02/07/2019 17:37:01 - INFO - allennlp.training.trainer - Estimated training time remaining: 0:00:36
02/07/2019 17:37:01 - INFO - allennlp.training.trainer - Epoch 1/1
02/07/2019 17:37:01 - INFO - allennlp.training.trainer - Peak CPU memory usage MB: 3154.649088
02/07/2019 17:37:01 - INFO - allennlp.training.trainer - Training
loss: 0.6464 ||: 100%|██████████| 5/5 [00:51<00:00, 11.81s/it]
02/07/2019 17:37:52 - INFO - allennlp.training.trainer - Training | Validation
02/07/2019 17:37:52 - INFO - allennlp.training.trainer - loss | 0.646 | N/A
02/07/2019 17:37:52 - INFO - allennlp.training.trainer - cpu_memory_MB | 3154.649 | N/A
02/07/2019 17:37:52 - INFO - allennlp.training.trainer - Epoch duration: 00:00:51
###Markdown
Generating Predictions
###Code
from allennlp.data.iterators import DataIterator
from tqdm import tqdm
from scipy.special import expit # the sigmoid function
def tonp(tsr): return tsr.detach().cpu().numpy()
class Predictor:
def __init__(self, model: Model, iterator: DataIterator,
cuda_device: int=-1) -> None:
self.model = model
self.iterator = iterator
self.cuda_device = cuda_device
def _extract_data(self, batch) -> np.ndarray:
out_dict = self.model(**batch)
return expit(tonp(out_dict["class_logits"]))
def predict(self, ds: Iterable[Instance]) -> np.ndarray:
pred_generator = self.iterator(ds, num_epochs=1, shuffle=False)
self.model.eval()
pred_generator_tqdm = tqdm(pred_generator,
total=self.iterator.get_num_batches(ds))
preds = []
with torch.no_grad():
for batch in pred_generator_tqdm:
batch = nn_util.move_to_device(batch, self.cuda_device)
preds.append(self._extract_data(batch))
return np.concatenate(preds, axis=0)
from allennlp.data.iterators import BasicIterator
# iterate over the dataset without changing its order
seq_iterator = BasicIterator(batch_size=64)
seq_iterator.index_with(vocab)
predictor = Predictor(model, seq_iterator, cuda_device=0 if USE_GPU else -1)
train_preds = predictor.predict(train_ds)
test_preds = predictor.predict(test_ds)
###Output
100%|██████████| 5/5 [01:44<00:00, 18.25s/it]
100%|██████████| 4/4 [00:50<00:00, 13.33s/it]
|
examples/example_cylinder_models.ipynb | ###Markdown
Cylinder Models In this section, we describe models of intra-axonal diffusion.In all cases, the intra-axonal diffusion is represented using axially symmetric cylinder models with $\boldsymbol{\mu}\in\mathbb{S}^2$ the orientation parallel to the cylinder axis.The three-dimensional diffusion signal in these models is given as the separable product of (free) parallel and restricted perpendicular diffusion *(Assaf et al. 2004)*.This means that the three-dimensional signal is given by\begin{equation} E_{\textrm{intra}}(\textbf{q},\Delta,\delta,\lambda_\parallel,R) = E_\parallel(q_\parallel,\Delta,\delta,\lambda_\parallel)\times E_\perp(q_\perp,\Delta,\delta,R)\end{equation}with parallel q-value $q_\parallel=\textbf{q}^T\boldsymbol{\mu}$, perpendicular q-value $q_\perp=(\textbf{q}^T\textbf{q}-(\textbf{q}^T\boldsymbol{\mu})^2))^{1/2}$, parallel diffusivity $\lambda_\parallel>0$ and cylinder radius $R>0$[mm]. The parallel signal is usually given by Gaussian diffusion as\begin{equation}E_\parallel(q_\parallel,\Delta,\delta,\lambda_\parallel)=\exp(-4\pi^2q_\parallel^2\lambda_\parallel(\Delta-\delta/3)).\end{equation}The perpendicular signal $E_\perp$ is described using various cylinder models.In the rest of this section, we start with describing the simplest, having the strongest tissue assumptions (C1), and more towards more general models (C4). Stick: C1The simplest model for intra-axonal diffusion is the ``Stick'' -- a cylinder with zero radius *(Behrens et al. 2003)*.The Stick model assumes that, because axon diameters are very small, the perpendicular diffusion attenuation inside these axons is negligible compared to the overall signal attenuation.The perpendicular diffusion coefficient is therefore be approximated by zero, so the perpendicular signal attenuation is always equal to one as $E_\perp=1$.Inserting this definition into the equation above leads to the simple signal representation\begin{equation}E_{\textrm{Stick}}(b,\textbf{n},\boldsymbol{\mu},\lambda_\parallel)=\exp(-b\lambda_\parallel(\textbf{n}^T\boldsymbol{\mu})^2),\end{equation}which is the same as a DTI Tensor with $\lambda_\parallel=\lambda_1$ and $\lambda_\perp=\lambda_2=\lambda_3=0$.Despite its simplicity, it turns out approximating axons as Sticks is quite reasonable at clinical gradient strengths *(Burcaw et al. 2015)*.In fact, the Stick is used in the most state-of-the-art microstructure models modeling axonal dispersion *(Tariq et al. 2016, Kaden et al. 2016)*.
###Code
from dmipy.signal_models import cylinder_models
from dmipy.core.acquisition_scheme import acquisition_scheme_from_bvalues
import numpy as np
stick = cylinder_models.C1Stick(mu=[0, 0], lambda_par=1.7e-9)
Nsamples = 100
bvecs_parallel = np.tile(np.r_[0., 0., 1.], (Nsamples, 1))
bvecs_perpendicular = np.tile(np.r_[0., 1., 0.], (Nsamples, 1))
bvals = np.linspace(0, 2e9, Nsamples)
delta = 0.01
Delta = 0.03
scheme_parallel = acquisition_scheme_from_bvalues(bvals, bvecs_parallel, delta, Delta)
scheme_perpendicular = acquisition_scheme_from_bvalues(bvals, bvecs_perpendicular, delta, Delta)
Estick_parallel = stick(scheme_parallel)
Estick_perpendicular = stick(scheme_perpendicular)
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(bvals, Estick_parallel, label="Stick $E_\parallel$")
plt.plot(bvals, Estick_perpendicular, label="Stick $E_\perp$")
plt.legend(fontsize=12)
plt.title("Signal attenuation Stick", fontsize=17)
plt.xlabel("b-value [s/m$^2$]", fontsize=15)
plt.ylabel("Signal Attenuation", fontsize=15);
###Output
_____no_output_____
###Markdown
Stejskal-Tanner Cylinder: C2In reality, axons have a non-zero radius.To account for this, different cylinder models for perpendicular diffusion have been proposed for different combinations of PGSE acquisition parameters.The simplest is the Stejskal-Tanner approximation of the cylinder *(Soderman and Johnson 1995)*, which has the hardest assumptions on the PGSE protocol.First, it assumes that pulse length $\delta$ is so short that no diffusion occurs during the application of the gradient pulse ($\delta\rightarrow0$).Second, it assumes that pulse separation $\Delta$ is long enough for diffusion with intra-cylindrical diffusion coefficient $D$ to be restricted inside a cylinder of radius $R$ ($\Delta\gg R^2/D$).Within these assumptions, the perpendicular, intra-cylindrical signal attenuation is given as\begin{equation} E_\perp(q,R|\delta\rightarrow0,\Delta\gg R^2/D)=\left(\frac{J_1(2\pi q R)}{\pi q R}\right)^2,\end{equation}where we use the ``$|$'' to separate function parameters from model assumptions, and $J_1$ is a Bessel function of the first kind. Taking $\lim_{R\rightarrow0}$ of this equation simplifies the three-dimensional Soderman model to the Stick model.
###Code
from dmipy.core.acquisition_scheme import acquisition_scheme_from_qvalues
stesjskal_tanner = cylinder_models.C2CylinderStejskalTannerApproximation(mu=[0, 0], lambda_par=1.7e-9)
Nsamples = 100
bvecs_perpendicular = np.tile(np.r_[0., 1., 0.], (Nsamples, 1))
qvals = np.linspace(0, 3e5, Nsamples)
delta = 0.01
Delta = 0.03
scheme_perpendicular = acquisition_scheme_from_qvalues(qvals, bvecs_perpendicular, delta, Delta)
for diameter in np.linspace(1e-6, 1e-5, 5):
plt.plot(qvals, stesjskal_tanner(scheme_perpendicular, diameter=diameter),
label="Diameter="+str(1e6 * diameter)+"$\mu m$")
plt.legend(fontsize=12)
plt.title("Stejskal-Tanner attenuation over cylinder diameter", fontsize=17)
plt.xlabel("perpendicular q-value [1/m]", fontsize=15)
plt.ylabel("E(q$_\perp$)", fontsize=15);
###Output
_____no_output_____
###Markdown
Callaghan Cylinder: C3The ``Callaghan'' model relaxes Soderman's $\Delta\gg R^2/D$ assumption to allow for unrestricted diffusion at shorter pulse separation $\Delta$ *(Callaghan 1995)*. In this case, the perpendicular signal attenuation is given as\begin{align} E_\perp(q,\Delta,R|\delta\rightarrow0)&=\sum^\infty_k4\exp(-\beta^2_{0k}D\Delta/R^2)\times \frac{\left((2\pi qR)J_0^{'}(2\pi qR)\right)^2}{\left((2\pi qR)^2-\beta_{0k}^2\right)^2}\nonumber\\ &+\sum^\infty_{nk}8\exp(-\beta^2_{nk}D\Delta/R^2)\times \frac{\beta^2_{nk}}{\left(\beta_{nk}^2-n^2\right)}\times\frac{\left((2\pi qR)J_n^{'}(2\pi qR)\right)^2}{\left((2\pi qR)^2-\beta_{nk}^2\right)^2}\end{align}where $J_n^{'}$ are the derivatives of the $n^{th}$-order Bessel function and $\beta_{nk}$ are the arguments that result in zero-crossings. Taking $\lim_{\Delta\rightarrow\infty}$ of this equation simplifies the Callaghan model to the Soderman model. The Callaghan model has been used to estimate the axon diameter distribution in the multi-compartment AxCaliber approach *(Assaf et al. 2008)*. However, the authors also mention that the perpendicular diffusion is likely already restricted for realistic axon diameters ($<2\mu$m) *(Aboitiz et al. 1992)* for the shortest possible $\Delta$ in PGSE protocols (${\sim}10$ms). This limits the added value of the Callaghan model over the Soderman model in axon diameter estimation.
###Code
callaghan = cylinder_models.C3CylinderCallaghanApproximation(mu=[0, 0], lambda_par=1e-7)
Nsamples = 100
bvecs_perpendicular = np.tile(np.r_[0., 1., 0.], (Nsamples, 1))
qvals = np.linspace(0, 3e5, Nsamples)
delta = 0.001
Delta = 0.001
scheme_perpendicular = acquisition_scheme_from_qvalues(qvals, bvecs_perpendicular, delta, Delta)
plt.plot(qvals, np.exp(-scheme_perpendicular.bvalues * 1.7e-9), label="Free Diffusion", c='r', ls='--')
for Delta in [0.001, 0.0025, 0.015]:
scheme_perpendicular = acquisition_scheme_from_qvalues(qvals, bvecs_perpendicular, delta, Delta)
plt.plot(qvals, callaghan(scheme_perpendicular, diameter=10e-6), label='Callaghan Delta='+str(1e3 * Delta)+'ms')
plt.plot(qvals, stesjskal_tanner(scheme_perpendicular, diameter=10e-6), label="Soderman", c='blue', ls='--')
plt.legend()
###Output
_____no_output_____
###Markdown
For a big cylinder of 10$\mu$ diameter, it can be seen that free diffusion and the Callaghan model are very similar for an extremely short pulse separation of 1ms. The signal is already becoming significantly restricted at 2.5ms, and at 15ms the Callaghan and Soderman approximations have converged (completely restricted).This shows the problem of using the Callaghan model for axon diameter estimation - for axons of diameter 0.1-2 $\mu$m the diffusion is already restricted around 1 or 2 ms, meaning there is no signal contrast for intra-axonal diffusion when Delta varies. Gaussian Phase Cylinder: C4The last cylinder model generalization we discuss is the "Van Gelderen" model *(VanGelderen et al. 1994)*, which relaxes the last $\delta\rightarrow0$ assumption to allow for finite pulse length $\delta$. This model is based on the ``Neuman'' model *(Neuman 1974)*, which assumes Gaussian diffusion during the gradient pulse. In this case, the signal attenuation is given as\begin{equation} E_\perp(q,\Delta,\delta,R)=-8\pi^2q^2\sum^\infty_{m=1}\dfrac{\left[2Da_m^2\delta-2 + 2e^{-Da_m^2\delta} + 2e^{-Da_m^2\Delta}-e^{-Da_m^2(\Delta-\delta)}-e^{-Da_m^2(\Delta-\delta)}\right]}{\delta^2D^2a_m^6(R^2a_m^2-1)}\end{equation}where $a_m$ are roots of the equation $J_1^{'}(a_mR)=0$, with $J_1^{'}$ again the derivative of the Bessel function of the first kind, and $D$ is the intra-axonal diffusivity.According to *(Neuman 1974)*, taking the double $\lim_{(\delta,\Delta)\rightarrow(0,\infty)}$ of the equation above should simplify the Van Gelderen model to the Soderman Model, although he does not show this explicitly.For its generality, the Van Gelderen model has been used in most recent studies regarding in-vivo axon diameter estimation *(Huang et al. 2015, Ferizi et al. 2015, De Santis et al. 2016 )*.
###Code
vangelderen = cylinder_models.C4CylinderGaussianPhaseApproximation()
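# Hedged sketch (not part of the original notebook): evaluate the Gaussian phase model
# the same way the Callaghan model was evaluated above, reusing scheme_perpendicular and
# qvals from the previous cell. The call-time parameter names (mu, lambda_par, diameter)
# are assumptions based on the other dmipy cylinder models; adjust if the API differs.
E_vangelderen = vangelderen(scheme_perpendicular, mu=[0, 0], lambda_par=1.7e-9, diameter=10e-6)
plt.plot(qvals, E_vangelderen, label='Van Gelderen, diameter=10$\mu m$')
plt.legend()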
###Output
_____no_output_____ |
notebooks/Upwelling/ATW_relaxation_Susan.ipynb | ###Markdown
ATW relaxation notebook
###Code
import numpy as np
import matplotlib.pyplot as plt
import warnings
from copy import deepcopy
# Global constants
f = 1e-4 # [s-1]
g = 9.81 # [m s-2]
%matplotlib inline
plt.rcParams['font.size'] = 14
warnings.simplefilter('ignore')
###Output
_____no_output_____
###Markdown
Analytical solutionStart with the linearized, steady state shallow water equations with linear friction and longshore windstress. Assume cross-shore geostrophic balance.\begin{align}f\mathbf{k}\times\mathbf{u} & = -g\nabla\eta + \frac{1}{h}\left(\tau_y - \mu v\right)\hat{\jmath} \tag{1a} \\0 & = \nabla\cdot h\mathbf{u} \tag{1b}\end{align}Taking the curl of (1a) and solving for $\eta$ gives the the Arrested Topography Wave (ATW) of Csanady (1978 *JPO*). I have oriented the problem to $x\to-\infty$ offshore such that $\frac{\partial h}{\partial x} = -s$.$$\frac{\partial^2\eta}{\partial x^2} - \frac{1}{\kappa}\frac{\partial\eta}{\partial y} = 0, \hspace{0.5cm} \frac{1}{\kappa} = \frac{fs}{\mu}\tag{2}$$The coastal boundary condition (obtained from 1a) requires $u \to 0$ and $h \to 0$$$\frac{\partial\eta}{\partial x}(0, y) = \frac{\tau_yf}{\mu g} = q_0 \tag{3}$$Equation (2) is analogous to a constant heat flux boundary condition. The solution is given by Carslaw and Jaeger 1959 (p. 112)$$\eta(x, y) = \frac{\kappa q_0y}{L} + q_0L\left\{\frac{3(x + L)^2 - L^2}{6L^2} - \frac{2}{\pi^2}\sum_{n=1}^\infty\frac{(-1)^n}{n^2}\exp\left(\frac{-\kappa n^2\pi^2y}{L^2}\right)\cos\left(\frac{n\pi(x + L)}{L}\right)\right\} \tag{4}$$which, as $y\to\infty$, reduces to$$\eta(x, y) = \frac{\kappa q_0y}{L} + q_0L\frac{3(x + L)^2 - L^2}{6L^2} \tag{5}$$ Calculate $\eta$ according to equation (5)
###Code
def calc_eta(x, y, L, kappa, q_0):
"""Calculate eta according to equation 5
"""
return kappa * q_0 * y / L + q_0 * L * (3 * (x + L)**2 - L**2) / (6 * L**2)
###Output
_____no_output_____
###Markdown
Find $\eta$ given problem parameters
###Code
# Constants
L = 1e3 # Slope width [m]
tau_y = -1e-4 # Kinematic wind stress [m2 s-2]
mu = 1e-2 # Linear friction coefficient [s-1]
s = 1 # Shelf slope [dimensionless]
# Terms (heat equation analogues)
kappa = mu / (f * s) # 'Diffusivity' of eta
q_0 = tau_y * f / (mu * g) # 'Flux' of eta through boundary
print(q_0)
print(kappa)
# Coordinates
dL = L * 1e-2
xi = np.arange(-L, 0, dL) # x-direction, second coordinate
yi = np.arange(0, L, dL) # y-direction, first coordinate
x, y = np.meshgrid(xi, yi)
# Solution
eta = calc_eta(x, y, L, kappa, q_0)
###Output
-1.0193679918450561e-07
100.0
###Markdown
Plot $\eta$ solution
###Code
# Plot eta
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
ax.contour(xi/L, yi/L, eta, colors='k')
for tick in np.arange(0, 1, 0.015):
ax.plot([0, 0.005], [tick, tick+0.005], 'k-', clip_on=False)
ax.set_xlabel('$\longleftarrow$ $X/L$')
ax.set_ylabel('$\longleftarrow$ $Y/L$')
ax.xaxis.set_ticks([-1, 0])
ax.yaxis.set_ticks([0, 1])
ax.xaxis.set_ticklabels(['$-L$', 0])
ax.yaxis.set_ticklabels([0, '$L$'])
ax.tick_params(direction='out', pad=8)
ax.set_xlim([0, -1])
ax.set_ylim([1, 0])
ax.text(0.02, 0.05, 'Low $\eta$', transform=ax.transAxes)
ax.text(0.85, 0.9, 'High $\eta$', transform=ax.transAxes)
ax.text(0.03, 0.46, '$\\tau_y$', transform=ax.transAxes)
ax.arrow(0.04, 0.5, 0, 0.1, transform=ax.transAxes, head_width=0.01, facecolor='k')
ax.set_title('Cross-shelf bottom slope (ATW) solution')
plt.show()
###Output
_____no_output_____
###Markdown
Relaxation solutionThree schemes:Centered difference$$r_{i, j}^{(n)} = \frac{\eta_{i, j+1}^{(n)} - \eta_{i, j-1}^{(n)}}{2\Delta y} - \kappa\frac{\eta_{i+1, j}^{(n)} - 2\eta_{i, j}^{(n)} + \eta_{i-1, j}^{(n)}}{\Delta x^2}$$$$\eta_{i, j}^{(n+1)} = \eta_{i, j}^{(n)} - \frac{\mu\Delta x^2}{2\kappa}r_{i, j}^{(n)}$$Upstream Euler$$r_{i, j}^{(n)} = \frac{\eta_{i, j+1}^{(n)} - \eta_{i, j}^{(n)}}{\Delta y} - \kappa\frac{\eta_{i+1, j}^{(n)} - 2\eta_{i, j}^{(n)} + \eta_{i-1, j}^{(n)}}{\Delta x^2}$$$$\eta_{i, j}^{(n+1)} = \eta_{i, j}^{(n)} - \frac{\mu}{\left(\frac{2\kappa}{\Delta x} - 1\right)}r_{i, j}^{(n)}$$Downstream Euler$$r_{i, j}^{(n)} = \frac{\eta_{i, j}^{(n)} - \eta_{i, j-1}^{(n)}}{\Delta y} - \kappa\frac{\eta_{i+1, j}^{(n)} - 2\eta_{i, j}^{(n)} + \eta_{i-1, j}^{(n)}}{\Delta x^2}$$$$\eta_{i, j}^{(n+1)} = \eta_{i, j}^{(n)} - \frac{\mu}{\left(\frac{2\kappa}{\Delta x} + 1\right)}r_{i, j}^{(n)}$$Only the downstream Euler is stable. Find $\eta$ by relaxation
###Code
# Find phi by relaxation
# Parameters
M = eta.shape[0] # matrix size
mu = 1 # SOR convergence parameter
TOL = 1e-4 # Convergence tolerance
dissipation = 0.
# Allocate arrays
# Solution
eta_soln = calc_eta(x, y, L, kappa, q_0)
eta_next = np.copy(eta_soln) # getting the boundary conditions correct here
eta = np.zeros(eta.shape) # start from zero to show this is working
res = np.zeros(eta.shape)
# Make figure array
fig, axs = plt.subplots(1, 3, figsize=(17, 6))
N=100
# Relaxation loop
for n in range(N):
for i in range(1, M-1): # Longshore step
for j in range(2, M-1): # Cross-shore step : start from 2 to preserve gradient boundary condition
#Downstream Euler : note the switch in i and j, i is dy, j is dx
res[i, j] = (eta[i, j] - eta[i-1, j]) / (dL) - kappa * (eta[i, j+1] - 2 * eta[i, j] + eta[i, j-1]) / dL**2
eta_next[i, j] = eta[i, j] - mu / (2 * kappa / dL + 1) * res[i, j]
eta = eta_next # move this into the loop for faster convergence
if dL**2 * np.max(abs(res)) / np.max(abs(eta)) < TOL and n > 5: # n > 5 just because I'm starting from 0.
print('done', n)
break
# Plot results
mesh = axs[0].pcolormesh(xi/L, yi/L, eta_soln)
fig.colorbar(mesh, ax=axs[0])
mesh = axs[1].contour(xi/L, yi/L, res, colors='b')
fig.colorbar(mesh, ax=axs[1])
mesh=axs[2].pcolormesh(xi/L, yi/L, eta_next)
fig.colorbar(mesh, ax=axs[2])
for ax in axs:
ax.set_xlim([0, -1])
ax.set_ylim([1, 0])
###Output
_____no_output_____ |
notebooks/lecture_6.ipynb | ###Markdown
Data Loading and Storage Accessing data is a necessary first step for using most of the tools in this book. I’mgoing to be focused on data input and output using pandas, though there are numeroustools in other libraries to help with reading and writing data in various formats.Input and output typically falls into a few main categories: reading text files and othermore efficient on-disk formats, loading data from databases, and interacting with networksources like web APIs.
###Code
import numpy as np
import pandas as pd
np.random.seed(12345)
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(10, 6))
np.set_printoptions(precision=4, suppress=True)
###Output
_____no_output_____
###Markdown
Reading and Writing Data in Text Format pandas features a number of functions for reading tabular data as a DataFrame object. read_csv and read_table are typically used the most.
###Code
!cat examples/ex1.csv
###Output
a,b,c,d,message
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
###Markdown
Because of how messy data in the real world can be, some of the data loading functions (especially read_csv) have grown very complex in their options over time. It's normal to feel overwhelmed by the number of different parameters (read_csv has over 50 as of this writing). The online pandas documentation has many examples about how each of them works, so if you're struggling to read a particular file, there might be a similar enough example to help you find the right parameters. Handling dates and other custom types can require extra effort. Let's start with a small comma-separated (CSV) text file:
###Code
df = pd.read_csv('examples/ex1.csv')
df
###Output
_____no_output_____
###Markdown
We could also have used read_table and specified the delimiter:
###Code
pd.read_table('examples/ex1.csv', sep=',')
###Output
_____no_output_____
###Markdown
A file will not always have a header row.
###Code
!cat examples/ex2.csv
###Output
_____no_output_____
###Markdown
To read this file, you have a couple of options. You can allow pandas to assign default column names, or you can specify names yourself:
###Code
pd.read_csv('examples/ex2.csv', header=None)
pd.read_csv('examples/ex2.csv', names=['a', 'b', 'c', 'd', 'message'])
###Output
_____no_output_____
###Markdown
Suppose you wanted the message column to be the index of the returned DataFrame. You can either indicate you want the column at index 4 or named 'message' using the index_col argument:
###Code
names = ['a', 'b', 'c', 'd', 'message']
pd.read_csv('examples/ex2.csv', names=names, index_col='message')
###Output
_____no_output_____
###Markdown
In the event that you want to form a hierarchical index from multiple columns, pass a list of column numbers or names:
###Code
!cat examples/csv_mindex.csv
parsed = pd.read_csv('examples/csv_mindex.csv',
index_col=['key1', 'key2'])
parsed
###Output
key1,key2,value1,value2
one,a,1,2
one,b,3,4
one,c,5,6
one,d,7,8
two,a,9,10
two,b,11,12
two,c,13,14
two,d,15,16
###Markdown
In some cases, a table might not have a fixed delimiter, using whitespace or some other pattern to separate fields. Consider a text file that looks like this:
###Code
list(open('examples/ex3.txt'))
###Output
_____no_output_____
###Markdown
While you could do some munging by hand, the fields here are separated by a variable amount of whitespace. In these cases, you can pass a regular expression as a delimiter for read_table. This can be expressed by the regular expression \s+, so we have then:
###Code
result = pd.read_table('examples/ex3.txt', sep='\s+')
result
###Output
_____no_output_____
###Markdown
Because there was one fewer column name than the number of data rows, read_table infers that the first column should be the DataFrame's index in this special case. The parser functions have many additional arguments to help you handle the wide variety of exception file formats that occur (see a partial listing in Table 6-2). For example, you can skip the first, third, and fourth rows of a file with skiprows:
###Code
!cat examples/ex4.csv
pd.read_csv('examples/ex4.csv', skiprows=[0, 2, 3])
###Output
# hey!
a,b,c,d,message
# just wanted to make things more difficult for you
# who reads CSV files with computers, anyway?
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
###Markdown
Handling missing values is an important and frequently nuanced part of the file parsing process. Missing data is usually either not present (empty string) or marked by some sentinel value. By default, pandas uses a set of commonly occurring sentinels, such as NA and NULL:
###Code
!cat examples/ex5.csv
result = pd.read_csv('examples/ex5.csv')
result
pd.isnull(result)
###Output
_____no_output_____
###Markdown
The na_values option can take either a list or set of strings to consider missing values:
###Code
result = pd.read_csv('examples/ex5.csv', na_values=['NULL'])
result
###Output
_____no_output_____
###Markdown
Different NA sentinels can be specified for each column in a dict:
###Code
sentinels = {'message': ['foo', 'NA'], 'something': ['two']}
pd.read_csv('examples/ex5.csv', na_values=sentinels)
###Output
_____no_output_____
###Markdown
Reading Text Files in Pieces When processing very large files or figuring out the right set of arguments to correctly process a large file, you may only want to read in a small piece of a file or iterate through smaller chunks of the file. Before we look at a large file, we make the pandas display settings more compact:
###Code
pd.options.display.max_rows = 10
result = pd.read_csv('examples/ex6.csv')
result
###Output
_____no_output_____
###Markdown
If we want to only read a small number of rows (avoiding reading the entire file), specify that with nrows:
###Code
pd.read_csv('examples/ex6.csv', nrows=5)
###Output
_____no_output_____
###Markdown
To read a file in pieces, specify a chunksize as a number of rows:
###Code
chunker = pd.read_csv('examples/ex6.csv', chunksize=1000)
chunker
###Output
_____no_output_____
###Markdown
The TextParser object returned by read_csv allows you to iterate over the parts of the file according to the chunksize. For example, we can iterate over ex6.csv, aggregating the value counts in the 'key' column like so:
###Code
chunker = pd.read_csv('examples/ex6.csv', chunksize=1000)
tot = pd.Series([])
for piece in chunker:
tot = tot.add(piece['key'].value_counts(), fill_value=0)
tot = tot.sort_values(ascending=False)
tot[:10]
###Output
_____no_output_____
###Markdown
Writing Data to Text Format Data can also be exported to a delimited format. Let's consider one of the CSV files read before:
###Code
data = pd.read_csv('examples/ex5.csv')
data
###Output
_____no_output_____
###Markdown
Using DataFrame's to_csv method, we can write the data out to a comma-separated file:
###Code
data.to_csv('examples/out.csv')
!cat examples/out.csv
###Output
,something,a,b,c,d,message
0,one,1,2,3.0,4,
1,two,5,6,,8,world
2,three,9,10,11.0,12,foo
###Markdown
Other delimiters can be used, of course (writing to sys.stdout so it prints the text result to the console):
###Code
import sys
data.to_csv(sys.stdout, sep='|')
###Output
|something|a|b|c|d|message
0|one|1|2|3.0|4|
1|two|5|6||8|world
2|three|9|10|11.0|12|foo
###Markdown
Missing values appear as empty strings in the output. You might want to denote them by some other sentinel value:
###Code
data.to_csv(sys.stdout, na_rep='NULL')
###Output
,something,a,b,c,d,message
0,one,1,2,3.0,4,NULL
1,two,5,6,NULL,8,world
2,three,9,10,11.0,12,foo
###Markdown
With no other options specified, both the row and column labels are written. Both of these can be disabled:
###Code
data.to_csv(sys.stdout, index=False, header=False)
###Output
one,1,2,3.0,4,
two,5,6,,8,world
three,9,10,11.0,12,foo
###Markdown
You can also write only a subset of the columns, and in an order of your choosing:
###Code
data.to_csv(sys.stdout, index=False, columns=['a', 'b', 'c'])
###Output
a,b,c
1,2,3.0
5,6,
9,10,11.0
###Markdown
Series also has a to_csv method:
###Code
dates = pd.date_range('1/1/2000', periods=7)
ts = pd.Series(np.arange(7), index=dates)
ts.to_csv('examples/tseries.csv')
!cat examples/tseries.csv
###Output
2000-01-01,0
2000-01-02,1
2000-01-03,2
2000-01-04,3
2000-01-05,4
2000-01-06,5
2000-01-07,6
###Markdown
Working with Delimited Formats It's possible to load most forms of tabular data from disk using functions like pandas.read_table. In some cases, however, some manual processing may be necessary. It's not uncommon to receive a file with one or more malformed lines that trip up read_table. To illustrate the basic tools, consider a small CSV file:
###Code
!cat examples/ex7.csv
###Output
"a","b","c"
"1","2","3"
"1","2","3"
###Markdown
For any file with a single-character delimiter, you can use Python's built-in csv module. To use it, pass any open file or file-like object to csv.reader:
###Code
import csv
f = open('examples/ex7.csv')
reader = csv.reader(f)
###Output
_____no_output_____
###Markdown
Iterating through the reader like a file yields tuples of values with any quote characters removed:
###Code
for line in reader:
print(line)
###Output
['a', 'b', 'c']
['1', '2', '3']
['1', '2', '3']
###Markdown
From there, it's up to you to do the wrangling necessary to put the data in the form that you need it. Let's take this step by step. First, we read the file into a list of lines:
###Code
with open('examples/ex7.csv') as f:
lines = list(csv.reader(f))
###Output
_____no_output_____
###Markdown
Then, we split the lines into the header line and the data lines:
###Code
header, values = lines[0], lines[1:]
###Output
_____no_output_____
###Markdown
Then we can create a dictionary of data columns using a dictionary comprehension and the expression zip(*values), which transposes rows to columns:
###Code
data_dict = {h: v for h, v in zip(header, zip(*values))}
data_dict
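# CSV files come in many flavors; to define a custom format (delimiter, quoting
# convention, line terminator), subclass csv.Dialect and pass it to csv.reader/csv.writer,
# as done with my_dialect below and in the writing example further down.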
class my_dialect(csv.Dialect):
lineterminator = '\n'
delimiter = ';'
quotechar = '"'
quoting = csv.QUOTE_MINIMAL
###Output
_____no_output_____
###Markdown
We can also give individual CSV dialect parameters as keywords to csv.reader:
###Code
with open('examples/ex7.csv') as f:
reader = csv.reader(f, delimiter='|')
###Output
_____no_output_____
###Markdown
To write delimited files manually, you can use csv.writer. It accepts an open, writable file object and the same dialect and format options as csv.reader:
###Code
with open('mydata.csv', 'w') as f:
writer = csv.writer(f, dialect=my_dialect)
writer.writerow(('one', 'two', 'three'))
writer.writerow(('1', '2', '3'))
writer.writerow(('4', '5', '6'))
writer.writerow(('7', '8', '9'))
!cat mydata.csv
###Output
one;two;three
1;2;3
4;5;6
7;8;9
###Markdown
JSON Data JSON (short for JavaScript Object Notation) has become one of the standard formats for sending data by HTTP request between web browsers and other applications. It is a much more free-form data format than a tabular text form like CSV. Here is an example:
###Code
obj = """
{"name": "Wes",
"places_lived": ["United States", "Spain", "Germany"],
"pet": null,
"siblings": [{"name": "Scott", "age": 30, "pets": ["Zeus", "Zuko"]},
{"name": "Katie", "age": 38,
"pets": ["Sixes", "Stache", "Cisco"]}]
}
"""
import json
result = json.loads(obj)
result
asjson = json.dumps(result)
siblings = pd.DataFrame(result['siblings'], columns=['name', 'age'])
siblings
!cat examples/example.json
data = pd.read_json('examples/example.json')
data
print(data.to_json())
print(data.to_json(orient='records'))
###Output
_____no_output_____
###Markdown
XML and HTML: Web Scraping To use pd.read_html, first install the parsing libraries it relies on: `conda install lxml` and `pip install beautifulsoup4 html5lib`
###Code
tables = pd.read_html('examples/fdic_failed_bank_list.html')
len(tables)
failures = tables[0]
failures.head()
close_timestamps = pd.to_datetime(failures['Closing Date'])
close_timestamps.dt.year.value_counts()
###Output
_____no_output_____
###Markdown
Parsing XML with lxml.objectify The file contains a series of <INDICATOR> records; each record holds fields such as INDICATOR_SEQ (373889), an agency name (Metro-North Railroad), an indicator name (Escalator Availability), a description ("Percent of the time that escalators are operational systemwide. The availability rate is based on physical observations performed the morning of regular business days only. This is a new indicator the agency began reporting in 2009."), the period (year 2011, month 12), a category (Service Indicators), frequency (M), desired change (U), unit (%), decimal places (1), and target/actual values (97.00).
###Code
from lxml import objectify
path = 'datasets/mta_perf/Performance_MNR.xml'
parsed = objectify.parse(open(path))
root = parsed.getroot()
data = []
skip_fields = ['PARENT_SEQ', 'INDICATOR_SEQ',
'DESIRED_CHANGE', 'DECIMAL_PLACES']
for elt in root.INDICATOR:
el_data = {}
for child in elt.getchildren():
if child.tag in skip_fields:
continue
el_data[child.tag] = child.pyval
data.append(el_data)
perf = pd.DataFrame(data)
perf.head()
from io import StringIO
tag = '<a href="http://www.google.com">Google</a>'
root = objectify.parse(StringIO(tag)).getroot()
root
root.get('href')
root.text
###Output
_____no_output_____
###Markdown
Binary Data Formats
###Code
frame = pd.read_csv('examples/ex1.csv')
frame
frame.to_pickle('examples/frame_pickle')
pd.read_pickle('examples/frame_pickle')
!rm examples/frame_pickle
###Output
_____no_output_____
###Markdown
Using HDF5 Format
###Code
frame = pd.DataFrame({'a': np.random.randn(100)})
store = pd.HDFStore('mydata.h5')
store['obj1'] = frame
store['obj1_col'] = frame['a']
store
store['obj1']
store.put('obj2', frame, format='table')
store.select('obj2', where=['index >= 10 and index <= 15'])
store.close()
frame.to_hdf('mydata.h5', 'obj3', format='table')
pd.read_hdf('mydata.h5', 'obj3', where=['index < 5'])
import os
os.remove('mydata.h5')
###Output
_____no_output_____
###Markdown
Reading Microsoft Excel Files
###Code
xlsx = pd.ExcelFile('examples/ex1.xlsx')
pd.read_excel(xlsx, 'Sheet1')
frame = pd.read_excel('examples/ex1.xlsx', 'Sheet1')
frame
writer = pd.ExcelWriter('examples/ex2.xlsx')
frame.to_excel(writer, 'Sheet1')
writer.save()
frame.to_excel('examples/ex2.xlsx')
!rm examples/ex2.xlsx
###Output
_____no_output_____
###Markdown
Interacting with Web APIs
###Code
import requests
url = 'https://api.github.com/repos/pandas-dev/pandas/issues'
resp = requests.get(url)
resp
data = resp.json()
data[0]['title']
issues = pd.DataFrame(data, columns=['number', 'title',
'labels', 'state'])
issues
###Output
_____no_output_____
###Markdown
Interacting with Databases
###Code
import sqlite3
query = """
CREATE TABLE test
(a VARCHAR(20), b VARCHAR(20),
c REAL, d INTEGER
);"""
con = sqlite3.connect('mydata.sqlite')
con.execute(query)
con.commit()
data = [('Atlanta', 'Georgia', 1.25, 6),
('Tallahassee', 'Florida', 2.6, 3),
('Sacramento', 'California', 1.7, 5)]
stmt = "INSERT INTO test VALUES(?, ?, ?, ?)"
con.executemany(stmt, data)
con.commit()
cursor = con.execute('select * from test')
rows = cursor.fetchall()
rows
cursor.description
pd.DataFrame(rows, columns=[x[0] for x in cursor.description])
import sqlalchemy as sqla
db = sqla.create_engine('sqlite:///mydata.sqlite')
pd.read_sql('select * from test', db)
!rm mydata.sqlite
###Output
_____no_output_____ |
checking-revoked-merged-single-cell-fastqs.ipynb | ###Markdown
Introduction Jessica revoked the merged experiments on test and I wanted to double check they were correct.
###Code
import pandas
import os
import sys
HTSW=os.path.expanduser('~/proj/htsworkflow')
if HTSW not in sys.path:
sys.path.append(HTSW)
from htsworkflow.submission import encoded
test = encoded.ENCODED('test.encodedcc.org')
revoked = [['/files/ENCFF710CRO/', 'released', 'revoked'],
['/replicates/85436d7f-2d97-4da1-8e55-92a5193bea7b/', 'released', 'revoked'],
['/experiments/ENCSR881ZYX/', 'released', 'revoked'],
['/replicates/320f110c-db6e-4642-9d8a-b7d9164a9d91/', 'released', 'revoked'],
['/experiments/ENCSR559CDN/', 'released', 'revoked'],
['/files/ENCFF949JJP/', 'released', 'revoked'],
['/replicates/c1f60632-78d5-4f01-94a1-8d3b54091296/', 'released', 'revoked'],
['/experiments/ENCSR062KGY/', 'released', 'revoked'],
['/files/ENCFF688OVJ/', 'released', 'revoked'],
['/experiments/ENCSR311IKT/', 'released', 'revoked'],
['/files/ENCFF138HWE/', 'released', 'revoked'],
['/replicates/2ff52213-39a4-4b01-87b7-285a3afce51b/', 'released', 'revoked'],
['/experiments/ENCSR839DYB/', 'released', 'revoked'],
['/replicates/d7d375ec-44f8-49e9-aebe-91cf9482aa18/', 'released', 'revoked'],
['/files/ENCFF738JJC/', 'released', 'revoked'],
['/replicates/f607afde-79e9-42c6-934b-c8c20f3b7bd7/', 'released', 'revoked'],
['/experiments/ENCSR652JLT/', 'released', 'revoked'],
['/files/ENCFF255BRR/', 'released', 'revoked'],
['/experiments/ENCSR182LFI/', 'released', 'revoked'],
['/files/ENCFF653CRU/', 'released', 'revoked'],
['/replicates/44f6db89-7ab7-421f-ba0c-d0debf5f20dd/', 'released', 'revoked'],
['/files/ENCFF033UGC/', 'released', 'revoked'],
['/replicates/d8338392-2454-41e9-b194-7cd6b7c6a914/', 'released', 'revoked'],
['/experiments/ENCSR723FBU/', 'released', 'revoked'],
]
for r in revoked:
accession = r[0]
if r[0].startswith('/experiments'):
obj = test.get_json(r[0])
print(obj['accession'], obj['description'])
for r in revoked:
accession = r[0]
if r[0].startswith('/files'):
obj = test.get_json(r[0])
print(obj['accession'], obj['submitted_file_name'])
###Output
_____no_output_____ |
content/Chapter_14/05_Confidence_Intervals.ipynb | ###Markdown
Confidence Intervals Suppose you have a large i.i.d. sample $X_1, X_2, \ldots, X_n$, and let $\bar{X}_n$ be the sample mean. The CLT implies that with chance about 95%, the sample mean is within 2 SDs of the population mean:$$P\big{(}\bar{X}_n \in (\mu - 2\frac{\sigma}{\sqrt{n}}, ~~~ \mu + 2\frac{\sigma}{\sqrt{n}}) \big{)} ~ \approx ~~ 0.95$$
###Code
# NO CODE
Plot_norm(x_limits=(-4, 4), mu=0, sigma=1, left_end=-2, right_end=2)
plt.yticks(np.arange(0, 0.401, 0.05), np.array(7*['']))
plt.xticks(np.arange(-4, 4.1),['','','$\mu - 2\sigma/\sqrt{n}$', '', '$\mu$', '', '$\mu+2\sigma/\sqrt{n}$',''])
plt.xlabel('Sample Mean')
plt.title('Gold Area: Approximately 95%');
###Output
_____no_output_____
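To see where the 95% figure comes from numerically, here is a small simulation sketch; the exponential(1) population is an arbitrary illustrative choice (for it, $\mu = \sigma = 1$), and we simply record how often $\mu$ lands in the interval $\bar{X}_n \pm 2\sigma/\sqrt{n}$.

```python
import numpy as np

np.random.seed(0)
n, reps = 400, 10000        # sample size and number of repetitions
mu, sigma = 1, 1            # mean and SD of the exponential(1) population
covered = 0
for _ in range(reps):
    sample = np.random.exponential(scale=1, size=n)
    x_bar = sample.mean()
    if abs(x_bar - mu) < 2 * sigma / np.sqrt(n):
        covered += 1
print(covered / reps)       # close to 0.95
```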
###Markdown
This can be expressed in a different way:$$P\big{(}\vert \bar{X}_n - \mu \vert < 2\frac{\sigma}{\sqrt{n}}\big{)} ~ \approx ~~ 0.95$$Distance is symmetric, so this is the same as saying:$$P\big{(}\mu \in (\bar{X}_n - 2\frac{\sigma}{\sqrt{n}}, ~~~ \bar{X}_n + 2\frac{\sigma}{\sqrt{n}})\big{)} ~ \approx ~~ 0.95$$That is why the interval "sample mean $\pm$ 2 measures of spread" is used as an interval of estimates of $\mu$. Inverse of the Standard Normal CDF The interval $\bar{X}_n \pm ~ 2 \sigma/\sqrt{n}$ is called *an approximate 95% confidence interval for the parameter $\mu$*, the population mean. The interval has a *confidence level* of 95%. The level determines the use of $z = 2$ as the multiplier of the SD of the sample mean.You could choose a different confidence level, say 80%. With that choice you would expect the interval to be narrower. To find out exactly how many SDs you have to go on either side of the center to pick up a central area of about 80%, you have to find the corresponding $z$ on the standard normal curve, as shown below.
###Code
# NO CODE
Plot_norm(x_limits=(-4, 4), mu=0, sigma=1, left_end=-1.28, right_end=1.28)
plt.yticks(np.arange(0, 0.401, 0.05), np.array(7*['']))
plt.xticks(make_array(-1.28, 0, 1.28),['$-z$', '0', '$z$'])
plt.title('Gold Area: Approximately 80%');
###Output
_____no_output_____
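As a quick numerical check of the picture above, the central area between $-1.28$ and $1.28$ under the standard normal curve is indeed close to 80% (a one-line sketch):

```python
from scipy import stats

# central area between -1.28 and 1.28 under the standard normal curve
stats.norm.cdf(1.28) - stats.norm.cdf(-1.28)   # approximately 0.80
```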
###Markdown
As you know from Data 8 and can see in the figure, the interval runs from the 10th to the 90th percentile of the distribution. So $z$ is the 90th percentile of the standard normal curve, also known as the "90 percent point" of the curve. The `scipy` method is therefore called `ppf` and takes a decimal value as its argument.
###Code
stats.norm.ppf(.9)
###Output
_____no_output_____
###Markdown
Therefore an approximate 80% confidence interval for the population mean $\mu$ is given by "sample mean $\pm ~ 1.28\sigma/\sqrt{n}$".Let's double check that 2 is a good choice of $z$ for a 95% interval. The $z$ that we need is the 97.5 percent point:
###Code
stats.norm.ppf(.975)
###Output
_____no_output_____
###Markdown
That's $z = 1.96$, which we have been calling 2. It's good enough, but $z = 1.96$ is also commonly used for constructing 95% confidence intervals.The `ppf` and `cdf` functions are inverses of each other.
###Code
stats.norm.cdf(1.96), stats.norm.ppf(0.975)
###Output
_____no_output_____
###Markdown
In math notation,$$\Phi(z) ~ = ~ p ~~ \iff ~~ \Phi^{-1}(p) = z$$ Confidence Interval for Population Mean Let $\lambda$% be any confidence level. Let $z_\lambda$ be the point such that the interval $(-z_\lambda, ~ z_\lambda)$ contains $\lambda$% of the area under the standard normal curve. In our example above, $\lambda$ was 80 and $z_\lambda$ was 1.28. Let $p = \lambda/100$ be the value of $\lambda$ converted into a proportion. For example if $\lambda = 80$ then $p = 0.8$. Then$$z_\lambda ~ = ~ \Phi^{-1}(p + 0.5(1-p))$$because all of the area to the left of $z_\lambda$ is the area $p$ between $z_\lambda$ and $-z_\lambda$ plus the tail to the left of $-z_\lambda$. If $n$ is large,$$p ~ \approx ~ P\big{(}\mu \in (\bar{X}_{n} - z_{\lambda} \frac{\sigma}{\sqrt{n}}, ~~~ \bar{X}_n + z_\lambda \frac{\sigma}{\sqrt{n}})\big{)}$$The random interval $\bar{X}\_{n} ~ \pm ~ z\_{\lambda} \sigma/\sqrt{n}$ is called *an approximate $\lambda$% confidence interval for the population mean $\mu$*. There is about a $\lambda$% chance that this random interval contains the parameter $\mu$.The only difference between confidence intervals of different levels is the choice of $z_\lambda$ which depends on the level $\lambda$. The other two components are the sample mean and its SD. A Data 8 Example Revisited Let's return to an example very familiar from Data 8: a random sample of 1,174 pairs of mothers and their newborns.
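Before loading the data, note that the multiplier $z_\lambda$ defined above can be computed directly for any level using the formula just stated (a small sketch; the function name is only illustrative):

```python
from scipy import stats

def z_multiplier(level):
    """z_lambda: the point such that (-z_lambda, z_lambda) has central area level% under the standard normal curve."""
    p = level / 100
    return stats.norm.ppf(p + 0.5 * (1 - p))

[round(z_multiplier(level), 2) for level in (80, 95, 99)]   # [1.28, 1.96, 2.58]
```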
###Code
baby = Table.read_table('baby.csv')
baby
###Output
_____no_output_____
###Markdown
The third column consists of the ages of the mothers. Let's construct an approximate 95% confidence interval for the mean age of mothers in the population. We did this in Data 8 using the bootstrap, so we will be able to compare results.We can apply the methods of this section because our data come from a large random sample.
###Code
ages = baby.column('Maternal Age')
samp_mean = np.mean(ages)
samp_mean
n = baby.num_rows
n
###Output
_____no_output_____
###Markdown
The observed value of $\bar{X}_n$ in the sample is 27.23 years. We know that $n = 1174$, so all we need is the population SD $\sigma$ and then we can complete our calculation.But of course we don't know the population SD $\sigma$. We only have a sample.As data scientists, we are used to lifting ourselves by our own bootstraps. Notice that the SD of the sample mean is $\sigma/\sqrt{n}$. If we estimate $\sigma$ by the SD of the data, there will be some error in the estimate but the error will be divided by $\sqrt{n}$ and therefore won't have much effect. That means we can use "sample SD divided by $\sqrt{n}$" as an estimate of $\sigma/\sqrt{n}$. The sample SD, our estimate of $\sigma$, is about 5.82 years.
###Code
sigma_estimate = np.std(ages)
sigma_estimate
###Output
_____no_output_____
###Markdown
An approximate 95% confidence interval for the mean age of the mothers in the population is $(26.89, 27.57)$ years.
###Code
sd_sample_mean = sigma_estimate/(n ** 0.5)
ci_95_pop_mean = samp_mean + 1.96 * make_array(-1, 1) * sd_sample_mean
ci_95_pop_mean
###Output
_____no_output_____
###Markdown
No bootstrapping required! Now let's compare our interval to the interval we got in Data 8 by using the bootstrap percentile method. Here is the function `bootstrap_mean` from Data 8.
###Code
def bootstrap_mean(original_sample, label, replications):
"""Displays approximate 95% confidence interval for population mean.
original_sample: table containing the original sample
label: label of column containing the variable
replications: number of bootstrap samples
"""
just_one_column = original_sample.select(label)
n = just_one_column.num_rows
means = make_array()
for i in np.arange(replications):
bootstrap_sample = just_one_column.sample()
resampled_mean = np.mean(bootstrap_sample.column(0))
means = np.append(means, resampled_mean)
left = percentile(2.5, means)
right = percentile(97.5, means)
resampled_means = Table().with_column(
'Bootstrap Sample Mean', means
)
resampled_means.hist(bins=15)
print('Approximate 95% confidence interval for population mean:')
print(np.round(left, 2), 'to', np.round(right, 2))
plt.plot(make_array(left, right), make_array(0, 0), color='yellow', lw=8);
###Output
_____no_output_____
###Markdown
Let's construct a bootstrap 95% confidence interval for the population mean. We will use 5000 bootstrap samples as we did in Data 8.
###Code
bootstrap_mean(baby, 'Maternal Age', 5000)
###Output
Approximate 95% confidence interval for population mean:
26.89 to 27.56
###Markdown
The bootstrap confidence interval is essentially identical to the interval (26.89, 27.57) that we got by using the normal approximation. As we did in Data 8, let's observe that the distribution of maternal ages in the sample is far from normal:
###Code
baby.select('Maternal Age').hist()
###Output
_____no_output_____ |
BioPy(trial).ipynb | ###Markdown
As a first step, we should validate our sequence to make sure it was read in correctly, that is, that every element in our list conforms to the allowed nucleotide alphabet.
###Code
import random
import collections

# Check the sequence to make sure it is a DNA string
def validateSeq(dna_seq):
tmpseq = dna_seq
    for nuc in tmpseq:  # check every element in the sequence
        if nuc not in nucleotides:  # if the DNA seq contains an invalid character, return False
            return False
return tmpseq
nucleotides = ['A','C','G','T']  # allowed DNA characters
randDNAStr = ''.join([random.choice(nucleotides)  # let's create our random seq
                      for nuc in range(20)])
print(validateSeq(randDNAStr))  # validation of the DNA seq; a False output means the seq is not valid
# create a function which counts how many times each nucleotide occurs in a DNA seq
def counrNucFrequency(seq):
'''DNA seq nucleotides counter '''
tmpFreqDict = {"A":0,"C":0,"G":0,'T':0}
for nuc in seq:
tmpFreqDict[nuc] += 1
return tmpFreqDict
DNAStr = validateSeq(randDNAStr)
print(counrNucFrequency(DNAStr))
randDNAStr = ''.join([random.choice(nucleotides)  # build a random sequence
for nuc in range(20)])
DNAStr = validateSeq(randDNAStr)
print(counrNucFrequency(DNAStr))
def CounrNucFrequency(seq):
return dict(collections.Counter(seq))
print(CounrNucFrequency(DNAStr))
def trancription(seq):
"""the function is responsible for transription process, in.oth.h. DNA to RNA"""
return seq.replace('T','U')
print(trancription(DNAStr))
print(f'\nSequence : {DNAStr}\n')
print(f'[1] + Sequence Length: {len(DNAStr)}\n')
print(f'[2] + Nucleotide Frequency: {counrNucFrequency(DNAStr)}\n')
print(f'[3] + DNA/RNA Transcription : {trancription(DNAStr)}\n')
DNA_ReverseComplement = {
'A':'T',
'T':'A',
'G':'C',
'C':'G'
}
# Function implementing the complementation principle
def reverse_complement(seq):
    """Swapping adenine with thymine and guanine with cytosine, then reversing the newly generated string"""
    return ''.join([DNA_ReverseComplement[nuc] for nuc in seq])[::-1]  # [::-1] reverses the string
tempStr = 'Test' #reversing our str
print(tempStr[::-1])
print(reverse_complement(DNAStr))
print(f"[4] + DNA string + Reverse Complement:\n5' {DNAStr} 3'")
print(f" {''.join(['|' for c in range(len(DNAStr))])}")
print(f"3' {reverse_complement(DNAStr)} 5'\n")
print(f'\nSequence : {DNAStr}\n')
print(f'[1] + Sequence Length: {len(DNAStr)}\n')
print(f'[2] + Nucleotide Frequency: {counrNucFrequency(DNAStr)}\n')
print(f'[3] + DNA/RNA Transcription : {trancription(DNAStr)}\n')
print(f"[4] + DNA string + Reverse Complement:\n5' {DNAStr} 3'")
print(f"   {''.join(['|' for c in range(len(DNAStr))])}")
print(f"3' {reverse_complement(DNAStr)} 5'\n")
from struct import *
print(f'\nSequence : {DNAStr}\n')
print(f'[1] + Sequence Length: {len(DNAStr)}\n')
print(f'[2] + Nucleotide Frequency: {counrNucFrequency(DNAStr)}\n')
print(f'[3] + DNA/RNA Transcription : {trancription(DNAStr)}\n')
print(f"[4] + DNA string + Reverse Complement:\n5' {DNAStr} 3'")
print(f"   {''.join(['|' for c in range(len(DNAStr))])}")
print(f"3' {reverse_complement(DNAStr)} 5'\n")
nucleotides = ['A','C','G','T']
DNA_ReverseComplement = {
'A':'T',
'T':'A',
'G':'C',
'C':'G'
}
# utilities
def colored(seq):
bcolors = {
"A": '\033[92m',
'C': '\033[94m',
'G': '\033[93m',
'T': '\033[91m',
'U': '\033[91m',
'reset': '\033[0;0m'
}
tmpStr = ""
for nuc in seq:
if nuc in bcolors:
tmpStr +=bcolors[nuc] + nuc
else:
tmpStr +=bcolors['reset'] + nuc
return tmpStr + '\033[0;0m'
print(f'\nSequence : {colored(DNAStr)}\n')
print(f'[1] + Sequence Length: {len(DNAStr)}\n')
print(f'[2] + Nucleotide Frequency: {colored(str(counrNucFrequency(DNAStr)))}\n')
print(f'[3] + DNA/RNA Transcription : {colored(trancription(DNAStr))}\n')
print(f"[4] + DNA string + Reverse Complement:\n5' {colored(DNAStr)} 3'")
print(f"   {''.join(['|' for c in range(len(DNAStr))])}")
print(f"3' {colored(reverse_complement(DNAStr))} 5'\n")
def gc_content(seq):
"""GC content in DNA/RNA sequance"""
return round((seq.count('C')+seq.count('G'))/len(seq)*100)
print(gc_content(DNAStr))
print(f'[5] + GC content: {gc_content(DNAStr)} % \n')
print(f'\nSequence : {colored(DNAStr)}\n')
print(f'[1] + Sequence Length: {len(DNAStr)}\n')
print(f'[2] + Nucleotide Frequency: {colored(str(counrNucFrequency(DNAStr)))}\n')
print(f'[3] + DNA/RNA Transcription : {colored(trancription(DNAStr))}\n')
print(f"[4] + DNA string + Reverse Complement:\n5' {colored(DNAStr)} 3'")
print(f"   {''.join(['|' for c in range(len(DNAStr))])}")
print(f"3' {colored(reverse_complement(DNAStr))} 5'\n")
print(f'[5] + GC content: {gc_content(DNAStr)} % \n')
def gc_content_subsec(seq, k =20):
"""GC content in a DNA/RNA sub-sequance lenght k. K = 20 by defolt"""
res = []
for i in range(0,len(seq) - k + 1, k):
subseq = seq[i:i + k]
res.append(gc_content(subseq))
return res
print(f'[6] + GC Content in Subsection k=5: {gc_content_subsec(DNAStr,k =5)}\n')
# from collections import Counter
from collections import Counter
def translate_seq(seq, init_pos=0):
"""Translates a DNA sequence into an aminoacid sequence"""
return [DNA_Codons[seq[pos:pos + 3]] for pos in range(init_pos, len(seq) - 2, 3)]
def codon_usage(seq, aminoacid):
"""Provides the frequency of each codon encoding a given aminoacid in a DNA sequence"""
tmpList = []
for i in range(0, len(seq) - 2, 3):
if DNA_Codons[seq[i:i + 3]] == aminoacid:
tmpList.append(seq[i:i + 3])
freqDict = dict(Counter(tmpList))
totalWight = sum(freqDict.values())
for seq in freqDict:
freqDict[seq] = round(freqDict[seq] / totalWight, 2)
return freqDict
DNA_Codons = {
# 'M' - START, '_' - STOP
"GCT": "A", "GCC": "A", "GCA": "A", "GCG": "A",
"TGT": "C", "TGC": "C",
"GAT": "D", "GAC": "D",
"GAA": "E", "GAG": "E",
"TTT": "F", "TTC": "F",
"GGT": "G", "GGC": "G", "GGA": "G", "GGG": "G",
"CAT": "H", "CAC": "H",
"ATA": "I", "ATT": "I", "ATC": "I",
"AAA": "K", "AAG": "K",
"TTA": "L", "TTG": "L", "CTT": "L", "CTC": "L", "CTA": "L", "CTG": "L",
"ATG": "M",
"AAT": "N", "AAC": "N",
"CCT": "P", "CCC": "P", "CCA": "P", "CCG": "P",
"CAA": "Q", "CAG": "Q",
"CGT": "R", "CGC": "R", "CGA": "R", "CGG": "R", "AGA": "R", "AGG": "R",
"TCT": "S", "TCC": "S", "TCA": "S", "TCG": "S", "AGT": "S", "AGC": "S",
"ACT": "T", "ACC": "T", "ACA": "T", "ACG": "T",
"GTT": "V", "GTC": "V", "GTA": "V", "GTG": "V",
"TGG": "W",
"TAT": "Y", "TAC": "Y",
"TAA": "_", "TAG": "_", "TGA": "_"
}
print(f'[5] + GC Content: {gc_content(DNAStr)}%\n')
print(
f'[6] + GC Content in Subsection k=5: {gc_content_subsec(DNAStr, k=5)}\n')
print(
f'[7] + Aminoacids Sequence from DNA: {translate_seq(DNAStr, 0)}\n')
print(
f'[8] + Codon frequency (L): {codon_usage(DNAStr, "L")}\n')
from Bio.Seq import Seq
from Bio.SeqUtils import GC
import pandas as pd
def readFile(filePath):
    '''Read the file and return a list of lines'''
with open(filePath,'r') as f:
return[l.strip() for l in f.readlines()]
def gc_content(seq):
    '''Return the GC content of a DNA/RNA sequence'''
    return (seq.count('C') + seq.count('G')) / len(seq) * 100
# === Clean/Prepare the data (format it for use with our gc_content function)
# Convert FASTA/FASTQ file data into a dictionary keyed by the record label
FASTAFile = readFile('example.fastq')  # placeholder path -- point this at your own file
FASTADict = {}
FASTALabel = ''
for line in FASTAFile:
    if '@' in line:
        FASTALabel = line
        FASTADict[FASTALabel] = ''
    else:
        FASTADict[FASTALabel] += line
for label in FASTADict:
    if '+' in FASTADict[label]:
        # keep only the sequence portion before the FASTQ '+' separator (assumed intent of this unfinished step)
        FASTADict[label] = FASTADict[label].split('+')[0]
###Output
_____no_output_____ |
archived/programming/python/Python_Algorithms.ipynb | ###Markdown
ref- https://github.com/keon/algorithms 1) Array 1-1) flatten
###Code
# given [2, 1, [3, [4, 5], 6], 7, [8]]
# output [2, 1, 3, 4, 5, 6, 7, 8]
def list_flatten(l, a=None):
print ('a = ', a)
a = list(a) if isinstance(a, (list, tuple)) else []
for i in l:
#print ('i = ', i)
if isinstance(i, (list, tuple)):
a = list_flatten(i, a)
else:
a.append(i)
return a
given = [2, 1, [3, [4, 5], 6], 7, [8]]
list_flatten(given)
###Output
a = None
a = [2, 1]
a = [2, 1, 3]
a = [2, 1, 3, 4, 5, 6, 7]
###Markdown
1-2) garage
###Code
# https://github.com/keon/algorithms/blob/master/array/garage.py
# The goal is to "find out the least movement needed to rearrange
# the parking lot from the initial state to the final state."
# Each step we are only allowed to move a car
# Say the initial state is an array:
# [1,2,3,0,4],
# where 1,2,3,4 are different cars, and 0 is the empty spot.
# And the final state is
# [0,3,2,1,4].
# We can swap 1 with 0 in the initial array to get [0,2,3,1,4] and so on.
# Each step swap with 0 only.
# credit by cyberking-saga
def garage(initial, final):
steps = 0
while initial != final:
zero = initial.index(0)
if zero != final.index(0):
car_to_move = final[zero]
pos = initial.index(car_to_move)
initial[zero], initial[pos] = initial[pos], initial[zero]
else:
for i in range(len(initial)):
if initial[i] != final[i]:
initial[zero], initial[i] = initial[i], initial[zero]
break
steps += 1
return steps
initial = [4, 2, 3, 1, 0]
final = [0, 3, 2, 1, 4]
print("initial:", initial)
print("final:", final)
print(garage(initial, final))
###Output
initial: [4, 2, 3, 1, 0]
final: [0, 3, 2, 1, 4]
4
###Markdown
1-3) longest_non_repeat
###Code
def longest_non_repeat(s):
start, maxlen = 0, 0
used_char = {}
for i, char in enumerate(s):
if char in used_char and start <= used_char[char]:
start = used_char[char] + 1
else:
maxlen = max(maxlen, i-start+1)
used_char[char] = i
output = ''.join( str(x) for x in list(used_char.keys()) )
return maxlen, output
a = "abcabcdefzb"
b = "qweeioplkj"
c = "eeerfevg4e"
longest_non_repeat(a)
###Output
_____no_output_____
###Markdown
1-4) merge_intervals
###Code
# Definition for an interval.
class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
def merge(intervals):
"""
:type intervals: List[Interval]
:rtype: List[Interval]
"""
out = []
for i in sorted(intervals, key=lambda i: i.start):
if out and i.start <= out[-1].end:
out[-1].end = max(out[-1].end, i.end)
else:
out += i,
return out
def print_intervals(intervals):
res = []
for i in intervals:
res.append('['+str(i.start)+','+str(i.end)+']')
print("".join(res))
given = [[1,99],[2,6],[8,10],[15,18]]
intervals = []
for l, r in given:
intervals.append(Interval(l,r))
print_intervals(intervals)
print_intervals(merge(intervals))
###Output
[1,99][2,6][8,10][15,18]
[1,99]
###Markdown
1-5) missing_ranges
###Code
## find missing ranges between low and high in the given array.
# ex) [3, 5] lo=1 hi=10 => answer: [1->2, 4, 6->10]
def missing_ranges(nums, lo, hi):
res = []
start = lo
for num in nums:
if num < start:
            # 'continue' skips the rest of this loop body and moves on to the next number
continue
if num == start:
start += 1
continue
res.append(get_range(start, num-1))
start = num + 1
#print (start)
if start <= hi:
res.append(get_range(start, hi))
return res
def get_range(n1, n2):
if n1 == n2:
return str(n1)
else:
return str(n1) + "->" + str(n2)
nums = [3, 5, 10, 11, 12, 15, 19]
print("original:", nums)
print("missing range: ", missing_ranges(nums,0,200))
###Output
original: [3, 5, 10, 11, 12, 15, 19]
missing range: ['0->2', '4', '6->9', '13->14', '16->18', '20->200']
###Markdown
1-6) plus_one 1-7) rotate_array
###Code
# example : rotate([1,2,3,4,5,6,7],3) -> [5,6,7,1,2,3,4]
def rotate(nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
n = len(nums)
k = k % n
reverse(nums, 0, n - k - 1)
reverse(nums, n - k, n - 1)
reverse(nums, 0, n - 1)
return nums
def reverse(array, a, b):
while a < b:
array[a], array[b] = array[b], array[a]
a += 1
b -= 1
a = [1, 2, 3, 4, 10, 11, 20, 101021, 1423, 0]
rotate(a, 5)
def my_rotate(array,n):
if n == 0:
return array
else:
length = len(array)
array_ = array[-n:]
array_sub = array[:length- n]
array_rotate = array_+ array_sub
return array_rotate
a = [1, 2, 3, 4, 10, 11, 20, 101021, 1423, 0]
my_rotate(a,5)
###Output
_____no_output_____ |
QC Programming/QFT of simple QPU signal.ipynb | ###Markdown
**QFT of simple QPU signal**
###Code
import numpy as np
# Importing standard Qiskit libraries
from qiskit import QuantumCircuit, transpile, Aer, IBMQ, QuantumRegister, ClassicalRegister, execute, BasicAer
from qiskit.tools.jupyter import *
from qiskit.visualization import *
from ibm_quantum_widgets import *
from qiskit.providers.aer import QasmSimulator
# Loading your IBM Quantum account(s)
provider = IBMQ.load_account()
import math
%matplotlib inline
# Set up the program
signal = QuantumRegister(4, name='signal')
qc = QuantumCircuit(signal)
def main():
## prepare the signal
qc.h(signal);
qc.rz(math.radians(45), signal[0]);
qc.rz(math.radians(90), signal[1]);
qc.rz(math.radians(180), signal[2]);
qc.barrier()
QFT(signal)
def QFT(qreg):
## This QFT implementation is adapted from IBM's sample:
## https://github.com/Qiskit/qiskit-terra/blob/master/examples/python/qft.py
## ...with a few adjustments to match the book QFT implementation exactly
n = len(qreg)
for j in range(n):
for k in range(j):
qc.cu1(-math.pi/float(2**(j-k)), qreg[n-j-1], qreg[n-k-1])
qc.h(qreg[n-j-1])
# Now finish the QFT by reversing the order of the qubits
for j in range(n//2):
qc.swap(qreg[j], qreg[n-j-1])
main()
backend = BasicAer.get_backend('statevector_simulator')
job = execute(qc, backend)
result = job.result()
outputstate = result.get_statevector(qc, decimals=3)
for i,amp in enumerate(outputstate):
if abs(amp) > 0.000001:
prob = abs(amp) * abs(amp)
print('|{}> {} probability = {}%'.format(i, amp, round(prob * 100, 5)))
qc.draw() # draw the circuit
###Output
|2> (-0.924-0.383j) probability = 100.0465%
|
.ipynb_checkpoints/y_label generation-checkpoint.ipynb | ###Markdown
Producing y labels
###Code
import csv
import pandas as pd
with open('labels.csv', newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
for row in spamreader:
print(', '.join(row))
###Output
3,pulmonary_fibrosis,0
7,tuberculosis,2
8,pulmonary_fibrosis,0
12,pneumocystis_pneumonia,2
13,pneumocystis_pneumonia,2
14,sarcoidosis,2
15,tuberculosis,2
17,hypersensitivity_pneumonitis,1
18,hypersensitivity_pneumonitis,1
19,hypersensitivity_pneumonitis,1
21,hypersensitivity_pneumonitis,1
23,acute_interstitial_pneumonia,2
25,cryptogenic_organizing_pneumonia,2
26,cryptogenic_organizing_pneumonia,2
27,cryptogenic_organizing_pneumonia,2
30,cryptogenic_organizing_pneumonia,2
32,tuberculosis,2
34,tuberculosis,2
35,tuberculosis,2
36,tuberculosis,2
37,tuberculosis,2
38,tuberculosis,2
39,tuberculosis,2
40,tuberculosis,2
41,tuberculosis,2
42,hypersensitivity_pneumonitis,1
44,hypersensitivity_pneumonitis,1
45,hypersensitivity_pneumonitis,1
46,hypersensitivity_pneumonitis,1
47,hypersensitivity_pneumonitis,1
48,hypersensitivity_pneumonitis,1
49,hypersensitivity_pneumonitis,1
50,hypersensitivity_pneumonitis,1
51,hypersensitivity_pneumonitis,1
53,pulmonary_fibrosis,0
56,pulmonary_fibrosis,0
57,pulmonary_fibrosis,0
60,pneumocystis_pneumonia,2
62,pulmonary_fibrosis,0
65,pneumocystis_pneumonia,2
66,pneumocystis_pneumonia,2
67,pneumocystis_pneumonia,2
68,pneumocystis_pneumonia,2
70,pneumocystis_pneumonia,2
73,pulmonary_fibrosis,0
74,pulmonary_fibrosis,0
76,pulmonary_fibrosis,0
77,pulmonary_fibrosis,0
78,pulmonary_fibrosis,0
80,pulmonary_fibrosis,0
81,pulmonary_fibrosis,0
82,pulmonary_fibrosis,0
83,desquamative_interstitial_pneumonia,2
84,pulmonary_fibrosis,0
86,pulmonary_fibrosis,0
87,pulmonary_fibrosis,0
89,pulmonary_fibrosis,0
90,pulmonary_fibrosis,0
91,pulmonary_fibrosis,0
92,pulmonary_fibrosis,0
93,pulmonary_fibrosis,0
94,pulmonary_fibrosis,0
101,pulmonary_fibrosis,0
105,cryptogenic_organizing_pneumonia,2
107,sarcoidosis,2
108,nonspecific_interstitial_pneumonia,2
109,respiratory_bronchiolitis_associated_ILD,2
112,hypersensitivity_pneumonitis,1
116,acute_interstitial_pneumonia,2
118,pulmonary_fibrosis,0
119,sarcoidosis,2
120,pulmonary_fibrosis,0
121,pulmonary_fibrosis,0
122,sarcoidosis,2
123,langerhans_cell_histiocytosis,2
124,sarcoidosis,2
126,sarcoidosis,2
127,nonspecific_interstitial_pneumonia,2
128,pulmonary_fibrosis,0
129,sarcoidosis,2
130,sarcoidosis,2
131,sarcoidosis,2
132,sarcoidosis,2
134,cryptogenic_organizing_pneumonia,2
135,cryptogenic_organizing_pneumonia,2
136,pulmonary_fibrosis,0
137,sarcoidosis,2
138,healthy,2
140,tuberculosis,2
142,pulmonary_fibrosis,0
143,acute_interstitial_pneumonia,2
144,pulmonary_fibrosis,0
147,tuberculosis,2
149,sarcoidosis,2
150,eosinophilic_pneumonia,2
152,hypersensitivity_pneumonitis,1
153,hypersensitivity_pneumonitis,1
154,hypersensitivity_pneumonitis,1
155,hypersensitivity_pneumonitis,1
157,hypersensitivity_pneumonitis,1
158,sarcoidosis,2
159,sarcoidosis,2
160,pulmonary_fibrosis,0
162,acute_interstitial_pneumonia,2
163,hypersensitivity_pneumonitis,1
164,hypersensitivity_pneumonitis,1
165,pulmonary_fibrosis,0
166,hypersensitivity_pneumonitis,1
167,pulmonary_fibrosis,0
168,pulmonary_fibrosis,0
169,healthy,2
171,tuberculosis,2
172,lymphocytic_interstitial_pneumonia,2
173,tuberculosis,2
174,sarcoidosis,2
175,pulmonary_fibrosis,0
177,sarcoidosis,2
179,sarcoidosis,2
180,hypersensitivity_pneumonitis,1
181,pulmonary_fibrosis,0
182,sarcoidosis,2
183,sarcoidosis,2
184,pulmonary_fibrosis,0
185,pulmonary_fibrosis,0
|
DEPORTED/API query April deported.ipynb | ###Markdown
> This notebook gathers data from http://memoria.gencat.cat/ca/que-fem/banc-memoria-democratica/fons/deportats-catalans-i-espanyols-als-camps-nazis/ which holds 9_187 entries as of April 1, 2021.
###Code
import requests
import json
# this is the url that's called when we look up a single record
url2 = 'https://dedalo4.bancmemorial.extranet.gencat.cat/dedalo/lib/dedalo/publication/server_api/v1/json/records?code=85df5s$4Kue%C3%B1wQw5O2p4J1G9&lang=lg-cat&table=deportats&count=true§ion_id=1&resolve_portal=true&resolve_portals_custom={%22deportat%22:%22informant%22,%22deportat.biography%22:%22biografia%22,%22exercit_frances%22:%22deportats_treballs_exercit_frances%22,%22camps_francesos%22:%22deportats_camps_francesos%22,%22camps_concentracio%22:%22deportats_camps_concentracio%22,%22consultes_arxius%22:%22deportats_consultes_arxius%22,%22kommando_extern%22:%22deportats_kommando_extern%22,%22publicacions%22:%22referencia_bibliografica%22,%22publicacio_web%22:%22referencia_bibliografica%22,%22empresonament%22:%22deportats_empresonament%22,%22tren_salida%22:%22deportats_trens%22,%22organizacio_todt%22:%22deportats_todt%22,%22indemnitzacions%22:%22deportats_indemnitzacions%22}&ar_fields=section_id,data_mod,deportat,exercit_frances,camps_francesos,camps_concentracio,consultes_arxius,kommando_extern,publicacions,publicacio_web,situacio_deportat,data_situacio,ref_lloc_situacio,unitat_militar,frontera,destinacio,graduacio_militar,data_pas_per_frontera,resistencia,lloc_lluita,cut_lloc_lluita,lloc_lluita_original,lloc_detencio,ref_lloc_detencio,lloc_detencio_original,data_detencio,empresonament,tren_salida,organizacio_todt,ref_observacions,ref_repatriacio_franca,data_repatriacio_franca,data_tornarda_espanya,ref_lloc_residencia_posterior,ref_emigracio,indemnitzacions,obs_deportacio'
req = requests.get(url2)
req
# this is the above url, shortened to request the whole table as an API call
url = 'https://dedalo4.bancmemorial.extranet.gencat.cat/dedalo/lib/dedalo/publication/server_api/v1/json/records?code=85df5s$4Kue%C3%B1wQw5O2p4J1G9&lang=lg-cat&table=deportats'
req = requests.get(url)
req
# assign our data
d = req.json()
# the result key has a list as its value: this is a list of 9187 dictionaries
len(d['result'])
# for example
d['result'][7823]
# save dict (including list of dicts), as json, locally (47MB)
with open ('api_call_01042021.json', 'w') as f:
json.dump(d, f)
###Output
_____no_output_____
###Markdown
Reimport to work locally
###Code
## Reimport locally
with open('api_call_01042021.json') as json_data:
d = json.load(json_data)
# how many items in our data?
len(d)
# what are the keys in our data?
d.keys()
# the result list is the one we want
len(d['result'])
# assign to 'files'
files = d['result']
# look at one
files[8023]
###Output
_____no_output_____ |
lessons/20180529_Machine_Learning_Zsofia_Stefania_Marc/StudyGroup_MachineLearning_Class.ipynb | ###Markdown
Machine Learning applications to a cancer dataset Set-up
###Code
# Import all the necessary packages
import numpy as np
import pandas as pd # for dataframe manipulation
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split # to split the dataset into one train & one test sets
from sklearn.metrics import confusion_matrix # to calculate metrics on the trained classifier and see how well it perfoms on the test dataset
from sklearn.metrics import accuracy_score,precision_score,recall_score # to calculate the accuracy of the trained classifier
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt # for plots
import seaborn as sns; # for plots
###Output
_____no_output_____
###Markdown
We have now installed and loaded the modules necessary for our analyses. Importation of the datasetsThe breast cancer dataset (`breast-cancer-wisconsin.csv`) has 11 columns and 699 rows.Column descriptions: | n° | attribute |Domain||------|------|------|| 1 | Sample number|id number || 2 | Clump thickness|1-10|| 3 | Uniformity of Cell Size|1-10 || 4 | Uniformity of Cell Shape|1-10 || 5 | Marginal Adhesion |1-10 || 6 | Single Epithelial Cell Size|1-10 || 7 | Bare Nuclei |1-10 || 8 | Bland Chromatin |1-10 || 9 | Normal Nucleoli |1-10 || 10 | Mitoses |1-10 || 11 | Class|2,4 |For the cancer class (column "Class"):- 2: benign cancer- 4: malignant cancer
###Code
# import dataset
df = pd.read_csv(filepath_or_buffer="breast-cancer-wisconsin.csv",header=None)
# rename columns
col_names = ["CodeNumber", "ClumpThickness", "UniformityCellSize", "UniformityCellShape", "MarginalAdhesion",
"SingleEpithelialCellSize", "BareNuclei","BlandChromatin", "NormalNucleoli", "Mitoses",
"CancerType"]
df.columns= col_names
df.head()
# drop the BareNuclei column (since it contains missing values coded as "?")
del df["BareNuclei"]
df.head()
###Output
_____no_output_____
###Markdown
Split the original data into a train and a test datasetWe are now going to use our original dataset to:- train a Random Forest (RF) model- test our RF model on a test dataset.We are going to split our dataset (`breast-cancer-wisconsin`) into a train and a test dataset.
###Code
# specify the label (=y) variable (cancer class)
label = df.CancerType
# specify the features (=x) variables (measured variables)
features = df.iloc[:,1:9]
# Split dataset into a random train and test subsets
# We need to indicate which subset corresponds to the features/variables and which column corresponds to the class we are trying to predict.
# Finally, we also indicate the percentage of the dataset to include into the train split (proportion of samples used to train the model)
train_x, test_x, train_y, test_y = train_test_split(features,label,train_size=0.7,test_size=0.3)
# Let's do some "sanity checks"
print("We have " + str(train_x.shape[0]) + " samples in the train dataset ")
print("We have " + str(test_x.shape[0]) + " samples in the test dataset ")
sum_samples = train_x.shape[0] + test_x.shape[0]
print("We have in total " + str(sum_samples) + " in both train and test datasets")
###Output
We have 489 samples in the train dataset
We have 210 samples in the test dataset
We have in total 699 in both train and test datasets
###Markdown
Random Forest classifier Training the classifier
###Code
# Set Random Forest parameters
n_estimators=30
criterion='gini'
max_depth=30
min_samples_split=5
min_samples_leaf=5
max_features='auto'
max_leaf_nodes=None
bootstrap=True
oob_score=True
n_jobs=1
random_state=None
verbose=0
class_weight='balanced'
# build the Random Forest classifier
forest = RandomForestClassifier(n_estimators=n_estimators, criterion=criterion, max_depth=max_depth,
min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf,
max_features=max_features, max_leaf_nodes=max_leaf_nodes, bootstrap=bootstrap, oob_score=oob_score,
n_jobs=n_jobs, random_state=random_state, verbose=verbose,class_weight=class_weight)
# train the Random Forest classifier on our dataset
RF_classifier = forest.fit(train_x, train_y)
###Output
_____no_output_____
###Markdown
Benchmarking the performance of the trained RF classifier on the test dataset
###Code
# Validation
mypredtest=RF_classifier.predict(test_x)
print(classification_report(test_y, mypredtest))
# Calculation and graphical representation of the confusion matrix
# definition of a helper function (taken from
# http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html)
# get the target labels (it is an output from the classifier)
target_names=RF_classifier.classes_
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(test_y, mypredtest)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
###Output
Confusion matrix, without normalization
[[126 8]
[ 3 73]]
Normalized confusion matrix
[[ 0.94 0.06]
[ 0.04 0.96]]
###Markdown
Getting the best predictors of the cancer class (benign=2/malign=4)
###Code
# Get feature importance
importances=RF_classifier.feature_importances_
indices = np.argsort(importances)[::-1]
# Get the name of each features
feature_list=np.array(features.columns[0:9])
# Visualize
plt.figure()
plt.title("Feature importances")
plt.bar(range(train_x.shape[1]), importances[indices],
color="r", align="center")
plt.xticks(range(train_x.shape[1]), feature_list[indices],rotation=45,horizontalalignment='right')
plt.xlim([-1, train_x.shape[1]])
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
code/metr-select-parameters.ipynb | ###Markdown
Step 1: Create dataframes of all vars
###Code
# CHANGE THE REFORM JSON AND YEAR BELOW TO THE DESIRED ANALYSIS FRAMEWORK
cyr = 2030
rec = Records()
pol = Policy()
pol.implement_reform(Policy.read_json_reform('../reforms/biden-iitax-reforms.json'))
calc= Calculator(pol, rec)
calc.advance_to_year(cyr)
calc.calc_all()
df = calc.dataframe([], all_vars=True)
# create variables needed in tau_nc construction outside of loop
df['net_bus_inc'] = (df['e02000'] - df['e26270']) + df['e00900p']
df['mtrC'] = np.where((df['c04800'] > 0) & (df['net_bus_inc'] > 0), calc.mtr(variable_str='e00900p', wrt_full_compensation=False)[1], 0)
df['mtrE1'] = np.where((df['c04800'] > 0) & (df['net_bus_inc'] > 0), calc.mtr(variable_str='e02000', wrt_full_compensation=False)[1], 0)
df['mtrE2'] = np.where((df['c04800'] > 0) & (df['net_bus_inc'] > 0), calc.mtr(variable_str='e26270', wrt_full_compensation=False)[1], 0)
df['mtrE'] = df['mtrE1'] - df['mtrE2']
df['mtr_net_bus_inc'] = df['mtrE'] + df['mtrC']
df['weight_net_bus_inc'] = np.where((df['c04800'] > 0) & (df['net_bus_inc']>0), df['net_bus_inc']/(df['net_bus_inc'].sum()),0)
df['w_net_bus_inc'] = np.where((df['c04800'] > 0) & (df['net_bus_inc']>0), df['mtr_net_bus_inc'] * df['weight_net_bus_inc'],0)
# loop for rest of variables
def make_mtrs(df, var):
df['mtr_' + var] = np.where((df['c04800'] > 0) & (df[var]>0), calc.mtr(variable_str=var, wrt_full_compensation=False)[1], 0)
df['weight_' + var] = np.where((df['c04800'] > 0) & (df[var]>0), df[var]/(df[var].sum()),0)
df['w_' + var] = np.where((df['c04800'] > 0) & (df[var]>0), df['mtr_' + var] * df['weight_' + var],0)
return df
for var in ['e00650', 'e00300', 'p22250', 'p23250', 'e01700','e00200p']:
make_mtrs(df, var)
###Output
_____no_output_____
###Markdown
Step 2: Create tau parameters from dataframe
###Code
# create weights
df = df.assign(wage_weight = (df['e00200p'] * df['s006']) / sum(df['e00200p'] * df['s006']))
df = df.assign(interest_weight = (df['e00300'] * df['s006']) / sum(df['e00300'] * df['s006']))
df = df.assign(dividend_weight = (df['e00650'] * df['s006']) / sum(df['e00650'] * df['s006']))
df = df.assign(ltgains_weight = (df['p23250'] * df['s006']) / sum(df['p23250'] * df['s006']))
df = df.assign(stgains_weight = (df['p22250'] * df['s006']) / sum(df['p22250'] * df['s006']))
df = df.assign(pension_weight = (df['e01700'] * df['s006']) / sum(df['e01700'] * df['s006']))
df = df.assign(business_weight = (df['net_bus_inc'] * df['s006']) / sum(df['net_bus_inc'] * df['s006']))
#
df.loc[(df['e00200p'] > 0) & (df['c04800'] > 0), 'wage_weight'] = (df.loc[(df['e00200p'] > 0) & (df['c04800'] > 0), 'e00200p'] * df.loc[(df['e00200p'] > 0) & (df['c04800'] > 0), 's006']) \
/ sum(df.loc[(df['e00200p'] > 0) & (df['c04800'] > 0), 'e00200p'] * df.loc[(df['e00200p'] > 0) & (df['c04800'] > 0), 's006'])
df.loc[(df['e00300'] > 0) & (df['c04800'] > 0), 'interest_weight'] = (df.loc[(df['e00300'] > 0) & (df['c04800'] > 0), 'e00300'] * df.loc[(df['e00300'] > 0) & (df['c04800'] > 0), 's006']) \
/ sum(df.loc[(df['e00300'] > 0) & (df['c04800'] > 0), 'e00300'] * df.loc[(df['e00300'] > 0) & (df['c04800'] > 0), 's006'])
df.loc[(df['e00650'] > 0) & (df['c04800'] > 0), 'dividend_weight'] = (df.loc[(df['e00650'] > 0) & (df['c04800'] > 0), 'e00650'] * df.loc[(df['e00650'] > 0) & (df['c04800'] > 0), 's006']) \
/ sum(df.loc[(df['e00650'] > 0) & (df['c04800'] > 0), 'e00650'] * df.loc[(df['e00650'] > 0) & (df['c04800'] > 0), 's006'])
df.loc[(df['p23250'] > 0) & (df['c04800'] > 0), 'ltgains_weight'] = (df.loc[(df['p23250'] > 0) & (df['c04800'] > 0), 'p23250'] * df.loc[(df['p23250'] > 0) & (df['c04800'] > 0), 's006']) \
/ sum(df.loc[(df['p23250'] > 0) & (df['c04800'] > 0), 'p23250'] * df.loc[(df['p23250'] > 0) & (df['c04800'] > 0), 's006'])
df.loc[(df['p22250'] > 0) & (df['c04800'] > 0), 'stgains_weight'] = (df.loc[(df['p22250'] > 0) & (df['c04800'] > 0), 'p22250'] * df.loc[(df['p22250'] > 0) & (df['c04800'] > 0), 's006']) \
/ sum(df.loc[(df['p22250'] > 0) & (df['c04800'] > 0), 'p22250'] * df.loc[(df['p22250'] > 0) & (df['c04800'] > 0), 's006'])
df.loc[(df['e01700'] > 0) & (df['c04800'] > 0), 'pension_weight'] = (df.loc[(df['e01700'] > 0) & (df['c04800'] > 0), 'e01700'] * df.loc[(df['e01700'] > 0) & (df['c04800'] > 0), 's006']) \
/ sum(df.loc[(df['e01700'] > 0) & (df['c04800'] > 0), 'e01700'] * df.loc[(df['e01700'] > 0) & (df['c04800'] > 0), 's006'])
df.loc[(df['net_bus_inc'] > 0) & (df['c04800'] > 0), 'business_weight'] = (df.loc[(df['net_bus_inc'] > 0) & (df['c04800'] > 0), 'net_bus_inc'] * df.loc[(df['net_bus_inc'] > 0) & (df['c04800'] > 0), 's006']) \
/ sum(df.loc[(df['net_bus_inc'] > 0) & (df['c04800'] > 0), 'net_bus_inc'] * df.loc[(df['net_bus_inc'] > 0) & (df['c04800'] > 0), 's006'])
# sum of (weight x mtr)
results = {
'tau_wages': sum(df.loc[(df['e00200p'] > 0) & (df['c04800'] > 0), 'mtr_e00200p'] * df.loc[(df['e00200p'] > 0) & (df['c04800'] > 0), 'wage_weight']),
'tau_interest': sum(df.loc[(df['e00300'] > 0) & (df['c04800'] > 0), 'mtr_e00300'] * df.loc[(df['e00300'] > 0) & (df['c04800'] > 0), 'interest_weight']),
'tau_dividends': sum(df.loc[(df['e00650'] > 0) & (df['c04800'] > 0), 'mtr_e00650'] * df.loc[(df['e00650'] > 0) & (df['c04800'] > 0), 'dividend_weight']),
'tau_ltcapgain' : sum(df.loc[(df['p23250'] > 0) & (df['c04800'] > 0), 'mtr_p23250'] * df.loc[(df['p23250'] > 0) & (df['c04800'] > 0), 'ltgains_weight']),
'tau_stcapgain': sum(df.loc[(df['p22250'] > 0) & (df['c04800'] > 0), 'mtr_p22250'] * df.loc[(df['p22250'] > 0) & (df['c04800'] > 0), 'stgains_weight']),
'tau_taxdef': sum(df.loc[(df['e01700'] > 0) & (df['c04800'] > 0), 'mtr_e01700'] * df.loc[(df['e01700'] > 0) & (df['c04800'] > 0), 'pension_weight']),
'tau_businc': sum(df.loc[(df['net_bus_inc'] > 0) & (df['c04800'] > 0), 'mtr_net_bus_inc'] * df.loc[(df['net_bus_inc'] > 0) & (df['c04800'] > 0), 'business_weight'])
}
results
# AS OF 10/01

# 2021 Output -- Biden
# {'tau_wages': 0.22579741535205383,
#  'tau_businc': 0.23688645926320534,
#  'tau_dividends': 0.22154782524778652,
#  'tau_interest': 0.32587731116122715,
#  'tau_stcapgain': 0.35316762250245715,
#  'tau_ltcapgain': 0.30317417181222733,
#  'tau_taxdef': 0.20557529276761868}

# 2030 Output -- Biden
# {'tau_wages': 0.2615570975722658,
#  'tau_businc': 0.28690800605154393,
#  'tau_dividends': 0.23576880808217743,
#  'tau_interest': 0.3501197033722555,
#  'tau_stcapgain': 0.360018242929579,
#  'tau_ltcapgain': 0.302696552046099,
#  'tau_taxdef': 0.2519422134315737}

# 2021 Output - Current Law
# {'tau_wages': 0.22055633310698056,
#  'tau_businc': 0.21212348629625527,
#  'tau_dividends': 0.18144637902683441,
#  'tau_interest': 0.3045090753423554,
#  'tau_stcapgain': 0.33172716752431924,
#  'tau_ltcapgain': 0.21411727367834893,
#  'tau_taxdef': 0.20432708930036364}

# 2030 Output - Current Law
# {'tau_wages': 0.26270810021063296,
#  'tau_businc': 0.2826226943704139,
#  'tau_dividends': 0.19913431879864069,
#  'tau_interest': 0.34051886072557286,
#  'tau_stcapgain': 0.3519825834480394,
#  'tau_ltcapgain': 0.21957756590835306,
#  'tau_taxdef': 0.2517037725957203}
###Output
_____no_output_____ |
Sessions/Session03/Day3/MapReduce.ipynb | ###Markdown
Data Management Part 2: Map Reduce

**Version 0.1**

Problem 2 has been adapted from a homework developed by Bill Howe at the University of Washington department of Computer Science and Engineering. He says:

> In this assignment, you will be designing and implementing MapReduce algorithms for a variety of common data processing tasks. The MapReduce programming model (and a corresponding system) was proposed in a 2004 paper from a team at Google as a simpler abstraction for processing very large datasets in parallel. The goal of this assignment is to give you experience "thinking in MapReduce." We will be using small datasets that you can inspect directly to determine the correctness of your results and to internalize how MapReduce works.

On Friday, we'll do a demo of a MapReduce-based system to process the large datasets for which it was designed.

* * *

Problem 1: python builtins, map, reduce, and filter

Recall yesterday's challenge problem, where we defined a function that returned True if a triangle was smaller than some threshold and False otherwise. We filtered the triangles as follows:
```
idx = [isTriangleLargerThan(triangle) for triangle in triangles]
onlySmallTriangles = triangles[idx]
```
You could also do this with the `map` function:
```
idx = map(isTriangleLargerThan, triangles)
onlySmallTriangles = triangles[idx]
```
or `filter`:
```
onlySmallTriangles = filter(isTriangleLargerThan, triangles)
```
The following code example shows how we'd use them to compute a sum of 3 partitions. Pretend that the 3 lists are on different nodes. :)

_Note 1) this is operating on a set of values rather than key/value pairs (which we'll introduce in Problem 2)._

_Note 2) Yes, this is contrived. In real life, you wouldn't go through this trouble to compute a simple sum, but it is a warm up for Problem 2_
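First, though, here is a tiny runnable version of the triangle filtering shown above (a sketch; `isTriangleLargerThan` and the triangle data are stand-ins for yesterday's definitions, and per the text the function returns True when the triangle is smaller than the threshold):

```python
import numpy as np

# stand-in: True if the triangle's area is below some threshold
def isTriangleLargerThan(triangle, threshold=1.0):
    (x1, y1), (x2, y2), (x3, y3) = triangle
    area = 0.5 * abs((x2 - x1) * (y3 - y1) - (x3 - x1) * (y2 - y1))
    return area < threshold

triangles = np.array([[(0, 0), (1, 0), (0, 1)],     # area 0.5 -> kept
                      [(0, 0), (4, 0), (0, 4)]])    # area 8.0 -> dropped

# list comprehension, map, and filter all select the same triangles
idx = [isTriangleLargerThan(t) for t in triangles]
onlySmallTriangles = triangles[idx]
onlySmallTriangles_map = triangles[list(map(isTriangleLargerThan, triangles))]
onlySmallTriangles_filter = list(filter(isTriangleLargerThan, triangles))
```

Note that in Python 3 the `map` result has to be turned into a list before it can be used as a boolean index.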
###Code
import numpy as np
from functools import reduce   # needed in Python 3, where reduce is no longer a builtin
def mapper(arr):
return np.sum(arr)
def reducer(x, y):
return x + y
a = [1, 12, 3]
b = [4, 12, 6, 3]
c = [8, 1, 12, 11, 12, 2]
inputData = [a, b, c]
# Find the sum of all the numbers:
intermediate = map(mapper, inputData)
reduce(reducer, intermediate)
###Output
_____no_output_____
###Markdown
**Problem 1a**) Re-write the mapper and reducer to return the **maximum** number in all 3 lists.
###Code
def mapper(arr):
# COMPLETE
def reducer(x, y):
# COMPLETE
intermediate = map(mapper, inputData)
reduce(reducer, intermediate)
###Output
_____no_output_____
###Markdown
**Problem 1b)** How would you use this to compute the MEAN of the input data? (One possible approach is sketched below, after the setup.)

**Problem 1c)** Think about how you would adapt this to compute the MEDIAN of the input data. Do not implement it today! If it seems hard, it is because it is.

What special properties do SUM, MAX, and MEAN have that make them trivial to represent in MapReduce?

Problem 2) Let's go through a more complete example. The following MapReduce class faithfully implements the MapReduce programming model, but it executes entirely on one processor -- it does not involve parallel computation.

**Setup**

First, download the data:

```
$ curl -O https://lsst-web.ncsa.illinois.edu/~yusra/escience_mr/books.json
$ curl -O https://lsst-web.ncsa.illinois.edu/~yusra/escience_mr/records.json
```
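As promised above, here is one possible way to approach Problem 1b (a sketch; the helper names are illustrative, not the official solution). Each mapper returns a partial sum together with a count, and the reducer combines the pairs, so nothing is lost before the final division:

```python
from functools import reduce

a = [1, 12, 3]
b = [4, 12, 6, 3]
c = [8, 1, 12, 11, 12, 2]
inputData = [a, b, c]

def mean_mapper(arr):
    # each partition reports its partial sum and how many values it saw
    return (sum(arr), len(arr))

def mean_reducer(x, y):
    # combine (partial_sum, count) pairs
    return (x[0] + y[0], x[1] + y[1])

total, count = reduce(mean_reducer, map(mean_mapper, inputData))
print(total / count)   # the overall mean
```

Sums and counts combine associatively, which is the "special property" the question above is getting at; a median does not decompose this way.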
###Code
DATA_DIR = './data' # Set your path to the data files
import json
import os
import sys
class MapReduce:
def __init__(self):
self.intermediate = {}
self.result = []
def emit_intermediate(self, key, value):
self.intermediate.setdefault(key, [])
self.intermediate[key].append(value)
def emit(self, value):
self.result.append(value)
def execute(self, data, mapper, reducer):
for line in data:
record = json.loads(line)
mapper(record)
for key in self.intermediate:
reducer(key, self.intermediate[key])
jenc = json.JSONEncoder()
for item in self.result:
print(jenc.encode(item))
###Output
_____no_output_____
###Markdown
Here is the word count example discussed in class implemented as a MapReduce program using the framework:
###Code
# Part 1
mr = MapReduce()
# Part 2
def mapper(record):
# key: document identifier
# value: document contents
key = record[0]
value = record[1]
words = value.split()
for w in words:
mr.emit_intermediate(w, 1)
# Part 3
def reducer(key, list_of_values):
# key: word
# value: list of occurrence counts
total = 0
for v in list_of_values:
total += v
mr.emit((key, total))
# Part 4
inputdata = open(os.path.join(DATA_DIR, "books.json"))
mr.execute(inputdata, mapper, reducer)
###Output
_____no_output_____
###Markdown
Problem 2a) Create an inverted index. Given a set of documents, an inverted index is a dictionary where each word is associated with a list of the document identifiers in which that word appears.

**Mapper Input**

The input is a 2 element list: [document_id, text], where document_id is a string representing a document identifier and text is a string representing the text of the document. The document text may have words in upper or lower case and may contain punctuation. You should treat each token as if it was a valid word; that is, you can just use value.split() to tokenize the string.

**Reducer Output**

The output should be a (word, document ID list) tuple where word is a String and document ID list is a list of Strings like:
```
["all", ["milton-paradise.txt", "blake-poems.txt", "melville-moby_dick.txt"]]
["Rossmore", ["edgeworth-parents.txt"]]
["Consumptive", ["melville-moby_dick.txt"]]
["forbidden", ["milton-paradise.txt"]]
["child", ["blake-poems.txt"]]
["eldest", ["edgeworth-parents.txt"]]
["four", ["edgeworth-parents.txt"]]
["Caesar", ["shakespeare-caesar.txt"]]
["winds", ["whitman-leaves.txt"]]
["Moses", ["bible-kjv.txt"]]
["children", ["edgeworth-parents.txt"]]
["seemed", ["chesterton-ball.txt", "austen-emma.txt"]]
etc...
```
###Code
mr = MapReduce()
def mapper(record):
    # COMPLETE
def reducer(key, list_of_values):
# COMPLETE
inputdata = open(os.path.join(DATA_DIR, "books.json"))
mr.execute(inputdata, mapper, reducer)
###Output
_____no_output_____
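For reference, here is one possible mapper/reducer pair for Problem 2a (a sketch using the `MapReduce` class, `DATA_DIR`, and the `os` import from the cells above; your own solution may differ):

```python
mr = MapReduce()

def mapper(record):
    doc_id, text = record[0], record[1]
    for word in text.split():
        mr.emit_intermediate(word, doc_id)

def reducer(key, list_of_values):
    # keep each document id once, preserving first-seen order
    docs = []
    for doc_id in list_of_values:
        if doc_id not in docs:
            docs.append(doc_id)
    mr.emit((key, docs))

inputdata = open(os.path.join(DATA_DIR, "books.json"))
mr.execute(inputdata, mapper, reducer)
```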
###Markdown
Challenge ProblemImplement a relational join as a MapReduce queryConsider the following query:```SELECT * FROM Orders, LineItem WHERE Order.order_id = LineItem.order_id```Your MapReduce query should produce the same result as this SQL query executed against an appropriate database. You can consider the two input tables, Order and LineItem, as one big concatenated bag of records that will be processed by the map function record by record.**Map Input**Each input record is a list of strings representing a tuple in the database. Each list element corresponds to a different attribute of the tableThe first item (index 0) in each record is a string that identifies the table the record originates from. This field has two possible values:"line_item" indicates that the record is a line item."order" indicates that the record is an order.* **The second element (index 1) in each record is the `order_id.`** <--- JOIN ON THIS ELEMENTLineItem records have 17 attributes including the identifier string.Order records have 10 elements including the identifier string.**Reduce Output**The output should be a joined record: a single list of length 27 that contains the attributes from the order record followed by the fields from the line item record. Each list element should be a string like ```["order", "32", "130057", "O", "208660.75", "1995-07-16", "2-HIGH", "Clerk000000616", "0", "ise blithely bold, regular requests. quickly unusual dep", "line_item", "32", "82704", "7721", "1", "28", "47227.60", "0.05", "0.08", "N", "O", "1995-10-23", "1995-08-27", "1995-10-26", "TAKE BACK RETURN", "TRUCK", "sleep quickly. req"]["order", "32", "130057", "O", "208660.75", "1995-07-16", "2-HIGH", "Clerk000000616", "0", "ise blithely bold, regular requests. quickly unusual dep", "line_item", "32", "197921", "441", "2", "32", "64605.44", "0.02", "0.00", "N", "O", "1995-08-14", "1995-10-07", "1995-08-27", "COLLECT COD", "AIR", "lithely regular deposits. fluffily "]["order", "32", "130057", "O", "208660.75", "1995-07-16", "2-HIGH", "Clerk000000616", "0", "ise blithely bold, regular requests. quickly unusual dep", "line_item", "32", "44161", "6666", "3", "2", "2210.32", "0.09", "0.02", "N", "O", "1995-08-07", "1995-10-07", "1995-08-23", "DELIVER IN PERSON", "AIR", " express accounts wake according to the"]["order", "32", "130057", "O", "208660.75", "1995-07-16", "2-HIGH", "Clerk000000616", "0", "ise blithely bold, regular requests. quickly unusual dep", "line_item", "32", "2743", "7744", "4", "4", "6582.96", "0.09", "0.03", "N", "O", "1995-08-04", "1995-10-01", "1995-09-03", "NONE", "REG AIR", "e slyly final pac"]```
###Code
mr = MapReduce()
def mapper(record):
# COMPLETE
def reducer(key, list_of_values):
# COMPLETE
inputdata = open(os.path.join(DATA_DIR, "records.json"))
mr.execute(inputdata, mapper, reducer)
###Output
_____no_output_____ |
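For the Challenge Problem, one possible reduce-side join is sketched below (not the only way to do it): group every record by its `order_id`, then have the reducer pair the order record with each of its line items.

```python
mr = MapReduce()

def mapper(record):
    order_id = record[1]          # the join key is the second element in both tables
    mr.emit_intermediate(order_id, record)

def reducer(key, list_of_values):
    orders = [r for r in list_of_values if r[0] == 'order']
    line_items = [r for r in list_of_values if r[0] == 'line_item']
    for order in orders:
        for item in line_items:
            mr.emit(order + item)    # 10 order fields followed by 17 line item fields

inputdata = open(os.path.join(DATA_DIR, "records.json"))
mr.execute(inputdata, mapper, reducer)
```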
Inspect_Data/Combine_Seperate_Classification_Files.ipynb | ###Markdown
Combine classification data

The classification process used to label the drone footage in this project resulted in ~1000 individual classification json files. To perform pre-processing (including validating that the images classified by multiple users were classified in a similar way), the files are all combined into a single dataframe.

This takes a long time (approximately 1 hour on a desktop machine), so the resulting dataframe is pickled to a file to allow validation and inspection work to resume without rerunning this time-consuming process.
###Code
import pandas as pd
import json
from os import listdir
from os.path import isfile, join
# Function to read the json file and convert the dictionary object in the 'classifiedData' column
# into separate columns
def load_classification_file(file_name):
data = pd.read_json(file_name)
map_to_dict = data['classifiedData'].map(lambda x : dict(x))
expanded_cols = map_to_dict.apply(pd.Series)
return pd.concat([data, expanded_cols], axis=1).drop('classifiedData', axis=1)
frames = None
# Read all files in the classified sibling folder
filenames = [f for f in listdir('../../Texture_Repo/Donegal_Rural_Terrain_Textures/classified')
if isfile(join('../../Texture_Repo/Donegal_Rural_Terrain_Textures/classified', f))]
for f in filenames:
print('Processing ' + f)
frame = load_classification_file(join('../../Texture_Repo/Donegal_Rural_Terrain_Textures/classified', f))
frames = pd.concat([frames, frame])
# Pickle the combined data frames into the classified folder
frames.to_pickle('../../Texture_Repo/Donegal_Rural_Terrain_Textures/classified/all_data.pkl')
###Output
_____no_output_____ |
examples/99-advanced/osmnx-example.ipynb | ###Markdown
Plot Open Street Map Data {open_street_map_example} This was originally posted to [pyvista/pyvista-support#486](https://github.com/pyvista/pyvista-support/issues/486). Be sure to check out [osmnx](https://github.com/gboeing/osmnx). Start by generating a graph from an address.
###Code
import numpy as np
import osmnx as ox
import pyvista as pv
# Alternatively, use the pickled graph included in our examples.
from pyvista import examples
###Output
_____no_output_____
###Markdown
Read in the graph, either directly from the Open Street Map server (the commented-out lines below) or from the cached example graph.
###Code
# address = 'Holzgerlingen DE'
# graph = ox.graph_from_address(address, dist=500, network_type='drive')
# pickle.dump(graph, open('/tmp/tmp.p', 'wb'))
graph = examples.download_osmnx_graph()
###Output
_____no_output_____
###Markdown
Next, convert the edges into pyvista lines using `pyvista.lines_from_points`.
###Code
nodes, edges = ox.graph_to_gdfs(graph)
lines = []
# convert each edge into a line
for _, row in edges.iterrows():
x_pts = row['geometry'].xy[0]
y_pts = row['geometry'].xy[1]
z_pts = np.zeros(len(x_pts))
pts = np.column_stack((x_pts, y_pts, z_pts))
line = pv.lines_from_points(pts)
lines.append(line)
###Output
_____no_output_____
###Markdown
Finally, merge the lines and plot
###Code
combined_lines = lines[0].merge(lines[1:])
combined_lines.plot(line_width=3, cpos='xy')
###Output
_____no_output_____ |
Over Sampling And Under Sampling.ipynb | ###Markdown
Credit Card Kaggle- Fixing Imbalanced Dataset Over Sampling
###Code
import numpy as np
import pandas as pd
import sklearn
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report,accuracy_score
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from sklearn.svm import OneClassSVM
from pylab import rcParams
rcParams['figure.figsize'] = 14, 8
RANDOM_SEED = 42
LABELS = ["Normal", "Fraud"]
data = pd.read_csv('creditcard.csv',sep=',')
data.head()
data.info()
#Create independent and Dependent Features
columns = data.columns.tolist()
# Filter the columns to remove data we do not want
columns = [c for c in columns if c not in ["Class"]]
# Store the variable we are predicting
target = "Class"
# Define a random state
state = np.random.RandomState(42)
X = data[columns]
Y = data[target]
# Print the shapes of X & Y
print(X.shape)
print(Y.shape)
###Output
(284807, 30)
(284807,)
###Markdown
Exploratory Data Analysis
###Code
data.isnull().values.any()
count_classes = pd.value_counts(data['Class'], sort = True)
count_classes.plot(kind = 'bar', rot=0)
plt.title("Transaction Class Distribution")
plt.xticks(range(2), LABELS)
plt.xlabel("Class")
plt.ylabel("Frequency")
## Get the Fraud and the normal dataset
fraud = data[data['Class']==1]
normal = data[data['Class']==0]
print(fraud.shape,normal.shape)
from imblearn.combine import SMOTETomek
from imblearn.under_sampling import NearMiss
smk = SMOTETomek(random_state=42)
X_res,y_res=smk.fit_sample(X,Y)
X_res.shape,y_res.shape
from collections import Counter
print('Original dataset shape {}'.format(Counter(Y)))
print('Resampled dataset shape {}'.format(Counter(y_res)))
## RandomOverSampler to handle imbalanced data
from imblearn.over_sampling import RandomOverSampler
os = RandomOverSampler(ratio=0.5)
X_train_res, y_train_res = os.fit_sample(X, Y)
X_train_res.shape,y_train_res.shape
print('Original dataset shape {}'.format(Counter(Y)))
print('Resampled dataset shape {}'.format(Counter(y_train_res)))
os_us = SMOTETomek(ratio=0.5)
X_train_res1, y_train_res1 = os_us.fit_sample(X, Y)
X_train_res1.shape,y_train_res1.shape
print('Original dataset shape {}'.format(Counter(Y)))
print('Resampled dataset shape {}'.format(Counter(y_train_res1)))
###Output
Original dataset shape Counter({0: 284315, 1: 492})
Resampled dataset shape Counter({0: 283480, 1: 141322})
###Markdown
Under Sampling
###Code
import numpy as np
import pandas as pd
import sklearn
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report,accuracy_score
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from sklearn.svm import OneClassSVM
from pylab import rcParams
rcParams['figure.figsize'] = 14, 8
RANDOM_SEED = 42
LABELS = ["Normal", "Fraud"]
data = pd.read_csv('creditcard.csv',sep=',')
data.head()
data.info()
#Create independent and Dependent Features
columns = data.columns.tolist()
# Filter the columns to remove data we do not want
columns = [c for c in columns if c not in ["Class"]]
# Store the variable we are predicting
target = "Class"
# Define a random state
state = np.random.RandomState(42)
X = data[columns]
Y = data[target]
X_outliers = state.uniform(low=0, high=1, size=(X.shape[0], X.shape[1]))
# Print the shapes of X & Y
print(X.shape)
print(Y.shape)
###Output
(284807, 30)
(284807,)
###Markdown
Exploratory Data Analysis
###Code
data.isnull().values.any()
count_classes = pd.value_counts(data['Class'], sort = True)
count_classes.plot(kind = 'bar', rot=0)
plt.title("Transaction Class Distribution")
plt.xticks(range(2), LABELS)
plt.xlabel("Class")
plt.ylabel("Frequency")
## Get the Fraud and the normal dataset
fraud = data[data['Class']==1]
normal = data[data['Class']==0]
print(fraud.shape,normal.shape)
from imblearn.under_sampling import NearMiss
nm = NearMiss(random_state=42)
X_res,y_res=nm.fit_sample(X,Y)
X_res.shape,y_res.shape
from collections import Counter
print('Original dataset shape {}'.format(Counter(Y)))
print('Resampled dataset shape {}'.format(Counter(y_res)))
###Output
Original dataset shape Counter({0: 284315, 1: 492})
Resampled dataset shape Counter({0: 492, 1: 492})
|