Unnamed: 0 (int64, 0-16k) | text_prompt (string, length 110-62.1k) | code_prompt (string, length 37-152k) |
---|---|---|
14,700 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Loading data
Step1: Add counts for each type into buildings
Step2: Normalize longitude and latitude
Step3: Feature analysis
Step4: Cross Validation
Step5: According to this simplified analysis, location has the most predictive power for blight. ('norm_lat' and 'norm_lon' correspond to 'normalized latitude' and 'normalized longitude' respectively)
More features from differentiating crimes
Step6: Cross Validation
Step7: The additional features created by differentiating crime types did not provide better results.
SVM
Step8: More features from differentiating 'blight violations'
We still have only a very small number of features so far, so we should still benefit from exploring more features.
Step9: Cross Validation
Step10: Bagging
Additional features produced by differentiating kinds of crimes or kinds of violations did not improve our predictive power.
We can use bagging to take advantage of the extra non-blighted building samples that have not been utilized so far.
Extra data
Step11: Hold test data
Step12: Train data
Step13: One example
Step14: Cross Validation
Step15: Average model
Step16: An AUC score of 0.8625 was achieved by reducing variance. | Python Code:
# imports needed by this notebook (added for completeness)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

buildings = pd.read_csv("../data/buildings.csv")
events = pd.read_csv("../data/events.csv")
buildings.head(2)
events.head(2)
events['type'].value_counts() # types: 1: 311-calls, 2: crimes, 3: blight violations, 4: permits
Explanation: Loading data
End of explanation
def str_to_list(events_str):
events_list = events_str.rstrip(']').lstrip('[').split(', ')
return events_list
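# Note (added): str_to_list returns the event ids as strings; they are converted with int()
# below, and after the list has been round-tripped through a CSV file the surrounding
# quote characters are stripped before the conversion.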
buildings.loc[:, 'event_id_list'] = buildings.loc[:,'event_id_list'].copy().apply(lambda x: str_to_list(x))
buildings['311-calls'] = np.zeros(buildings.shape[0])
buildings['crimes'] = np.zeros(buildings.shape[0])
buildings['blight_violations'] = np.zeros(buildings.shape[0])
buildings['permit_cnt'] = np.zeros(buildings.shape[0])
for i in range(buildings.shape[0]):
for event in buildings.loc[i, 'event_id_list']:
event = int(event)
event_type = events.loc[event,'type']
if event_type == 1:
buildings.loc[i, '311-calls'] += 1
elif event_type == 2:
buildings.loc[i, 'crimes'] += 1
elif event_type == 3:
buildings.loc[i, 'blight_violations'] += 1
elif event_type == 4:
buildings.loc[i, 'permit_cnt'] += 1
else:
print("unexpected event_type: %d in row %d" % (event_type, i))
buildings[['311-calls','crimes','blight_violations','permit_cnt']].describe()
Explanation: Add counts for each type into buildings
End of explanation
buildings['norm_lon'] = (buildings['lon'].copy() - np.mean(buildings['lon'].values))/np.std(buildings['lon'].values)
buildings['norm_lat'] = (buildings['lat'].copy() - np.mean(buildings['lat'].values))/np.std(buildings['lat'].values)
buildings.head(2)
buildings.to_csv('../data/buildings_with_features.csv', index=False)
Explanation: Normalize longitude and latitude
End of explanation
import xgboost as xgb
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.cross_validation import train_test_split
buildings = pd.read_csv('../data/buildings_with_features.csv')
balanced_data = pd.read_csv('../data/balanced_data.csv')
balanced_keys = pd.read_csv('../data/balanced_keys.csv')
buildings.head(2)
balanced_data.head(3)
balanced_buildings = buildings.loc[buildings['building_id'].isin(balanced_data['building_id'].values)].copy()
feature_names = ['norm_lat', 'norm_lon', '311-calls', 'crimes', 'blight_violations']
feature_types = ['float', 'float', 'int', 'int', 'int', 'int']
names_to_drop = ['building_id','blighted','addr','event_id_list','lat','lon','llcrnrlon','llcrnrlat','urcrnrlon','urcrnrlat', 'permit_cnt']
labels = balanced_buildings['blighted']
data = balanced_buildings.drop(names_to_drop, axis=1, inplace=False)
x_train, x_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, stratify=labels, random_state=0)
x, x_eval, y, y_eval = train_test_split(x_train, y_train, test_size=0.2, stratify=y_train, random_state=500)
dtrain = xgb.DMatrix(x, label=y)
deval = xgb.DMatrix(x_eval, label=y_eval)
dtest = xgb.DMatrix(x_test, label=y_test)
param = {
'booster': 'gbtree',
'subsample': 1.0,
'max_depth': 5,
'min_child_weight': 2,
'eta': 0.2,
'gamma': 3,
'objective':'binary:logistic',
'eval_metric': 'auc',
'lambda': 3, # L2 regularization,
'alpha': 1 # L1 regularization
}
watchlist = [(deval, 'eval'), (dtrain, 'train')]
num_round = 1000
evals_result = {}
bst = xgb.train(param, dtrain, num_round, watchlist, evals_result=evals_result, early_stopping_rounds=20, verbose_eval=False)
plt.plot(np.arange(len(evals_result['eval']['auc'])), evals_result['eval']['auc'], 'b-',\
np.arange(len(evals_result['train']['auc'])), evals_result['train']['auc'], 'r--')
plt.legend(['Eval','Train'], loc='best')
plt.ylim(0.8,1.0)
plt.show()
preds_proba = bst.predict(dtest, ntree_limit=bst.best_ntree_limit)
round(roc_auc_score(y_test, preds_proba), 5)
dtrain_total = xgb.DMatrix(x_train, label=y_train)
Explanation: Feature analysis
End of explanation
res = xgb.cv(param, dtrain_total, num_boost_round=10, nfold=5, metrics={'auc'}, seed=99,\
callbacks=[xgb.callback.print_evaluation(show_stdv=False),\
xgb.callback.early_stop(3)])
plt.plot(np.arange(len(preds_proba)), preds_proba, 'bo', alpha=0.2)
plt.show()
y_pred_proba = bst.predict(dtest, ntree_limit=bst.best_ntree_limit)
fpr, tpr, thresholds = roc_curve(y_test, y_pred_proba, pos_label=1)
plt.figure(figsize=(5,5))
plt.plot([0,1],[0,1], 'k--')
plt.plot(fpr, tpr, 'ro',label='RF', alpha=0.4)
plt.title("ROC Curve")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.show()
fig, ax = plt.subplots(figsize=(20,15), dpi=600)
xgb.plot_tree(bst, num_trees=0, ax=ax)
plt.show()
feat_imp = [(k,v) for (k,v) in bst.get_fscore().items()] # python3
feat_imp.sort()
feat_names, feat_imps = zip(*feat_imp)
feat_imp_vis = pd.Series(feat_imps, index=feat_names)
fig = plt.figure()
feat_imp_vis.plot(kind='bar', title='Feature importance')
Explanation: Cross Validation
End of explanation
data_crimes = pd.read_csv('../data/data_crime.csv')
data_crimes.CATEGORY.unique()
crime_categories = {'more_serious': ['ASSAULT', 'LARCENY', 'STOLEN VEHICLE', 'BURGLARY', 'AGGRAVATED ASSAULT',\
'ROBBERY', 'KIDNAPING', 'OTHER BURGLARY', 'NEGLIGENT HOMICIDE', 'JUSTIFIABLE HOMICIDE',\
'FELONY DEATH FROM FLEEING VEHICLE', 'DANGEROUS DRUGS', 'ARSON', 'HOMICIDE'], \
'less_serious': ['WEAPONS OFFENSES', 'TRAFFIC VIOLATIONS-MOTORCYCLE VIOLATIONS', \
'DAMAGE TO PROPERTY', 'TRAFFIC VIOLATIONS-DRIVING ON SUSPENDED', 'FRAUD', 'OBSTRUCTING THE POLICE',\
'RUNAWAY', 'BRIBERY', 'EXTORTION', 'STOLEN PROPERTY', 'HEALTH-SAFETY', 'VAGRANCY (OTHER)', \
'ENVIRONMENT', 'EMBEZZLEMENT', 'FORGERY', 'CONSPIRACY BY COMPUTER', 'ANTITRUST', 'PUBLIC PEACE',\
'LIQUOR', 'OUIL', 'OBSCENITY', 'SOVEREIGNTY', 'TAX REVENUE', 'GAMBLING', 'IMMIGRATION', 'CONGRESS',\
'REVOKED', 'ELECTION LAWS', 'DRUNKENNESS', 'MISCELLANEOUS ARREST', 'MILITARY', 'SOLICITATION', \
'OUIL DISPOSE OF VEHICLE TO AVOID FORFEITURE', 'FAMILY OFFENSE', 'ESCAPE', 'OBSTRUCTING JUDICIARY']}
# based on data from gis.chicagopolice.org
data_crimes.head(2)
def cat_crime(crime_str):
'''numerical category:
---- more_serious: 1
---- less_serious: 0
---- unclassified: -1
'''
if crime_str in crime_categories['more_serious']:
return 1
elif crime_str in crime_categories['less_serious']:
return 0
else:
return -1
data_crimes['num_cat'] = data_crimes['CATEGORY'].apply(cat_crime)
data_crimes['num_cat'].unique() # all crimes classified, no -1 encountered
buildings.head(1) # refresher
less_serious_crime_event_ids = data_crimes.loc[data_crimes['num_cat']==0,'event_id'].values
more_serious_crime_event_ids = data_crimes.loc[data_crimes['num_cat']==1,'event_id'].values
buildings.loc[:, 'event_id_list'] = buildings.loc[:,'event_id_list'].copy().apply(lambda x: str_to_list(x))
buildings['less_serious_crimes'] = 0 # count of less serious crimes
buildings['more_serious_crimes'] = 0 # count of more serious crimes
buildings['event_id_list'] = buildings['event_id_list'].apply(lambda x: [int(i.rstrip("'").lstrip("'")) for i in x])
for i in range(buildings.shape[0]):
for event in buildings.loc[i, 'event_id_list']:
if event in less_serious_crime_event_ids:
buildings.loc[i, 'less_serious_crimes'] += 1
elif event in more_serious_crime_event_ids:
buildings.loc[i, 'more_serious_crimes'] += 1
buildings.head(1)
buildings.to_csv('../data/buildings_with_features_2.csv', index=False)
balanced_buildings = buildings.loc[buildings['building_id'].isin(balanced_data['building_id'].values)].copy()
feature_names = ['norm_lat', 'norm_lon', '311-calls', 'blight_violations', 'less_serious_crimes', 'more_serious_crimes']
feature_types = ['float', 'float', 'int', 'int', 'int', 'int']
names_to_drop = ['building_id','blighted','addr','event_id_list','lat','lon','llcrnrlon','llcrnrlat','urcrnrlon','urcrnrlat', 'permit_cnt', 'crimes']
labels = balanced_buildings['blighted']
data = balanced_buildings.drop(names_to_drop, axis=1, inplace=False)
x_train, x_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, stratify=labels, random_state=0)
x, x_eval, y, y_eval = train_test_split(x_train, y_train, test_size=0.2, stratify=y_train, random_state=500)
dtrain = xgb.DMatrix(x, label=y)
deval = xgb.DMatrix(x_eval, label=y_eval)
dtest = xgb.DMatrix(x_test, label=y_test)
param = {
'booster': 'gbtree',
'subsample': 1.0,
'max_depth': 8,
'min_child_weight': 8,
'eta': 0.1,
'gamma': 5,
'objective':'binary:logistic',
'eval_metric': 'auc',
'lambda': 2, # L2 regularization,
'alpha': 0 # L1 regularization
}
watchlist = [(deval, 'eval'), (dtrain, 'train')]
num_round = 2000
evals_result = {}
bst = xgb.train(param, dtrain, num_round, watchlist, evals_result=evals_result, early_stopping_rounds=20, verbose_eval=False)
plt.plot(np.arange(len(evals_result['eval']['auc'])), evals_result['eval']['auc'], 'b-',\
np.arange(len(evals_result['train']['auc'])), evals_result['train']['auc'], 'r--')
plt.legend(['Eval','Train'], loc='best')
plt.ylim(0.8,1.0)
plt.show()
preds_proba = bst.predict(dtest, ntree_limit=bst.best_ntree_limit)
round(roc_auc_score(y_test, preds_proba), 5)
dtrain_total = xgb.DMatrix(x_train, label=y_train)
Explanation: According to this simplified analysis, location has the most predictive power for blight. ('norm_lat' and 'norm_lon' correspond to 'normalized latitude' and 'normalized longitude' respectively)
More features from differentiating crimes
End of explanation
res = xgb.cv(param, dtrain_total, num_boost_round=10, nfold=5, metrics={'auc'}, seed=99,\
callbacks=[xgb.callback.print_evaluation(show_stdv=False),\
xgb.callback.early_stop(3)])
plt.plot(np.arange(len(preds_proba)), preds_proba, 'bo', alpha=0.2)
plt.show()
y_pred_proba = bst.predict(dtest, ntree_limit=bst.best_ntree_limit)
fpr, tpr, thresholds = roc_curve(y_test, y_pred_proba, pos_label=1)
plt.figure(figsize=(5,5))
plt.plot([0,1],[0,1], 'k--')
plt.plot(fpr, tpr, 'ro',label='RF', alpha=0.4)
plt.title("ROC Curve")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.show()
feat_imp = [(k,v) for (k,v) in bst.get_fscore().items()] # python3
feat_imp.sort()
feat_names, feat_imps = zip(*feat_imp)
feat_imp_vis = pd.Series(feat_imps, index=feat_names)
fig = plt.figure()
feat_imp_vis.plot(kind='bar', title='Feature importance')
Explanation: Cross Validation
End of explanation
# Train using another balanced data set (1:1 blighted vs nonblighted) with nonblighted of another list of random indexes
balanced_data_2 = pd.read_csv('../data/balanced_data_2.csv')
balanced_keys_2 = pd.read_csv('../data/balanced_keys_2.csv')
balanced_buildings_2 = buildings.loc[buildings['building_id'].isin(balanced_data_2['building_id'].values)].copy()
feature_names = ['norm_lat', 'norm_lon', '311-calls', 'blight_violations', 'less_serious_crimes', 'more_serious_crimes']
feature_types = ['float', 'float', 'int', 'int', 'int', 'int']
names_to_drop = ['building_id','blighted','addr','event_id_list','lat','lon','llcrnrlon','llcrnrlat','urcrnrlon','urcrnrlat', 'permit_cnt', 'crimes']
labels_2 = balanced_buildings_2['blighted']
data_2 = balanced_buildings_2.drop(names_to_drop, axis=1, inplace=False)
x_train, x_test, y_train, y_test = train_test_split(data_2, labels_2, test_size=0.2, stratify=labels_2, random_state=0)
x, x_eval, y, y_eval = train_test_split(x_train, y_train, test_size=0.2, stratify=y_train, random_state=500)
from sklearn.svm import SVC
clf = SVC(C=0.8, kernel='rbf', probability=True)
clf.fit(x_train, y_train)
y_pred_2 = clf.predict_proba(x_test)
round(roc_auc_score(y_test, y_pred_2[:,1]), 5)
fpr, tpr, thresholds = roc_curve(y_test, y_pred_2[:,1], pos_label=1)
plt.figure(figsize=(5,5))
plt.plot([0,1],[0,1], 'k--')
plt.plot(fpr, tpr, 'ro',label='RF', alpha=0.4)
plt.title("ROC Curve")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.show()
Explanation: The additional features created by differentiating crime types did not provide better results.
SVM
End of explanation
data_violations = pd.read_csv('../data/data_bv.csv')
data_violations.columns
data_violations.FineAmt.unique() # this indicates the importance of each violation
len(data_violations.ViolationCode.value_counts()) # too many kinds of violations to use the raw category directly
buildings = pd.read_csv('../data/buildings_with_features_2.csv')
balanced_data = pd.read_csv('../data/balanced_data.csv')
balanced_keys = pd.read_csv('../data/balanced_keys.csv')
def get_num_amt(FineAmt):
'''convert FineAmt string to numerical values'''
amt_str = FineAmt.tolist()[0]
if isinstance(amt_str, float): # nan
# print(amt_str)
return 0
amt_str = amt_str.lstrip('$')
amt = float(amt_str)
return amt
buildings.loc[:, 'event_id_list'] = buildings.loc[:,'event_id_list'].copy().apply(lambda x: str_to_list(x))
buildings.head(1)
buildings['trivial_v'] = 0 # count of violation of minimal importance (< $100)
buildings['small_v'] = 0 # count of violation of small importance ($100 <= v < $1000 )
buildings['medium_v'] = 0 # count of violation with ($1000 <= v < $5000)
buildings['heavy_v'] = 0 # count of violation with (>=$5000)
buildings['event_id_list'] = buildings['event_id_list'].apply(lambda x: [int(i.rstrip("'").lstrip("'")) for i in x])
violation_events = data_violations['event_id'].values
for i in range(buildings.shape[0]):
for event in buildings.loc[i, 'event_id_list']:
if event in violation_events:
amt = get_num_amt(data_violations.loc[data_violations['event_id']==event, 'FineAmt'])
if amt < 100:
buildings.loc[i, 'trivial_v'] += 1
elif amt >= 100 and amt < 1000:
buildings.loc[i, 'small_v'] += 1
elif amt >= 1000 and amt < 5000:
buildings.loc[i, 'medium_v'] += 1
elif amt >= 5000:
buildings.loc[i, 'heavy_v'] += 1
else: # nan
buildings.loc[i, 'trivial_v'] += 1
buildings.head(1)
feature_names = ['norm_lat', 'norm_lon', '311-calls', 'less_serious_crimes', 'more_serious_crimes', 'trivial_v', 'small_v', 'medium_v', 'heavy_v']
feature_types = ['float', 'float', 'int', 'int', 'int', 'int']
names_to_drop = ['building_id','blighted','addr','event_id_list','lat','lon','llcrnrlon','llcrnrlat','urcrnrlon','urcrnrlat', 'permit_cnt', 'crimes', 'blight_violations']
buildings.to_csv('../data/buildings_with_features_3.csv', index=False)
balanced_buildings = buildings.loc[buildings['building_id'].isin(balanced_data['building_id'].values)].copy()
labels = balanced_buildings['blighted']
data = balanced_buildings.drop(names_to_drop, axis=1, inplace=False)
x_train, x_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, stratify=labels, random_state=1234)
x, x_eval, y, y_eval = train_test_split(x_train, y_train, test_size=0.2, stratify=y_train, random_state=6890)
dtrain = xgb.DMatrix(x, label=y)
deval = xgb.DMatrix(x_eval, label=y_eval)
dtest = xgb.DMatrix(x_test, label=y_test)
param = {
'booster': 'gbtree',
'subsample': 1.0,
'max_depth': 7,
'min_child_weight': 4.0,
'eta': 0.2,
'gamma': 3,
'objective':'binary:logistic',
'eval_metric': 'auc',
'lambda': 3, # L2 regularization,
'alpha': 0.5 # L1 regularization
}
watchlist = [(deval, 'eval'), (dtrain, 'train')]
num_round = 1000
evals_result = {}
bst = xgb.train(param, dtrain, num_round, watchlist, evals_result=evals_result, early_stopping_rounds=40, verbose_eval=False)
plt.plot(np.arange(len(evals_result['eval']['auc'])), evals_result['eval']['auc'], 'b-',\
np.arange(len(evals_result['train']['auc'])), evals_result['train']['auc'], 'r--')
plt.legend(['Eval','Train'], loc='best')
plt.ylim(0.8,1.0)
plt.show()
preds_proba = bst.predict(dtest, ntree_limit=bst.best_ntree_limit)
round(roc_auc_score(y_test, preds_proba), 5)
dtrain_total = xgb.DMatrix(x_train, label=y_train)
Explanation: More features from differentiating 'blight violations'
We still have only a very small number of features so far, so we should still benefit from exploring more features.
End of explanation
res = xgb.cv(param, dtrain_total, num_boost_round=10, nfold=5, metrics={'auc'}, seed=99,\
callbacks=[xgb.callback.print_evaluation(show_stdv=False),\
xgb.callback.early_stop(3)])
plt.plot(np.arange(len(preds_proba)), preds_proba, 'bo', alpha=0.2)
plt.show()
y_pred_proba = bst.predict(dtest, ntree_limit=bst.best_ntree_limit)
fpr, tpr, thresholds = roc_curve(y_test, y_pred_proba, pos_label=1)
plt.figure(figsize=(5,5))
plt.plot([0,1],[0,1], 'k--')
plt.plot(fpr, tpr, 'ro',label='RF', alpha=0.4)
plt.title("ROC Curve")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.show()
feat_imp = [(k,v) for (k,v) in bst.get_fscore().items()] # python3
feat_imp.sort()
feat_names, feat_imps = zip(*feat_imp)
feat_imp_vis = pd.Series(feat_imps, index=feat_names)
feat_imp_vis.sort_values(inplace=True)
fig = plt.figure()
feat_imp_vis.plot(kind='bar', title='Feature importance')
Explanation: Cross Validation
End of explanation
blighted_buildings = buildings.loc[buildings['blighted'] == 1, :].copy()
nonblighted_buildings = buildings.loc[buildings['blighted']==0, :].copy()
n_blighted = blighted_buildings.shape[0]
n_nonblighted = nonblighted_buildings.shape[0]
print("number of blighted buildings: %d" % n_blighted)
print("number of non-blighted buildings: %d " % n_nonblighted)
Explanation: Bagging
Additional features produced by differentiating kinds of crimes or kinds of violations did not improve our predictive power.
We can use bagging to take advantage of the extra non-blighted building samples that have not been utilized so far.
Extra data
End of explanation
feature_names = ['norm_lat', 'norm_lon', '311-calls', 'crimes', 'blight_violations', ]
feature_types = ['float', 'float', 'int', 'int', 'int', 'int']
names_to_drop = ['building_id','blighted','addr','event_id_list','lat','lon','llcrnrlon','llcrnrlat','urcrnrlon','urcrnrlat', 'less_serious_crimes', 'more_serious_crimes', 'trivial_v', 'small_v', 'medium_v', 'heavy_v', 'permit_cnt']
n_test_b = int(n_blighted*0.2) # number for blighted buildings in test
index_b_test = np.random.choice(blighted_buildings.index, n_test_b, replace=False) #blighted
index_nb_test = np.random.choice(nonblighted_buildings.index, n_test_b, replace=False) #nonblighted
blighted_test = blighted_buildings.loc[index_b_test,:]
nonblighted_test = nonblighted_buildings.loc[index_nb_test,:]
balanced_test = pd.concat([blighted_test.copy(), nonblighted_test.copy()])
balanced_test = balanced_test.sample(frac=1, replace=False).reset_index(drop=True)
test_x = balanced_test.drop(names_to_drop, axis=1, inplace=False)
test_y = balanced_test.loc[:,['blighted']].copy()
Explanation: Hold test data
End of explanation
# Train data are chosen from rest of the buildings
rest_blighted_buildings = blighted_buildings.loc[~blighted_buildings.index.isin(index_b_test),:].copy()
rest_nonblighted_buildings = nonblighted_buildings.loc[~nonblighted_buildings.index.isin(index_nb_test),:].copy()
n_train_b = rest_blighted_buildings.shape[0] # number of rows to choose from each kind
indexes_b_train = []
# build 5 balanced training sets: each uses all remaining blighted buildings plus a fresh random sample of non-blighted buildings
for i in range(5):
index_b_train = np.random.choice(rest_blighted_buildings.index, n_train_b, replace=False)
index_nb_train = np.random.choice(rest_nonblighted_buildings.index, n_train_b, replace=False)
indexes_b_train.append([index_b_train, index_nb_train])
train_list = []
for index_pair in indexes_b_train:
index_b, index_nb = tuple(index_pair)
blighted_train = rest_blighted_buildings.loc[index_b,:]
nonblighted_train = rest_nonblighted_buildings.loc[index_nb,:]
balanced_train = pd.concat([blighted_train.copy(), nonblighted_train.copy()])
balanced_train = balanced_train.sample(frac=1, replace=False).reset_index(drop=True)
y_train = balanced_train['blighted'].copy()
x_train = balanced_train.drop(names_to_drop, axis=1, inplace=False)
train_list.append((x_train, y_train))
param = {
'booster': 'gbtree',
'subsample': 1.0,
'max_depth': 6,
'min_child_weight': 3.0,
'eta': 0.2,
'gamma': 3.0,
'objective':'binary:logistic',
'eval_metric': 'auc',
'lambda': 4, # L2 regularization,
'alpha': 0 # L1 regularization
}
num_round = 1000
def train_model(x, x_eval, y, y_eval, param):
#x, x_eval, y, y_eval = train_test_split(x_train, y_train, test_size=0.2, stratify=y_train)
dtrain = xgb.DMatrix(x, label=y)
deval = xgb.DMatrix(x_eval, label=y_eval)
watchlist = [(deval, 'eval'), (dtrain, 'train')]
evals_result={}
bst = xgb.train(param, dtrain, num_boost_round=1000, evals=watchlist, evals_result=evals_result,
early_stopping_rounds=40, verbose_eval=False)
return (bst, evals_result)
Explanation: Train data
End of explanation
x_train = train_list[0][0]
y_train = train_list[0][1]
x, x_eval, y, y_eval = train_test_split(x_train, y_train, test_size=0.2, stratify=y_train)
bst, evals_result = train_model(x, x_eval, y, y_eval, param)
plt.plot(np.arange(len(evals_result['eval']['auc'])), evals_result['eval']['auc'], 'b-',\
np.arange(len(evals_result['train']['auc'])), evals_result['train']['auc'], 'r--')
plt.legend(['Eval','Train'], loc='best')
plt.ylim(0.82,0.9)
plt.show()
Explanation: One example
End of explanation
res = xgb.cv(param, dtrain_total, num_boost_round=10, nfold=5, metrics={'auc'}, seed=9999,\
callbacks=[xgb.callback.print_evaluation(show_stdv=False),\
xgb.callback.early_stop(3)])
dtest = xgb.DMatrix(test_x, label=test_y['blighted'])
preds_proba = bst.predict(dtest, ntree_limit=bst.best_ntree_limit)
round(roc_auc_score(test_y, preds_proba), 5)
Explanation: Cross Validation
End of explanation
bsts = []
fig, axes = plt.subplots(len(train_list), sharex=True, sharey=True, figsize=(6,12))
for train_data,i in zip(train_list,range(len(axes))):
x_train, y_train = train_data
x, x_eval, y, y_eval = train_test_split(x_train, y_train, test_size=0.2, stratify=y_train)
bst, evals_result = train_model(x, x_eval, y, y_eval, param)
axes[i].plot(np.arange(len(evals_result['eval']['auc'])), evals_result['eval']['auc'], 'b-',\
np.arange(len(evals_result['train']['auc'])), evals_result['train']['auc'], 'r--')
bsts.append(bst)
plt.legend(['Eval','Train'], loc='best')
plt.ylim(0.8,0.9)
plt.show()
#preds_proba = bst.predict(dtest, ntree_limit=bst.best_ntree_limit)
y_preds = np.mean(np.array([bst.predict(dtest, ntree_limit=bst.best_ntree_limit) for bst in bsts]), axis=0)
round(roc_auc_score(test_y, y_preds), 5)
Explanation: Average model
End of explanation
fpr, tpr, thresholds = roc_curve(test_y, y_preds, pos_label=1)
plt.figure(figsize=(5,5))
plt.plot([0,1],[0,1], 'k--')
plt.plot(fpr, tpr, 'ro',label='RF', alpha=0.4)
plt.title("ROC Curve")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.savefig('../data/ROC_Curve_combined.png')
plt.show()
Explanation: An AUC score of 0.8625 was achieved by reducing variance.
End of explanation |
14,701 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Python II Homework
1. Write a function named 'double' that doubles the number 5
Step1: 2. Build a for-loop that iterates over a predefined list of numbers and, using the function just created, prints all the results doubled.
Step2: 3. Write code that asks the user for their name and then tells them how many characters their name has.
Step3: 4. Write a function named km_rechner that automatically converts the distances listed below from miles to km and displays each result rounded to one decimal place.
Step4: 5. We have dictionaries of measurements that come in very different formats. Write a function named m_converter that takes these formats into account and converts them to meters. | Python Code:
def double(number):
result = number*2
return result
double(5)
Explanation: Python II Homework
1. Write a function named 'double' that doubles the number 5:
End of explanation
lst = list(range(1,5))
for elem in lst:
print(double(elem))
Explanation: 2. Build a for-loop that iterates over a predefined list of numbers and, using the function just created, prints all the results doubled.
End of explanation
name = input("How many letters does your name contain? ")
print("Your name has " + str(len(name)) +" letters.")
Explanation: 3. Write code that asks the user for their name and then tells them how many characters their name has.
End of explanation
def km_rechner(Distanz):
    # 1 mile = 1.609344 km (the original factor 0.621371 converted km to miles, i.e. the wrong direction)
    result = round(Distanz*1.609344, 1)
    print(result)
    return result
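# sanity check (added): km_rechner(5) should print 8.0, since 5 miles is roughly 8.05 km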
km_rechner(5)
km_rechner(123)
km_rechner(53)
Explanation: 4. Write a function named km_rechner that automatically converts the distances listed below from miles to km and displays each result rounded to one decimal place.
End of explanation
# Our formats
var_first = { 'measurement': 3.4, 'scale': 'kilometer' }
var_second = { 'measurement': 9.1, 'scale': 'mile' }
var_third = { 'measurement': 2.0, 'scale': 'meter' }
var_fourth = { 'measurement': 9.0, 'scale': 'inches' }
def m_converter(length):
if length["scale"]=="kilometer":
result = round(length["measurement"]*1000,1)
elif length["scale"]=="mile":
result = round(length["measurement"]*1609.34,1)
elif length["scale"]=="meter":
result = round(length["measurement"]*1,1)
elif length["scale"]=="inches":
result = round(length["measurement"]*0.0254,1)
return result
print(m_converter(var_first))
print(m_converter(var_second))
print(m_converter(var_third))
print(m_converter(var_fourth))
Explanation: 5. We have dictionaries of measurements that come in very different formats. Write a function named m_converter that takes these formats into account and converts them to meters.
End of explanation |
14,702 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Reading and manipulating datasets with Pandas
This notebook shows how to create Series and DataFrames with Pandas. Also, how to read CSV files and create pivot tables. The first part is based on chapter 3 of the Python Data Science Handbook.
Step1: 1. The Pandas Series Object
A Pandas Series is a one-dimensional array of indexed data. It can be created from a list or array as follows
Step2: As we see in the output, the Series wraps both a sequence of values and a sequence of indices, which we can access with the values and index attributes. The values are simply a familiar NumPy array
Step3: The index is an array-like object of type pd.Index, which we'll discuss in more detail momentarily.
Step4: Like with a NumPy array, data can be accessed by the associated index via the familiar Python square-bracket notation
Step5: Series as generalized NumPy array
From what we've seen so far, it may look like the Series object is basically interchangeable with a one-dimensional NumPy array. The essential difference is the presence of the index
Step6: And the item access works as expected
Step7: Series as specialized dictionary
In this way, you can think of a Pandas Series a bit like a specialization of a Python dictionary. A dictionary is a structure that maps arbitrary keys to a set of arbitrary values, and a Series is a structure which maps typed keys to a set of typed values. This typing is important
Step8: You can notice the indexes were sorted lexicographically. That's the default behaviour in Pandas
Step9: Unlike a dictionary, though, the Series also supports array-style operations such as slicing
Step10: 2. The Pandas DataFrame Object
The next fundamental structure in Pandas is the DataFrame. Like the Series object discussed in the previous section, the DataFrame can be thought of either as a generalization of a NumPy array, or as a specialization of a Python dictionary. We'll now take a look at each of these perspectives.
DataFrame as a generalized NumPy array
If a Series is an analog of a one-dimensional array with flexible indices, a DataFrame is an analog of a two-dimensional array with both flexible row indices and flexible column names.
Step11: Now that we have this along with the population Series from before, we can use a dictionary to construct a single two-dimensional object containing this information
Step12: DataFrame as specialized dictionary
Similarly, we can also think of a DataFrame as a specialization of a dictionary. Where a dictionary maps a key to a value, a DataFrame maps a column name to a Series of column data. For example, asking for the 'area' attribute returns the Series object containing the areas we saw earlier
Step13: Constructing DataFrame objects
A Pandas DataFrame can be constructed in a variety of ways. Here we'll give several examples.
From a single Series object
A DataFrame is a collection of Series objects, and a single-column DataFrame can be constructed from a single Series
Step14: From a dictionary of Series objects
As we saw before, a DataFrame can be constructed from a dictionary of Series objects as well
Step15: 3. Reading a CSV file and doing common Pandas operations
Step16: 4. Loading the full dataset
Step17: OLD | Python Code:
import numpy as np
from __future__ import print_function
import pandas as pd
pd.__version__
Explanation: Reading and manipulating datasets with Pandas
This notebook shows how to create Series and DataFrames with Pandas. Also, how to read CSV files and create pivot tables. The first part is based on chapter 3 of the <a href="http://nbviewer.jupyter.org/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/03.01-Introducing-Pandas-Objects.ipynb">Python Data Science Handbook</a>.
Author: Roberto Muñoz <br />
Email: [email protected]
End of explanation
data = pd.Series([0.25, 0.5, 0.75, 1.0])
data
Explanation: 1. The Pandas Series Object
A Pandas Series is a one-dimensional array of indexed data. It can be created from a list or array as follows:
End of explanation
data.values
Explanation: As we see in the output, the Series wraps both a sequence of values and a sequence of indices, which we can access with the values and index attributes. The values are simply a familiar NumPy array:
End of explanation
data.index
Explanation: The index is an array-like object of type pd.Index, which we'll discuss in more detail momentarily.
End of explanation
data[1]
Explanation: Like with a NumPy array, data can be accessed by the associated index via the familiar Python square-bracket notation:
End of explanation
data = pd.Series([0.25, 0.5, 0.75, 1.0],
index=['a', 'b', 'c', 'd'])
data
Explanation: Series as generalized NumPy array
From what we've seen so far, it may look like the Series object is basically interchangeable with a one-dimensional NumPy array. The essential difference is the presence of the index: while the Numpy Array has an implicitly defined integer index used to access the values, the Pandas Series has an explicitly defined index associated with the values.
End of explanation
data['b']
Explanation: And the item access works as expected:
End of explanation
population_dict = {'Arica y Parinacota': 243149,
'Antofagasta': 631875,
'Metropolitana de Santiago': 7399042,
'Valparaiso': 1842880,
'Bíobío': 2127902,
'Magallanes y Antártica Chilena': 165547}
population = pd.Series(population_dict)
population
Explanation: Series as specialized dictionary
In this way, you can think of a Pandas Series a bit like a specialization of a Python dictionary. A dictionary is a structure that maps arbitrary keys to a set of arbitrary values, and a Series is a structure which maps typed keys to a set of typed values. This typing is important: just as the type-specific compiled code behind a NumPy array makes it more efficient than a Python list for certain operations, the type information of a Pandas Series makes it much more efficient than Python dictionaries for certain operations.
End of explanation
population['Arica y Parinacota']
Explanation: You can notice the indexes were sorted lexicographically. That's the default behaviour in Pandas
End of explanation
population['Metropolitana':'Valparaíso']
Explanation: Unlike a dictionary, though, the Series also supports array-style operations such as slicing:
End of explanation
# Area in km^2
area_dict = {'Arica y Parinacota': 16873.3,
'Antofagasta': 126049.1,
'Metropolitana de Santiago': 15403.2,
'Valparaiso': 16396.1,
'Bíobío': 37068.7,
'Magallanes y Antártica Chilena': 1382291.1}
area = pd.Series(area_dict)
area
Explanation: 2. The Pandas DataFrame Object
The next fundamental structure in Pandas is the DataFrame. Like the Series object discussed in the previous section, the DataFrame can be thought of either as a generalization of a NumPy array, or as a specialization of a Python dictionary. We'll now take a look at each of these perspectives.
DataFrame as a generalized NumPy array
If a Series is an analog of a one-dimensional array with flexible indices, a DataFrame is an analog of a two-dimensional array with both flexible row indices and flexible column names.
End of explanation
regions = pd.DataFrame({'population': population,
'area': area})
regions
regions.index
regions.columns
Explanation: Now that we have this along with the population Series from before, we can use a dictionary to construct a single two-dimensional object containing this information:
End of explanation
regions['area']
Explanation: DataFrame as specialized dictionary
Similarly, we can also think of a DataFrame as a specialization of a dictionary. Where a dictionary maps a key to a value, a DataFrame maps a column name to a Series of column data. For example, asking for the 'area' attribute returns the Series object containing the areas we saw earlier:
End of explanation
pd.DataFrame(population, columns=['population'])
Explanation: Constructing DataFrame objects
A Pandas DataFrame can be constructed in a variety of ways. Here we'll give several examples.
From a single Series object
A DataFrame is a collection of Series objects, and a single-column DataFrame can be constructed from a single Series:
End of explanation
pd.DataFrame({'population': population,
'area': area}, columns=['population', 'area'])
Explanation: From a dictionary of Series objects
As we saw before, a DataFrame can be constructed from a dictionary of Series objects as well:
End of explanation
regiones_file='data/chile_regiones.csv'
provincias_file='data/chile_provincias.csv'
comunas_file='data/chile_comunas.csv'
regiones=pd.read_csv(regiones_file, header=0, sep=',')
provincias=pd.read_csv(provincias_file, header=0, sep=',')
comunas=pd.read_csv(comunas_file, header=0, sep=',')
print('regiones table: ', regiones.columns.values.tolist())
print('provincias table: ', provincias.columns.values.tolist())
print('comunas table: ', comunas.columns.values.tolist())
regiones.head()
provincias.head()
comunas.head()
regiones_provincias=pd.merge(regiones, provincias, how='outer')
regiones_provincias.head()
provincias_comunas=pd.merge(provincias, comunas, how='outer')
provincias_comunas.head()
regiones_provincias_comunas=pd.merge(regiones_provincias, comunas, how='outer')
regiones_provincias_comunas.index.name='ID'
regiones_provincias_comunas.head()
regiones_provincias_comunas.to_csv('chile_demographic_data.csv', index=False)
Explanation: 3. Reading a CSV file and doing common Pandas operations
End of explanation
data_file='data/chile_demographic.csv'
data=pd.read_csv(data_file, header=0, sep=',')
data
data.sort_values('Poblacion')
data.sort_values('Poblacion', ascending=False)
(data.groupby(data['Region'])['Poblacion','Superficie'].sum())
(data.groupby(data['Region'])['Poblacion','Superficie'].sum()).sort_values(['Poblacion'])
Explanation: 4. Loading the full dataset
End of explanation
surveygizmo=regiones_provincias_comunas[['RegionNombre','ProvinciaNombre','ComunaNombre']]
surveygizmo.loc[:,'RegionNombre']=surveygizmo.apply(lambda x: x['RegionNombre'].replace("'",""), axis=1)
surveygizmo.loc[:,'ProvinciaNombre']=surveygizmo.apply(lambda x: x['ProvinciaNombre'].replace("'",""), axis=1)
surveygizmo.loc[:,'ComunaNombre']=surveygizmo.apply(lambda x: x['ComunaNombre'].replace("'",""), axis=1)
surveygizmo.rename(columns={'RegionNombre': 'Region:', 'ProvinciaNombre': 'Provincia:', 'ComunaNombre': 'Comuna:'}, inplace=True)
surveygizmo.to_csv('chile_demographic_surveygizmo.csv', index=False)
surveygizmo.head()
Explanation: OLD
End of explanation |
14,703 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Readable Syntax - Quicksort in Python
Quicksort Pseudocode from Wikipedia
Here is the Python implementation
Step1: Interactive and Batch possibilities - Munich temperatures
Step2: Python is not slow - use vector operations
The C/Fortran paradigm of manipulating arrays by visiting each element one at a time is nearly always the wrong approach in Python!
Step3: Manipulating arrays by vector operations is typically a factor of 10 faster than the visit each element strategy! | Python Code:
import random
# A python implementation of the Wikipedia quicksort algorithm
def my_quicksort(array):
    if len(array) <= 1:  # a list with 0 or 1 elements is already sorted
return array
pivot = array[0] # select a pivot (first element of list)
    rest = array[1:]  # the array with the pivot removed
less = [x for x in rest if x <= pivot]
greater = [x for x in rest if x > pivot]
return my_quicksort(less) + [pivot] + my_quicksort(greater)
testarr = [random.randint(-1000, 1000) for i in range(30)]
print(testarr)
print(my_quicksort(testarr))
Explanation: Readable Syntax - Quicksort in Python
Quicksort Pseudocode from Wikipedia
Here is the Python implementation
End of explanation
# playing around with the data interactively
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
data = np.loadtxt("data/munich_temperatures.txt")
day = data[:,0]
temp = data[:,1]
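# (illustrative addition, not from the original notebook) a quick first look at the data
plt.plot(day, temp)
plt.xlabel("day")
plt.ylabel("temperature")
plt.show()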
%load code/temperature.py
Explanation: Interactive and Batch possibilities - Munich temperatures
End of explanation
import numpy as np
x = np.linspace(0.0, 2.0 * np.pi, 100)
print(x)
%%timeit
import numpy as np
# C-like element-wise array manipulation
x = np.linspace(0.0, 2.0 * np.pi, 100)
y = np.zeros(len(x))
for i in range(len(x)):
y[i] = np.sin(x[i])
Explanation: Python is not slow - use vector operations
The C/Fortran paradigm of manipulating arrays by visiting each element one at a time is nearly always the wrong approach in Python!
End of explanation
%%timeit
import numpy as np
# fast vector operations
x = np.linspace(0.0, 2.0 * np.pi, 100)
y = np.sin(x)
Explanation: Manipulating arrays by vector operations is typically a factor of 10 faster than the visit each element strategy!
End of explanation |
14,704 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<h1>Getting started with the computational analysis of games
Step1: Gambit version 16.0.0 is the current development version. You can get it from http
Step2: Inspecting a game
The game that we will use as our starting point is one which many of you may have encountered in some variation. Myerson's (1991) textbook refers to this as a one-card poker game; Reiley et al. (2008) call this "stripped-down poker."
There is a deck consisting of two types of cards
Step3: Gambit's .efg format is a serialisation of an extensive game. The format looks somewhat dated (and indeed it was finalised in 1994), but is fast
Step4: The game offers a "Pythonic" interface. Most objects in a game can be accessed via iterable collections.
Step5: All objects have an optional text label, which can be used to retrieve it from the collection
Step6: In this game, Alice has two information sets
Step7: The chance or nature player is a special player in the players collection.
Step8: Gambit does sorting of the objects in each collection, so indexing collections by integer indices also works reliably if you save and load a game again.
Step9: We can assign particular game objects to variables for convenient referencing. In this case, we will explore the strategic effects of changing the relative probabilities of the Ace and King cards.
Step10: In the original version of the game, it was assumed that the Ace and King cards were equally likely to be dealt.
Step11: Computing Nash equilibria
Gambit offers a variety of methods for computing Nash equilibria of games, which we will discuss in more detail separately. This is a two-player game in extensive form, for which we can use Lemke's algorithm applied to the sequence form of the game.
In the Python interface, solution methods are offered in the gambit.nash module. Each method also is wrapped as a standalone command-line binary.
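For instance, the sequence-form solver used below also ships as a standalone gambit-lcp program in recent Gambit releases, which reads a game in .efg format on standard input (e.g. gambit-lcp < poker.efg); the exact set of gambit-* tools depends on your installation, so check the Gambit documentation for your version.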
Step12: The result of this method is a list of (mixed) behaviour profiles. (Future
Step13: A behaviour profile looks like a nested list. Entries are of the form profile[player][infoset][action].
Step14: We can compute various interesting quantities about behaviour profiles. Most interesting is perhaps the payoff to each player; because this is a constant-sum game, this is the value of the game.
Step15: Bob is randomising at his information set, so he must be indifferent between his actions there. We can check this.
Step16: As we teach our students, the key to understanding this game is that Alice plays so as to manipulate Bob's beliefs about the likelihood she has the Ace. We can examine Bob's beliefs over the nodes (members) of his one information set.
Given the structure of the betting rules, Bob becomes indifferent to his actions when he thinks there is a 3/4 chance Alice has the Ace.
Step17: Construction of the reduced normal form
The call to lcp_solve above uses the sequence form rather than the (reduced) strategic form of the game. This representation takes advantage of the tree structure, and can avoid (in many games of interest) the exponential blowup of the size of the strategic form relative to the extensive form. (More details on this in a while!)
Nevertheless, the reduced strategic form of a game can be of interest. Gambit implements transparently the conversions between the extensive and strategic representations. For games in extensive form, the reduced strategic form is computed on-the-fly from the game tree; that is, the full normal form payoff tables are not stored in memory.
Each player has a data member strategies which lists the reduced normal form strategies (s)he has.
Step18: We can also do a quick visualisation of the payoff matrix of the game using the built-in HTML output (plus Jupyter's inline rendering of HTML!)
Disclaimer
Step19: Bonus note
Step20: We can convert our behaviour profile to a corresponding mixed strategy profile. This is indexable as a nested list with elements [player][strategy].
Step21: Of course, Alice will receive the same expected payoff from this mixed strategy profile as she would in the original behaviour profile.
Step22: We can also ask what the expected payoffs to each of the strategies are. Alice's last two strategies correspond to folding when she has the Ace, which is dominated.
Step23: Automating/scripting analysis
The real gain in having libraries for doing computation in game theory is to be able to script computations. For example, we can explore how the solution to the game changes, as we change the probability that Alice is dealt the Ace.
Payoffs and probabilities are represented in games in Gambit as exact-precision numbers, which can be either rational numbers or (exact-precision) decimals. These are called gambit.Rational and gambit.Decimal, and are compatible with the Python fractions.Fraction and decimal.Decimal classes, respectively. (In Gambit 16.0.0, they are derived from them.)
Caveat/Tip
Step24: As a final experiment, we can also change the payoff structure instead of the probability of the high card. How would the equilibrium change if a Raise/Meet required putting 2 into the pot instead of 1?
Step25: The outcomes member of the game lists all of the outcomes. An outcome can appear at multiple nodes. Outcomes, like all other objects, can be given text labels for easy reference.
Step26: Once again, solve the revised game using Lemke's algorithm on the sequence form.
Step27: The value of the game to Alice is now higher
Step28: Bob's equilibrium belief about Alice's hand is also different of course, as he now is indifferent between meeting and passing Alice's raise when he thinks the chance she has the Ace is 2/3 (instead of 3/4 before).
Step29: Serialising the game in other formats
We already saw above some of the formats that can be used to serialise games. There are a few other standard options. For example, Gambit also has a format for games in strategic (or normal) form. You can get the reduced normal form of the extensive game in this format directly
Step30: Also, we can write the game out in the XML format used by Game Theory Explorer | Python Code:
import gambit
Explanation: <h1>Getting started with the computational analysis of games:</h1>
<h2>Playing "stripped down" poker</h2>
<i>Theodore L. Turocy</i><br/>
<i>University of East Anglia</i>
<br/><br/>
<h3>EC'16 Workshop
24 July 2016</h3>
End of explanation
gambit.__version__
Explanation: Gambit version 16.0.0 is the current development version. You can get it from http://www.gambit-project.org.
End of explanation
g = gambit.Game.read_game("poker.efg")
Explanation: Inspecting a game
The game that we will use as our starting point is one which many of you may have encountered in some variation. Myerson's (1991) textbook refers to this as a one-card poker game; Reiley et al. (2008) call this "stripped-down poker."
There is a deck consisting of two types of cards: Ace and King. There are two players, Alice and Bob. Both start by putting 1 in the pot. One player (Alice) draws a card; initially assume the cards are in equal proportion in the deck. Alice sees her card, and then decides whether she wants to raise (add another 1 to the pot) or fold (and concede the pot to Bob). If she raises, play passes to Bob, who must decide whether to meet her raise (and add another 1 to the pot) or pass (and concede the pot to Alice). If Alice raises and Bob meets, Alice reveals her card: If it is an Ace, she takes the pot, whereas if it is a King, Bob does.
<center>
<img src="kingbob.jpg" width="30%">
</center>
Here is what the game looks like in extensive form (as drawn by Gambit's graphical viewer, which we will touch on separately):
<center>
<img src="poker.png" width="60%">
</center>
End of explanation
g
Explanation: Gambit's .efg format is a serialisation of an extensive game. The format looks somewhat dated (and indeed it was finalised in 1994), but is fast: recently I loaded a game with about 1M nodes in under 2s.
End of explanation
g.players
Explanation: The game offers a "Pythonic" interface. Most objects in a game can be accessed via iterable collections.
End of explanation
g.players["Alice"]
Explanation: All objects have an optional text label, which can be used to retrieve it from the collection:
End of explanation
g.players["Alice"].infosets
Explanation: In this game, Alice has two information sets: when she has drawn the Ace, and when she has drawn the King:
End of explanation
g.players.chance
g.players.chance.infosets
Explanation: The chance or nature player is a special player in the players collection.
End of explanation
g.players.chance.infosets[0].actions
Explanation: Gambit does sorting of the objects in each collection, so indexing collections by integer indices also works reliably if you save and load a game again.
End of explanation
deal = g.players.chance.infosets[0]
Explanation: We can assign particular game objects to variables for convenient referencing. In this case, we will explore the strategic effects of changing the relative probabilities of the Ace and King cards.
End of explanation
deal.actions["A"].prob
deal.actions["K"].prob
Explanation: In the original version of the game, it was assumed that the Ace and King cards were equally likely to be dealt.
End of explanation
result = gambit.nash.lcp_solve(g)
Explanation: Computing Nash equilibria
Gambit offers a variety of methods for computing Nash equilibria of games, which we will discuss in more detail separately. This is a two-player game in extensive form, for which we can use Lemke's algorithm applied to the sequence form of the game.
In the Python interface, solution methods are offered in the gambit.nash module. Each method also is wrapped as a standalone command-line binary.
End of explanation
len(result)
Explanation: The result of this method is a list of (mixed) behaviour profiles. (Future: the return value will be encapsulated in a results class retaining more detailed metadata about the run of the algorithm.)
In this game, there is a unique (Bayes-)Nash equilibrium.
End of explanation
result[0]
result[0][g.players["Alice"]]
result[0][g.players["Bob"]]
Explanation: A behaviour profile looks like a nested list. Entries are of the form profile[player][infoset][action].
End of explanation
result[0].payoff(g.players["Alice"])
result[0].payoff(g.players["Bob"])
Explanation: We can compute various interesting quantities about behaviour profiles. Most interesting is perhaps the payoff to each player; because this is a constant-sum game, this is the value of the game.
End of explanation
result[0].payoff(g.players["Bob"].infosets[0].actions[0])
result[0].payoff(g.players["Bob"].infosets[0].actions[1])
Explanation: Bob is randomising at his information set, so he must be indifferent between his actions there. We can check this.
End of explanation
result[0].belief(g.players["Bob"].infosets[0].members[0])
result[0].belief(g.players["Bob"].infosets[0].members[1])
Explanation: As we teach our students, the key to understanding this game is that Alice plays so as to manipulate Bob's beliefs about the likelihood she has the Ace. We can examine Bob's beliefs over the nodes (members) of his one information set.
Given the structure of the betting rules, Bob becomes indifferent to his actions when he thinks there is a 3/4 chance Alice has the Ace.
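As a quick cross-check of that number (a back-of-the-envelope calculation, not part of the original notebook): if Bob passes he simply loses his ante of 1, while meeting costs him 2 when Alice holds the Ace and wins him 2 when she holds the King. With belief p that Alice holds the Ace, indifference requires p*(-2) + (1-p)*(2) = -1, which gives p = 3/4.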
End of explanation
g.players["Alice"].strategies
g.players["Bob"].strategies
Explanation: Construction of the reduced normal form
The call to lcp_solve above uses the sequence form rather than the (reduced) strategic form of the game. This representation takes advantage of the tree structure, and can avoid (in many games of interest) the exponential blowup of the size of the strategic form relative to the extensive form. (More details on this in a while!)
Nevertheless, the reduced strategic form of a game can be of interest. Gambit implements transparently the conversions between the extensive and strategic representations. For games in extensive form, the reduced strategic form is computed on-the-fly from the game tree; that is, the full normal form payoff tables are not stored in memory.
Each player has a data member strategies which lists the reduced normal form strategies (s)he has.
End of explanation
import IPython.display; IPython.display.HTML(g.write('html'))
Explanation: We can also do a quick visualisation of the payoff matrix of the game using the built-in HTML output (plus Jupyter's inline rendering of HTML!)
Disclaimer: There's a bug in the 16.0.0 release which prevents the correct generation of HTML; this will be corrected in 16.0.1 (and is corrected in the 'master' branch of the git repository already).
End of explanation
print g.write('sgame')
Explanation: Bonus note: Gambit also supports writing out games using Martin Osborne's sgame LaTeX style: https://www.economics.utoronto.ca/osborne/latex/. This doesn't have auto-rendering magic in Jupyter, but it's all ready to cut-and-paste to your favourite editor.
End of explanation
msp = result[0].as_strategy()
msp
Explanation: We can convert our behaviour profile to a corresponding mixed strategy profile. This is indexable as a nested list with elements [player][strategy].
End of explanation
msp.payoff(g.players["Alice"])
Explanation: Of course, Alice will receive the same expected payoff from this mixed strategy profile as she would in the original behaviour profile.
End of explanation
msp.strategy_values(g.players["Alice"])
Explanation: We can also ask what the expected payoffs to each of the strategies are. Alice's last two strategies correspond to folding when she has the Ace, which is dominated.
End of explanation
import pandas
probs = [ gambit.Rational(i, 20) for i in xrange(1, 20) ]
results = [ ]
for prob in probs:
g.players.chance.infosets[0].actions[0].prob = prob
g.players.chance.infosets[0].actions[1].prob = 1-prob
result = gambit.nash.lcp_solve(g)[0]
results.append({ "prob": prob,
"alice_payoff": result.payoff(g.players["Alice"]),
"bluff": result[g.players["Alice"].infosets[1].actions[0]],
"belief": result.belief(g.players["Bob"].infosets[0].members[1]) })
df = pandas.DataFrame(results)
df
import pylab
%matplotlib inline
pylab.plot(df.prob, df.bluff, '-')
pylab.xlabel("Probability Alice gets ace")
pylab.ylabel("Probability Alice bluffs with king")
pylab.show()
pylab.plot(df.prob, df.alice_payoff, '-')
pylab.xlabel("Probability Alice gets ace")
pylab.ylabel("Alice's equilibrium payoff")
pylab.show()
pylab.plot(df.prob, df.belief, '-')
pylab.xlabel("Probability Alice gets ace")
pylab.ylabel("Bob's equilibrium belief")
pylab.ylim(0,1)
pylab.show()
Explanation: Automating/scripting analysis
The real gain in having libraries for doing computation in game theory is to be able to script computations. For example, we can explore how the solution to the game changes, as we change the probability that Alice is dealt the Ace.
Payoffs and probabilities are represented in games in Gambit as exact-precision numbers, which can be either rational numbers or (exact-precision) decimals. These are called gambit.Rational and gambit.Decimal, and are compatible with the Python fractions.Fraction and decimal.Decimal classes, respectively. (In Gambit 16.0.0, they are derived from them.)
Caveat/Tip: This means one cannot set a payoff or probability to be a floating-point number. We justify this based on the principle "explicit is better than implicit." In two-player games, the extreme points of the set of Nash equilibria are rational numbers, whenever the data of the game are rational, and the Gambit equilibrium computation methods take advantage of this. If the payoff of a game were specified as a floating-point number, e.g. 0.333333 instead of 1/3, surprising results can occur due to rounding.
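For instance, a value of one third would be written as gambit.Rational(1, 3), or equivalently fractions.Fraction(1, 3), never as the float 0.333333.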
End of explanation
deal.actions[0].prob = gambit.Rational(1,2)
deal.actions[1].prob = gambit.Rational(1,2)
Explanation: As a final experiment, we can also change the payoff structure instead of the probability of the high card. How would the equilibrium change if a Raise/Meet required putting 2 into the pot instead of 1?
End of explanation
g.outcomes["Alice wins big"]
g.outcomes["Alice wins big"][0] = 3
g.outcomes["Alice wins big"][1] = -3
g.outcomes["Bob wins big"][0] = -3
g.outcomes["Bob wins big"][1] = 3
Explanation: The outcomes member of the game lists all of the outcomes. An outcome can appear at multiple nodes. Outcomes, like all other objects, can be given text labels for easy reference.
End of explanation
result = gambit.nash.lcp_solve(g)
len(result)
result[0]
Explanation: Once again, solve the revised game using Lemke's algorithm on the sequence form.
End of explanation
result[0].payoff(g.players["Alice"])
Explanation: The value of the game to Alice is now higher: 1/2 instead of 1/3 with the original payoffs.
End of explanation
result[0].belief(g.players["Bob"].infosets[0].members[0])
Explanation: Bob's equilibrium belief about Alice's hand is also different of course, as he now is indifferent between meeting and passing Alice's raise when he thinks the chance she has the Ace is 2/3 (instead of 3/4 before).
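(As an added cross-check of the arithmetic: with the raise and meet now costing 2, passing still costs Bob 1, while meeting loses 3 against the Ace and wins 3 against the King, so indifference requires p*(-3) + (1-p)*(3) = -1, i.e. p = 2/3.)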
End of explanation
print g.write('nfg')
Explanation: Serialising the game in other formats
We already saw above some of the formats that can be used to serialise games. There are a few other standard options. For example, Gambit also has a format for games in strategic (or normal) form. You can get the reduced normal form of the extensive game in this format directly:
End of explanation
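If needed, the serialised text can also be written straight to a file; this is just a sketch and the file name poker.nfg is an arbitrary choice.
with open("poker.nfg", "w") as f:
    f.write(g.write('nfg'))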
print g.write('gte')
Explanation: Also, we can write the game out in the XML format used by Game Theory Explorer:
End of explanation |
14,705 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Pynetics QuickStart
In this example we are going to build a very simple and useless algorithm to explore the possibilities of the pynetics library.
Our problem will be as follows. We're going to develop a genetic algorithm to find which binary list of length $L=N$ has the biggest sum. Yes, totally absurd, but useful for learning GAs and this library.
Let's start from the beginning. The individuals.
Representing individuals
The individuals are the most important component in a genetic algorithm. Each individual is a possible solution, good or bad, for our problem.
We want to model an individual capable of representing a possible solution for our problem. Pynetics has a perfect representation for this problem, the BinaryIndividual, so it's not necessary to create a custom individual. We'll cross that bridge when we get to it.
The algorithm will create individuals using a SpawningPool implementation. We're going to use a special implementation inside the module pynetics.ga_bin called BinaryIndividualSpawningPool, that creates BinaryIndividual instances of the given size.
Step1: Now the spawning pool will be capable of creating individuals of the specified size. The genetic algorithm will create a population of individuals using the spawn method to populate it. We'll also specify a population size for the algorithm and see an example population
Step2: Fitness
Our individuals are solutions for the problem, but how can we measure how good or bad they are? That is what the fitness is for. It's a function that will return a float value. The bigger the value, the better the individual is.
We could use a fitness function equal to the sum of all $1$'s, but if we want to stop the algorithm based on the fitness, the best possible fitness for an individual of size 10 is not the same as for an individual of size 20.
So the fitness function we're gonna use has the form $1 / (1 + \alpha)$, where $\alpha$ is the error of our individual. The error will be computed as the number of $0$'s the individual has.
Step3: This function guarantees that the fitness will belong to the $(0, 1]$ interval. Let's see an example of how it behaves.
Step4: The stop condition
Now we're gonna specify when our algorithm should stop. This is controlled by a stop condition.
Step5: Instances of the class FitnessBound are created by specifying the fitness threshold above which we can stop our algorithm. We have specified a FitnessBound object with a threshold of $1$. That means that all the values below $1$ will not stop our algorithm, whereas all the values greater than or equal to $1$ will.
Because our fitness value belongs to the $(0, 1]$ interval, the algorithm will stop only when the population has an individual with a fitness of $1$ (all $1$'s).
Selecting individuals
For our GA, we're going to use tournament selection. Tournament selection works by selecting $n$ individuals randomly from the population and then returning the best of them (based on their fitnesses).
Step6: Recombining
Now the recombination, i.e. the step where individuals are selected and their genetic information is inherited by their progeny.
We'll use a OnePointRecombination, included in the module ga_list. Also, for the recombination we're going to specify the probability for two individuals to mate to be 1, that is, they always mate.
Step7: Mutations
The same with mutations. The mutation operator we're gonna use is AllGenesCanSwitch, a mutation where each binary gene has a probability of being switched from $0$ to $1$ and vice versa. It belongs to the module ga_bin.
Step8: Replacement
Once we've got the offspring, we need to replace the population with these newborns. The operator for that purpose will be a LowElitism operator, where the worst individuals of the population are replaced by the offspring.
We'll fix the replacement rate at $0.9$, i.e. $90\%$ of the population will be replaced in each iteration of the loop.
Step9: The algorithm
Step10: Now that we've created our algorithm, we can run it to find the right solution. Let's see how it works
Step11: We can specify functions to be executed while the training takes place. The next example adds some of those functions. | Python Code:
from pynetics.ga_bin import BinaryIndividualSpawningPool
# Let's define the size of our individuals (the number of 1's and 0's)
individual_size = 25
binary_individual_spawning_pool = BinaryIndividualSpawningPool(size=individual_size)
Explanation: Pynetics QuickStart
In this example we are going to build a very simple and useless algorithm to explore the possibilities of the pynetics library.
Our problem will be as follows. We're going to develop a genetic algorithm to find which binary list of length $L=N$ has the biggest sum. Yes, totally absurd, but useful for learning GAs and this library.
Let's start from the beginning. The individuals.
Representing individuals
The individuals are the most important component in a genetic algorithm. Each individual is a possible solution, good or bad, for our problem.
We want to model an individual capable of representing a possible solution for our problem. Pynetics has a perfect representation for this problem, the BinaryIndividual, so it's not necessary to create a custom individual. We'll cross that bridge when we get to it.
The algorithm will create individuals using a SpawningPool implementation. We're going to use a special implementation inside the module pynetics.ga_bin called BinaryIndividualSpawningPool, that creates BinaryIndividual instances of the given size.
End of explanation
population_size = 10
for i in range(population_size):
individual = binary_individual_spawning_pool.spawn()
print(i, '->', individual)
Explanation: Now the spawning pool will be capable of creating individuals of the specified size. The genetic algorithm will create a population of individuals using the spawn method to populate it. We'll also specify a population size for the algorithm and see an example population:
End of explanation
def maximize_ones_fitness(individual):
error = len(individual) - sum(individual)
return 1 / (1 + error)
Explanation: Fitness
Our individuals are solutions for the problem, but how can we measure how good or bad they are? That is what the fitness is for. It's a function that will return a float value. The bigger the value, the better the individual is.
We could use a fitness function equal to the sum of all $1$'s, but if we want to stop the algorithm based on the fitness, the best possible fitness for an individual of size 10 is not the same as for an individual of size 20.
So the fitness function we're gonna use has the form $1 / (1 + \alpha)$, where $\alpha$ is the error of our individual. The error will be computed as the number of $0$'s the individual has.
End of explanation
for i in range(population_size):
individual = binary_individual_spawning_pool.spawn()
fitness = maximize_ones_fitness(individual)
print(i, '->', individual, fitness)
Explanation: This function guarantees that the fitness will belong to the $(0, 1]$ interval. Let's see an example of how it behaves.
End of explanation
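As a quick sanity check, here is a sketch that uses plain Python lists as stand-ins for individuals (the fitness only needs len and sum): the best possible individual reaches the upper bound 1.0 and the worst one gives the smallest value 1 / (1 + individual_size).
best = maximize_ones_fitness([1] * individual_size)   # 1.0, all genes are 1's
worst = maximize_ones_fitness([0] * individual_size)  # 1 / 26 for individual_size = 25
best, worst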
from pynetics.stop import FitnessBound
fitness_stop_condition = FitnessBound(1)
Explanation: The stop condition
Now we're gonna specify when our algorithm should stop. This is controlled by a stop condition.
End of explanation
from pynetics.selections import Tournament
tournament_selection = Tournament(2)
Explanation: Instances of the class FitnessBound are created by specifying the fitness threshold above which we can stop our algorithm. We have specified a FitnessBound object with a threshold of $1$. That means that all the values below $1$ will not stop our algorithm, whereas all the values greater than or equal to $1$ will.
Because our fitness value belongs to the $(0, 1]$ interval, the algorithm will stop only when the population has an individual with a fitness of $1$ (all $1$'s).
Selecting individuals
For our GA, we're going to use tournament selection. Tournament selection works by selecting $n$ individuals randomly from the population and then returning the best of them (based on their fitnesses).
End of explanation
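To make the idea concrete, here is a rough standalone sketch of tournament selection; it is not how pynetics implements it internally, just the mechanism described above, using plain lists as individuals.
import random

def tournament_pick(population, fitness, size=2):
    # sample `size` individuals at random and keep the fittest one
    contenders = random.sample(population, size)
    return max(contenders, key=fitness)

toy_population = [[random.randint(0, 1) for _ in range(individual_size)]
                  for _ in range(population_size)]
tournament_pick(toy_population, maximize_ones_fitness)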
from pynetics.ga_list import OnePointRecombination
recombination_probability = 1
recombination = OnePointRecombination()
Explanation: Recombining
Now the recombination, i.e. the step where individuals are selected and their genetic information is inherited by their progeny.
We'll use a OnePointRecombination, included in the module ga_list. Also, for the recombination we're going to specify the probability for two individuals to mate to be 1, that is, they always mate.
End of explanation
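A minimal sketch of what one-point recombination does (again, an illustration with plain lists, not the pynetics code):
import random

def one_point_crossover(parent_a, parent_b):
    # cut both parents at the same random point and swap the tails
    point = random.randint(1, len(parent_a) - 1)
    return parent_a[:point] + parent_b[point:], parent_b[:point] + parent_a[point:]

one_point_crossover([0] * 10, [1] * 10)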
from pynetics.ga_bin import AllGenesCanSwitch
mutation_probability = 1 / individual_size
mutation = AllGenesCanSwitch()
Explanation: Mutations
The same with mutations. The mutation operator we're gonna use is AllGenesCanSwitch, a mutation where each binary gene has a probability of being switched from $0$ to $1$ and vice versa. It belongs to the module ga_bin.
End of explanation
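The mutation can be sketched the same way: each gene flips with a small probability. This is an illustration only, not the AllGenesCanSwitch implementation.
import random

def bit_flip(individual, p=1 / individual_size):
    # each gene switches from 0 to 1 (or back) with probability p
    return [1 - gene if random.random() < p else gene for gene in individual]

bit_flip([0] * individual_size)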
from pynetics.replacements import LowElitism
replacement_rate = 0.9
replacement = LowElitism()
Explanation: Replacement
Once we've got the offspring, we need to replace the population with these newborns. The operator for that purpose will be a LowElitism operator, where the worst individuals of the population are replaced by the offspring.
We'll fix the replacement rate at $0.9$, i.e. $90\%$ of the population will be replaced in each iteration of the loop.
End of explanation
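A rough sketch of the low-elitism idea (not the library code): keep only the best (1 - rate) share of the current population and fill the rest with offspring.
def low_elitism_replace(population, offspring, fitness, rate=0.9):
    keep = max(1, int(len(population) * (1 - rate)))
    survivors = sorted(population, key=fitness, reverse=True)[:keep]
    return survivors + offspring[:len(population) - keep]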
from pynetics.algorithms import SimpleGA
ga = SimpleGA(
stop_condition=fitness_stop_condition,
population_size=population_size,
fitness=maximize_ones_fitness,
spawning_pool=binary_individual_spawning_pool,
selection=tournament_selection,
recombination=recombination,
mutation=mutation,
replacement=replacement,
p_recombination=recombination_probability,
p_mutation=mutation_probability,
replacement_rate=replacement_rate,
)
Explanation: The algorithm
End of explanation
ga.run()
print(ga.best())
Explanation: Now that we've created our algorithm, we can run it to find the right solution. Let's see how it works
End of explanation
ga.on_start(
lambda ga: print('Starting genetic algorithm')
).on_end(
lambda ga: print('Genetic Algorithm ended. Best individual:', ga.best())
).on_step_start(
lambda ga: print('Step:', ga.generation, '->', end='')
).on_step_end(
lambda ga: print(ga.best(), 'fitness:', ga.best().fitness())
)
ga.run()
Explanation: We can specify functions to be executed while the training takes place. The next example adds some of those functions.
End of explanation |
14,706 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
20 NEWS GROUPS
Antes de nada, hay que importar los paquetes necesarios.
Step1: Lectura de los datos
A continuación, se define una función para cargar los datos que se encuentran en las carpetas data/20news-bydate-train/ y data/20news-bydate-test/. Los datos tendrán la siguiente estructura
Step2: Una vez definida la función, se cargan los datos de entrenamiento y test.
Step3: Inspección de los datos
A continuación, se inspeccionará algún archivo del dataset para ver su contenido y su categoría
Step4: ¿Cómo se podrían obtener los nombres de las categorías de los 10 primeros archivos?
Step5: Extraer información de los archivos
Antes de aplicar técnicas de machine learning a los datasets, en este caso documentos de texto, hay que convertir dicho texto en vectores numéricos.
Procesado de texto y creación de un diccionario
Step6: Descarga de las stopwords
Step7: Aplicar las stopwords
Step8: TF-IDF
Conversión del diccionario de ocurrencias a frecuencias. El término tf-idf (term frequency-inverse document frequency) es un término estadístico que refleja cómo de importante es una palabra en un documento. Es muy utilizado en text mining.
Step9: Entrenamiento y creación de un modelo
Step10: Matriz de confusión | Python Code:
%pylab inline
from sklearn import datasets
Explanation: 20 NEWS GROUPS
Antes de nada, hay que importar los paquetes necesarios.
End of explanation
def loadDataset(directory):
dataset = datasets.load_files(directory)
print "Loaded %d documents" % len(dataset.data)
print "Loaded %d categories" % len(dataset.target_names)
print "Categories ",dataset.target_names
print
return dataset
Explanation: Lectura de los datos
A continuación, se define una función para cargar los datos que se encuentran en las carpetas data/20news-bydate-train/ y data/20news-bydate-test/. Los datos tendrán la siguiente estructura:
data/
20news-bydate-train/
category_name_1/
file_1.txt
file_2.txt
...
category_name_2/
file_1.txt
file_2.txt
...
20news-bydate-test/
category_name_1/
file_1.txt
file_2.txt
...
category_name_2/
file_1.txt
file_2.txt
...
En total, debe haber 20 categorías.
End of explanation
print "Loading data train..."
data_train = loadDataset("data/20news-bydate-train/")
print "Loading data test...."
data_test = loadDataset("data/20news-bydate-test/")
Explanation: Una vez definida la función, se cargan los datos de entrenamiento y test.
End of explanation
print "Message:"
print data_train.data[5]
print "Category: %s " % data_train.target_names[5]
Explanation: Inspección de los datos
A continuación, se inspeccionará algún archivo del dataset para ver su contenido y su categoría:
End of explanation
print data_train.target_names[:10]
Explanation: ¿Cómo se podrían obtener los nombres de las categorías de los 10 primeros archivos?
End of explanation
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer(decode_error = 'ignore', lowercase=True, strip_accents="unicode")
X_train_counts = count_vect.fit_transform(data_train.data)
X_train_counts.shape
Explanation: Extraer información de los archivos
Antes de aplicar técnicas de machine learning a los datasets, en este caso documentos de texto, hay que convertir dicho texto en vectores numéricos.
Procesado de texto y creación de un diccionario
End of explanation
import nltk
nltk.download("stopwords")
english_stopwords = nltk.corpus.stopwords.words('english')
stopwords = set(english_stopwords)
Explanation: Descarga de las stopwords:
End of explanation
count_vect = CountVectorizer(decode_error = 'ignore', lowercase=True, strip_accents="unicode", stop_words = stopwords)
X_train_counts = count_vect.fit_transform(data_train.data)
X_train_counts.shape
Explanation: Aplicar las stopwords:
End of explanation
from sklearn.feature_extraction.text import TfidfTransformer
tf_transformer = TfidfTransformer(use_idf=False).fit(X_train_counts)
X_train_tf = tf_transformer.transform(X_train_counts)
X_train_tf.shape
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
X_train_tfidf.shape
Explanation: TF-IDF
Conversión del diccionario de ocurrencias a frecuencias. El término tf-idf (term frequency-inverse document frequency) es un término estadístico que refleja cómo de importante es una palabra en un documento. Es muy utilizado en text mining.
End of explanation
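Un pequeño esbozo adicional, con un corpus de juguete inventado, para ver qué produce tf-idf sobre unos pocos documentos (se reutilizan las clases ya importadas arriba; en versiones recientes de scikit-learn el método get_feature_names se llama get_feature_names_out):
corpus_demo = ["el gato come pescado",
               "el perro come carne",
               "el gato y el perro duermen"]
cv_demo = CountVectorizer()
tfidf_demo = TfidfTransformer()
X_demo = tfidf_demo.fit_transform(cv_demo.fit_transform(corpus_demo))
print X_demo.shape              # (3 documentos, n terminos del vocabulario)
print cv_demo.get_feature_names()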
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB().fit(X_train_tfidf, data_train.target)
docs_new = ['God is love', 'OpenGL on the GPU is fast', 'I want to buy a new motorcycle']
X_new_counts = count_vect.transform(docs_new)
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
prediction = clf.predict(X_new_tfidf)
for doc, category in zip(docs_new, prediction):
print('%r => %s' % (doc, data_train.target_names[category]))
X_new_counts = count_vect.transform(data_test.data)
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
prediction = clf.predict(X_new_tfidf)
#for doc, category in zip(docs_new, predicted):
# print('%r => %s' % (doc, data_train.target_names[category]))
Explanation: Entrenamiento y creación de un modelo
End of explanation
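Como comprobación adicional (un esbozo), se puede calcular la exactitud sobre el conjunto de test comparando las predicciones con las etiquetas reales:
accuracy = np.mean(prediction == data_test.target)
print "Exactitud sobre el conjunto de test: %0.3f" % accuracy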
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(data_test.target, prediction)
def plot_confusion_matrix(cm, labels, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels, rotation=45)
plt.yticks(tick_marks, labels)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
##print(cm_normalized)
plt.figure(figsize=(18,12))
plot_confusion_matrix(cm_normalized, data_train.target_names, title='Normalized confusion matrix')
plt.show()
Explanation: Matriz de confusión
End of explanation |
14,707 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
5 minutes to creating your first Machine Learning model
There's a number of services out there that make Machine Learning accessible to the masses by abstracting away the complexities of creating predictive models from data. Here I want to show you how to use one of them, BigML, through its API, in order to create a real estate pricing model.
The idea is that you're given characteristics of a real estate property (e.g. number of bedrooms, surface, year of construction, etc.) and you input these into a "model" that will predict the property's value. To create this model, we'll just need to use some example real-estate data that I've scraped from realtor.com using Import.io. The data contains 4776 rows (one per example property), it's available to download as a CSV file or to browse on Google Spreadsheets.
In the following, we'll see how to upload the data to BigML, which will automatically create a predictive model, and how to query this model with any given set of real estate property characteristics. Check out this blog post if you want to understand what happens behind the scenes, how Machine Learning works, and when it fails to work (http
Step1: API wrapper
We create an api object which will be used to communicate with the BigML API.
Note that BigML has two ways of functioning
Step2: 1. Create a predictive model
Specify training data to use
BigML makes a distinction between the origin of the data (the "source") and the actual data that's being used for training (the "dataset"). We first create a data source by specifying a csv file to use (hosted on Amazon S3 in this example).
Step3: API calls are asynchronous, so we use api.ok to make sure that the request has finished before we move on to the rest.
Step4: The source can be found on the BigML.com web interface at the following URL
Step5: Open the link in a new tab. If it doesn't work, check that you're logged in on the BigML.com web interface and make sure that the toggle on the right is at "development" (and not "production").
We now create a dataset.
Step6: If you click on the outputted link above, it will take you to a histogram view of the data on the BigML dashboard.
Learn a model from the data
This is done in just one command — there are no parameters to set whatsoever.
Step7: BigML uses decision tree models. The tree that's been learnt from your data can be seen at
Step8: 2. Make predictions
Let's say we want to predict the value (in USD) of a real estate property characterized by the following attributes (go on and edit the values if you want)
Step9: Let's make a prediction for this new input against the model we created
Step10: Here's the same thing on one single line | Python Code:
BIGML_USERNAME = '' # fill in your username between the quotes
BIGML_API_KEY = '' # fill in your API key
BIGML_AUTH = 'username=' + BIGML_USERNAME + ';api_key=' + BIGML_API_KEY # leave as it is
print "Authentication variables set!"
Explanation: 5 minutes to creating your first Machine Learning model
There's a number of services out there that make Machine Learning accessible to the masses by abstracting away the complexities of creating predictive models from data. Here I want to show you how to use one of them, BigML, through its API, in order to create a real estate pricing model.
The idea is that you're given characteristics of a real estate property (e.g. number of bedrooms, surface, year of construction, etc.) and you input these into a "model" that will predict the property's value. To create this model, we'll just need to use some example real-estate data that I've scraped from realtor.com using Import.io. The data contains 4776 rows (one per example property), it's available to download as a CSV file or to browse on Google Spreadsheets.
In the following, we'll see how to upload the data to BigML, which will automatically create a predictive model, and how to query this model with any given set of real estate property characteristics. Check out this blog post if you want to understand what happens behind the scenes, how Machine Learning works, and when it fails to work (http://louisdorard.com/blog/when-machine-learning-fails).
This page is interactive
The following is an IPython notebook to show you how to use the BigML API to...
create a model from data
make predictions with this model.
IPython notebooks act as interactive web-based code tutorials. They are web pages in which there are blocks of code that you can edit and run. The code is run on the same server that serves the page and the output is displayed on the page. You'll be able to edit and run the blocks of code below by positioning your cursor inside them and pressing Shift+Enter.
0. Initialize the BigML API
First of all, you should create a free BigML account at https://bigml.com/accounts/register/ (it takes 2 minutes, literally).
Authentication variables
Authentication is performed using your BigML username and API key, which can be found at https://bigml.com/account/apikey
End of explanation
# Uncomment lines below in case this block doesn't work
#import pip
#pip.main(['install', 'bigml'])
from bigml.api import BigML
# Assuming you installed the BigML Python wrappers (with the 'pip install bigml' command, see above)
# Assuming BIGML_USERNAME and BIGML_API_KEY were defined as shell environment variables
# otherwise: api=BigML('your username here','your API key here',dev_mode=True)
api=BigML(dev_mode=True) # use BigML in development mode for unlimited usage
print "Wrapper ready to use!"
Explanation: API wrapper
We create an api object which will be used to communicate with the BigML API.
Note that BigML has two ways of functioning: production mode or development mode. Here, we choose to use the latter since it's free!
End of explanation
source = api.create_source('s3://bml-data/realtor-las-vegas.csv', {"name": "Realtor LV"})
Explanation: 1. Create a predictive model
Specify training data to use
BigML makes a distinction between the origin of the data (the "source") and the actual data that's being used for training (the "dataset"). We first create a data source by specifying a csv file to use (hosted on Amazon S3 in this example).
End of explanation
api.ok(source) # shows "True" when source has been created
Explanation: API calls are asynchronous, so we use api.ok to make sure that the request has finished before we move on to the rest.
End of explanation
BIGML_AUTH = %env BIGML_AUTH
print "https://bigml.com/dashboard/"+str(source['resource'])+"?"+BIGML_AUTH
Explanation: The source can be found on the BigML.com web interface at the following URL:
End of explanation
dataset = api.create_dataset(source, {"name": "Realtor LV dataset"})
api.ok(dataset)
print "Dataset ready and available at https://bigml.com/dashboard/"+str(dataset['resource'])+"?"+BIGML_AUTH
Explanation: Open the link in a new tab. If it doesn't work, check that you're logged in on the BigML.com web interface and make sure that the toggle on the right is at "development" (and not "production").
We now create a dataset.
End of explanation
model = api.create_model(dataset)
print "'model' object created!"
Explanation: If you click on the outputted link above, it will take you to a histogram view of the data on the BigML dashboard.
Learn a model from the data
This is done in just one command — there are no parameters to set whatsoever.
End of explanation
api.ok(model) # making sure the model is ready
print "Model ready and available at https://bigml.com/dashboard/"+str(model['resource'])+"?"+BIGML_AUTH
Explanation: BigML uses decision tree models. The tree that's been learnt from your data can be seen at:
End of explanation
# the strings below correspond to headers of the realtor-las-vegas.csv file we used to create the model
new_input = {"bedrooms": 4, "full_bathrooms": 2, "type": "Single Family Home", "size_sqft": 1500}
print "'new_input' object created!"
Explanation: 2. Make predictions
Let's say we want to predict the value (in USD) of a real estate property characterized by the following attributes (go on and edit the values if you want):
End of explanation
prediction = api.create_prediction(model, new_input)
print "Prediction: ",prediction['object']['output']
Explanation: Let's make a prediction for this new input against the model we created:
End of explanation
print "Value: ",api.create_prediction(model, {"bedrooms": 4, "full_bathrooms": 4, "type": "Single Family Home", "size_sqft": 1500})['object']['output']," USD"
Explanation: Here's the same thing on one single line:
End of explanation |
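As a final, purely hypothetical experiment, we could loop over a few surface values and see how the predicted price changes; the attribute values below are made up for illustration and the call pattern is the same one used above.
for size in [1000, 1500, 2000, 2500]:
    features = {"bedrooms": 3, "full_bathrooms": 2, "type": "Single Family Home", "size_sqft": size}
    result = api.create_prediction(model, features)
    print size, "sqft ->", result['object']['output'], "USD"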
14,708 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step2: Más Álgebra lineal con Python
Esta notebook fue creada originalmente como un blog post por Raúl E. López Briega en Matemáticas, análisis de datos y python. El contenido esta bajo la licencia BSD.
<img alt="Algebra lineal" title="Algebra lineal" src="https
Step3: Combinaciones lineales
Cuando trabajamos con vectores, nos vamos a encontrar con dos operaciones fundamentales, la suma o <a href="https
Step4: Cuando multiplicamos vectores por <a href="https
Step5: Cuando combinamos estas dos operaciones, formamos lo que se conoce en Álgebra lineal como combinaciones lineales. Es decir que una combinación lineal va a ser una expresión matemática construida sobre un conjunto de vectores, en el que cada vector es multiplicado por un <a href="https
Step6: La matriz identidad , la matriz transpuesta y la matriz invertible
Tres <a href="https
Step7: La matriz invertible es muy importante, ya que esta relacionada con la ecuación $Ax = b$. Si tenemos una matriz cuadrada $A$ de $n \times n$, entonces la matriz inversa de $A$ es una <a href="https
Step8: Espacios vectoriales
Las Matemáticas derivan su poder en gran medida de su capacidad para encontrar las características comunes de los diversos problemas y estudiarlos de manera abstracta. Existen muchos problemas que implican los conceptos relacionados de <a href="https
Step9: Como podemos ver, tanto por la solución numérica como por la solución gráfica, estos vectores son linealmente independientes, ya que la única solución a la ecuación $\alpha_1 x_1 + \alpha_2 x_2 + \dots + \alpha_n x_n = 0$, es aquella en que los <a href="https
Step10: Como vemos, esta solución es no trivial, ya que por ejemplo existe la solución $\alpha_1 = 1, \ \alpha_2 = -2 , \ \alpha_3 = 1$ en la que los <a href="https
Step11: Como vemos, todos los <a href="https
Step12: Otro espacio de suma importancia es el espacio columna. El espacio columna, $C(A)$, consiste en todas las combinaciones lineales de las columnas de una <a href="https
Step13: Rango
Otro concepto que también esta ligado a la independencia lineal es el de <a href="https
Step14: Una útil aplicación de calcular el <a href="https
Step15: En esta definición podemos observar que $a^2 + b^2 = v \cdot v$, por lo que ya estamos en condiciones de poder definir lo que en Álgebra lineal se conoce como norma.
El largo o norma de un vector $v = \begin{bmatrix} v_1 \ v_2 \ \vdots \ v_n \end{bmatrix}$, en $\mathbb{R}^n$ va a ser igual a un número no negativo $||v||$ definido por
Step16: Un conjunto de vectores en $\mathbb{R}^n$ va a ser <a href="https
Step17: Como vemos, este conjunto es <a href="https
Step18: Eigenvalores y Eigenvectores
Cuando estamos resolviendo ecuaciones lineales del tipo $Ax = b$, estamos trabajando con problemas estáticos. ¿Pero qué pasa si quisiéramos trabajar con problemas dinámicos?. Es en este tipo de situaciones donde los Eigenvalores y Eigenvectores tienen su mayor importancia.
Supongamos que tenemos una matriz cuadrada $A$ de $n \times n$. Una pregunta natural que nos podríamos hacer sobre $A$ es | Python Code:
# <!-- collapse=True -->
# importando modulos necesarios
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg
import scipy.linalg as la
import sympy
# imprimir con notación matemática.
sympy.init_printing(use_latex='mathjax')
# <!-- collapse=True -->
# graficando vector en R^2 [2, 4]
def move_spines():
Crea la figura de pyplot y los ejes. Mueve las lineas de la izquierda
y de abajo para que se intersecten con el origen. Elimina las lineas de
la derecha y la de arriba. Devuelve los ejes.
fix, ax = plt.subplots()
for spine in ["left", "bottom"]:
ax.spines[spine].set_position("zero")
for spine in ["right", "top"]:
ax.spines[spine].set_color("none")
return ax
def vect_fig(vector, color):
Genera el grafico de los vectores en el plano
v = vector
ax.annotate(" ", xy=v, xytext=[0, 0], color=color,
arrowprops=dict(facecolor=color,
shrink=0,
alpha=0.7,
width=0.5))
ax.text(1.1 * v[0], 1.1 * v[1], v)
ax = move_spines()
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
ax.grid()
vect_fig([2, 4], "blue")
Explanation: Más Álgebra lineal con Python
Esta notebook fue creada originalmente como un blog post por Raúl E. López Briega en Matemáticas, análisis de datos y python. El contenido esta bajo la licencia BSD.
<img alt="Algebra lineal" title="Algebra lineal" src="https://relopezbriega.github.io/images/lin-alg.jpg">
Introducción
El Álgebra lineal constituye la base de gran parte de las matemáticas modernas, ya sea en su fase teórica, aplicada, o computacional. Es un área activa que tiene conexiones con muchas áreas dentro y fuera de las matemáticas, como ser: el análisis funcional, las ecuaciones diferenciales, la investigación operativa, la econometría y la ingeniería. Es por esto, que se vuelve sumamente importante conocer sus métodos en profundidad.
La idea de este artículo, es profundizar alguno de los temas que ya vimos en mi artículo anterior (Álgebra lineal con Python), presentar algunos nuevos, e ilustrar la utilidad de esta rama de la matemáticas con alguna de sus aplicaciones.
Campos
Un <a href="https://es.wikipedia.org/wiki/Cuerpo_(matem%C3%A1ticas)">Campo</a>, $F$, es una estructura algebraica en la cual las operaciones de <a href="https://es.wikipedia.org/wiki/Adici%C3%B3n_(matem%C3%A1ticas)">adición</a> y multiplicación se pueden realizar y cumplen con las siguientes propiedades:
La propiedad conmutativa tanto para la <a href="https://es.wikipedia.org/wiki/Adici%C3%B3n_(matem%C3%A1ticas)">adición</a> como para la multiplicación; es decir: $a + b = b + a$; y $a \cdot b = b \cdot a$; para todo $a, b \in F$
La <a href="https://es.wikipedia.org/wiki/Asociatividad_(%C3%A1lgebra)">propiedad asociativa</a>, tanto para la <a href="https://es.wikipedia.org/wiki/Adici%C3%B3n_(matem%C3%A1ticas)">adición</a> como para la multiplicación; es decir: $(a + b) + c = a + (b + c)$; y $(a \cdot b) \cdot c = a \cdot (b \cdot c)$; para todo $a, b, c \in F$
La propiedad distributiva de la multiplicación sobre la <a href="https://es.wikipedia.org/wiki/Adici%C3%B3n_(matem%C3%A1ticas)">adición</a>; es decir: $a \cdot (b + c) = a \cdot b + a \cdot c$; para todo $a, b, c \in F$
La existencia de un elemento neutro tanto para la <a href="https://es.wikipedia.org/wiki/Adici%C3%B3n_(matem%C3%A1ticas)">adición</a> como para la multiplicación; es decir: $a + 0 = a$; y $a \cdot 1 = a$; para todo $a \in F$.
La existencia de un elemento inverso tanto para la <a href="https://es.wikipedia.org/wiki/Adici%C3%B3n_(matem%C3%A1ticas)">adición</a> como para la multiplicación; es decir: $a + (-a) = 0$; y $a \cdot a^{-1} = 1$; para todo $a \in F$ y $a \ne 0$.
Dos de los <a href="https://es.wikipedia.org/wiki/Cuerpo_(matem%C3%A1ticas)">Campos</a> más comunes con los que nos vamos a encontrar al trabajar en problemas de Álgebra lineal, van a ser el conjunto de los números reales, $\mathbb{R}$; y el conjunto de los números complejos, $\mathbb{C}$.
Vectores
Muchas nociones físicas, tales como las fuerzas, velocidades y aceleraciones, involucran una magnitud (el valor de la fuerza, velocidad o aceleración) y una dirección. Cualquier entidad que involucre magnitud y dirección se llama vector. Los vectores se representan por flechas en las que la longitud de ellas define la magnitud; y la dirección de la flecha representa la dirección del vector. Podemos pensar en los vectores como una serie de números. Éstos números tienen una orden preestablecido, y podemos identificar cada número individual por su índice en ese orden. Los vectores identifican puntos en el espacio, en donde cada elemento representa una coordenada del eje en el espacio. La típica forma de representarlos es la siguiente:
$$v = \left[ \begin{array}{c} x_1 \ x_2 \ \vdots \ x_n \end{array} \right]$$
Geométricamente podemos representarlos del siguiente modo en el plano de 2 dimensiones:
End of explanation
# <!-- collapse=True -->
# graficando suma de vectores en R^2
# [2, 4] + [2, -2]
ax = move_spines()
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
ax.grid()
vecs = [[2, 4], [2, -2]] # lista de vectores
for v in vecs:
vect_fig(v, "blue")
v = np.array([2, 4]) + np.array([2, -2])
vect_fig(v, "red")
ax.plot([2, 4], [-2, 2], linestyle='--')
a =ax.plot([2, 4], [4, 2], linestyle='--' )
Explanation: Combinaciones lineales
Cuando trabajamos con vectores, nos vamos a encontrar con dos operaciones fundamentales, la suma o <a href="https://es.wikipedia.org/wiki/Adici%C3%B3n_(matem%C3%A1ticas)">adición</a>; y la multiplicación por <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalares</a>. Cuando sumamos dos vectores $v$ y $w$, sumamos elemento por elemento, del siguiente modo:
$$v + w = \left[
\begin{array}{c}
v_1 \
v_2 \
\vdots \
v_n
\end{array}
\right] + \left[
\begin{array}{c}
w_1 \
w_2 \
\vdots \
w_n
\end{array}
\right] =
\left[
\begin{array}{c}
v_1 + w_1 \
v_2 + w_2 \
\vdots \
v_n + w_n
\end{array}
\right]$$
Geométricamente lo podemos ver representado del siguiente modo:
End of explanation
# <!-- collapse=True -->
# graficando multiplicación por escalares en R^2
# [2, 3] * 2
ax = move_spines()
ax.set_xlim(-6, 6)
ax.set_ylim(-6, 6)
ax.grid()
v = np.array([2, 3])
vect_fig(v, "blue")
v = v * 2
vect_fig(v, "red")
Explanation: Cuando multiplicamos vectores por <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalares</a>, lo que hacemos es tomar un número $\alpha$ y un vector $v$; y creamos un nuevo vector $w$ en el cada elemento de $v$ es multiplicado por $\alpha$ del siguiente modo:
$$\begin{split}\alpha v = \left[
\begin{array}{c}
\alpha v_1 \
\alpha v_2 \
\vdots \
\alpha v_n
\end{array}
\right]\end{split}$$
Geométricamente podemos representar a esta operación en el plano de 2 dimensiones del siguiente modo:
End of explanation
# Resolviendo sistema de ecuaciones con SymPy
A = sympy.Matrix(( (2, 3, 5), (3, 6, 2), (8, 3, 6) ))
A
b = sympy.Matrix(3,1,(52,61,75))
b
# Resolviendo Ax = b
x = A.LUsolve(b)
x
# Comprobando la solución
A*x
Explanation: Cuando combinamos estas dos operaciones, formamos lo que se conoce en Álgebra lineal como combinaciones lineales. Es decir que una combinación lineal va a ser una expresión matemática construida sobre un conjunto de vectores, en el que cada vector es multiplicado por un <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalar</a> y los resultados son luego sumados. Matemáticamente lo podemos expresar de la siguiente forma:
$$w = \alpha_1 v_1 + \alpha_2 v_2 + \dots + \alpha_n v_n = \sum_{i=1}^n \alpha_i v_i
$$
en donde, $v_n$ son vectores y $\alpha_n$ son <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalares</a>.
Matrices, combinaciones lineales y Ax = b
Una <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> es un arreglo bidimensional de números ordenados en filas y columnas, donde una fila es cada una de las líneas horizontales de la matriz y una columna es cada una de las líneas verticales. En una <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> cada elemento puede ser identificado utilizando dos índices, uno para la fila y otro para la columna en que se encuentra. Las podemos representar de la siguiente manera:
$$A=\begin{bmatrix}a_{11} & a_{12} & \dots & a_{1n}\\ a_{21} & a_{22} & \dots & a_{2n}\\ \vdots & \vdots & \ddots & \vdots \\ a_{n1} & a_{n2} & \dots & a_{nn}\end{bmatrix}$$
Las <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matrices</a> se utilizan para múltiples aplicaciones y sirven, en particular, para representar los coeficientes de los sistemas de ecuaciones lineales o para representar combinaciones lineales.
Supongamos que tenemos los siguientes 3 vectores:
$$x_1 = \left[
\begin{array}{c}
1 \
-1 \
0
\end{array}
\right]
\
x_2 = \left[
\begin{array}{c}
0 \
1 \
-1
\end{array}
\right] \
x_3 = \left[
\begin{array}{c}
0 \
0 \
1
\end{array}
\right]$$
su combinación lineal en el espacio de 3 dimensiones va a ser igual a $\alpha_1 x_1 + \alpha_2 x_2 + \alpha_3 x_3$; lo que es lo mismo que decir:
$$\alpha_1 \left[ \begin{array}{c} 1 \ -1 \ 0
\end{array}
\right] + \alpha_2
\left[
\begin{array}{c}
0 \
1 \
-1
\end{array}
\right] + \alpha_3
\left[
\begin{array}{c}
0 \
0 \
1
\end{array}
\right] = \left[
\begin{array}{c}
\alpha_1 \
\alpha_2 - \alpha_1 \
\alpha_3 - \alpha_2
\end{array}
\right]$$
Ahora esta combinación lineal la podríamos reescribir en forma matricial. Los vectores $x_1, x_2$ y $x_3$, pasarían a formar las columnas de la <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> $A$ y los <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalares</a> $\alpha_1, \alpha_2$ y $\alpha_3$ pasarían a ser los componentes del vector $x$ del siguiente modo:
$$\begin{bmatrix}1 & 0 & 0\\ -1 & 1 & 0\\ 0 & -1 & 1\end{bmatrix}\begin{bmatrix} \alpha_1 \\ \alpha_2 \\ \alpha_3\end{bmatrix}=
\begin{bmatrix}\alpha_1 \\ \alpha_2 - \alpha_1 \\ \alpha_3 - \alpha_2 \end{bmatrix}$$
De esta forma la <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> $A$ multiplicada por el vector $x$, nos da como resultado la misma combinación lineal $b$. De esta forma, arribamos a una de las ecuaciones más fundamentales del Álgebra lineal:
$$Ax = b$$
Esta ecuación no solo nos va a servir para expresar combinaciones lineales, sino que también se vuelve de suma importancia a la hora de resolver sistemas de ecuaciones lineales, en dónde $b$ va a ser conocido y la incógnita pasa a ser $x$. Por ejemplo, supongamos que queremos resolver el siguiente sistemas de ecuaciones de 3 incógnitas:
$$ 2x_1 + 3x_2 + 5x_3 = 52 \
3x_1 + 6x_2 + 2x_3 = 61 \
8x_1 + 3x_2 + 6x_3 = 75
$$
Podemos ayudarnos de SymPy para expresar a la <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> $A$ y $b$ para luego arribar a la solución del vector $x$.
End of explanation
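Como comprobación alternativa (un esbozo con NumPy, que ya importamos al principio), podemos resolver el mismo sistema con numpy.linalg.solve y verificar que coincide con la solución obtenida con SymPy:
A_np = np.array([[2., 3., 5.],
                 [3., 6., 2.],
                 [8., 3., 6.]])
b_np = np.array([52., 61., 75.])
np.linalg.solve(A_np, b_np)  # debería dar [3., 7., 5.], igual que LUsolve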
# Matriz transpuesta
A = sympy.Matrix( [[ 2,-3,-8, 7],
[-2,-1, 2,-7],
[ 1, 0,-3, 6]] )
A
A.transpose()
# transpuesta de transpuesta vuelve a A.
A.transpose().transpose()
# creando matriz simetrica
As = A*A.transpose()
As
# comprobando simetria.
As.transpose()
Explanation: La matriz identidad , la matriz transpuesta y la matriz invertible
Tres <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matrices</a> de suma importancia en problemas de Álgebra lineal. Son la matriz identidad, la matriz transpuesta y la matriz invertible.
La matriz identidad es el elemento neutro en la multiplicación de matrices, es el equivalente al número 1. Cualquier <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> multiplicada por la matriz identidad nos da como resultado la misma <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a>. La matriz identidad es una matriz cuadrada (tiene siempre el mismo número de filas que de columnas); y su diagonal principal se compone de todos elementos 1 y el resto de los elementos se completan con 0. Suele representase con la letra $I$.
Por ejemplo la matriz identidad de 3x3 sería la siguiente:
$$I=\begin{bmatrix}1 & 0 & 0\\ 0 & 1 & 0\\ 0 & 0 & 1\end{bmatrix}$$
La matriz transpuesta de una <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> $A$ de $m \times n$ va a ser igual a la <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> $n \times m$ $A^T$, la cual se obtiene al transformar las filas en columnas y las columnas en filas, del siguiente modo:
$$\begin{bmatrix}a & b \\ c & d \\ e & f \end{bmatrix}^T=
\begin{bmatrix}a & c & e \\ b & d & f \end{bmatrix}$$
Una matriz cuadrada va a ser simétrica si $A^T = A$, es decir si $A$ es igual a su propia matriz transpuesta.
Algunas de las propiedades de las matrices transpuestas son:
a. $(A^T)^T = A$
b. $(A + B)^T = A^T + B^T$
c. $k(A)^T = k(A^T)$
d. $(AB)^T = B^T A^T$
e. $(A^r)^T = (A^T)^r$ para todos los $r$ no negativos.
f. Si $A$ es una matriz cuadrada, entonces $A + A^T$ es una matriz simétrica.
g. Para cualquier <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> $A$, $A A^T$ y $A^T A$ son matrices simétricas.
Veamos algunos ejemplos en Python
End of explanation
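Un esbozo rápido de la matriz identidad con SymPy (sympy.eye), comprobando que actúa como elemento neutro de la multiplicación:
I = sympy.eye(3)
M = sympy.Matrix([[ 2, -3, -8],
                  [-2, -1,  2],
                  [ 1,  0, -3]])
M * I == M  # True: multiplicar por la identidad no cambia la matriz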
# Matriz invertible
A = sympy.Matrix( [[1,2],
[3,9]] )
A
A_inv = A.inv()
A_inv
# A * A_inv = I
A*A_inv
# forma escalonada igual a identidad.
A.rref()
# la inversa de A_inv es A
A_inv.inv()
Explanation: La matriz invertible es muy importante, ya que esta relacionada con la ecuación $Ax = b$. Si tenemos una matriz cuadrada $A$ de $n \times n$, entonces la matriz inversa de $A$ es una <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> $A'$ o $A^{-1}$ de $n \times n$ que hace que la multiplicación $A A^{-1}$ sea igual a la matriz identidad $I$. Es decir que es la <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> recíproca de $A$.
$A A^{-1} = I$ o $A^{-1} A = I$
En caso de que estas condiciones se cumplan, decimos que la matriz es invertible.
Que una <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> sea invertible tiene importantes implicaciones, como ser:
a. Si $A$ es una matriz invertible, entonces su matriz inversa es única.
b. Si $A$ es una matriz invertible de $n \times n$, entonces el sistemas de ecuaciones lineales dado por $Ax = b$ tiene una única solución $x = A^{-1}b$ para cualquier $b$ en $\mathbb{R}^n$.
c. Una <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> va a ser invertible si y solo si su <a href="https://es.wikipedia.org/wiki/Determinante_(matem%C3%A1tica)">determinante</a> es distinto de cero. En el caso de que el <a href="https://es.wikipedia.org/wiki/Determinante_(matem%C3%A1tica)">determinante</a> sea cero se dice que la <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> es singular.
d. Si $A$ es una matriz invertible, entonces el sistema $Ax = 0$ solo tiene una solución trivial. Es decir, en las que todas las incógnitas son ceros.
e. Si $A$ es una matriz invertible, entonces su forma escalonada va a ser igual a la matriz identidad.
f. Si $A$ es una matriz invertible, entonces $A^{-1}$ es invertible y:
$$(A^{-1})^{-1} = A$$
g. Si $A$ es una matriz invertible y $\alpha$ es un <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalar</a> distinto de cero, entonces $\alpha A$ es invertible y:
$$(\alpha A)^{-1} = \frac{1}{\alpha}A^{-1}$$.
h. Si $A$ y $B$ son matrices invertibles del mismo tamaño, entonces $AB$ es invertible y:
$$(AB)^{-1} = B^{-1} A^{-1}$$.
i. Si $A$ es una matriz invertible, entonces $A^T$ es invertible y:
$$(A^T)^{-1} = (A^{-1})^T$$.
Con SymPy podemos trabajar con las matrices invertibles del siguiente modo:
End of explanation
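También podemos verificar, a modo de esbozo, un par de las propiedades enunciadas arriba, por ejemplo $(AB)^{-1} = B^{-1}A^{-1}$ y $(A^T)^{-1} = (A^{-1})^T$ (la matriz B es un ejemplo inventado):
A = sympy.Matrix([[1, 2],
                  [3, 9]])
B = sympy.Matrix([[2, 1],
                  [1, 1]])
(A * B).inv() == B.inv() * A.inv(), A.transpose().inv() == A.inv().transpose()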
# Resolviendo el sistema de ecuaciones.
A = np.array([[1.2, -2.2],
[1.1, 1.4]])
b = np.array([0., 0.])
x = np.linalg.solve(A, b)
x
# <!-- collapse=True -->
# Solución gráfica.
x_vals = np.linspace(-5, 5, 50) # crea 50 valores entre -5 y 5
ax = move_spines()
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
ax.grid()
ax.plot(x_vals, (1.2 * x_vals) / 2.2) # grafica 1.2x_1 - 2.2x_2 = 0, es decir x_2 = 1.2x_1/2.2
a = ax.plot(x_vals, -(1.1 * x_vals) / 1.4) # grafica 1.1x_1 + 1.4x_2 = 0, es decir x_2 = -1.1x_1/1.4
Explanation: Espacios vectoriales
Las Matemáticas derivan su poder en gran medida de su capacidad para encontrar las características comunes de los diversos problemas y estudiarlos de manera abstracta. Existen muchos problemas que implican los conceptos relacionados de <a href="https://es.wikipedia.org/wiki/Adici%C3%B3n_(matem%C3%A1ticas)">adición</a>, multiplicación por <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalares</a>, y la linealidad. Para estudiar estas propiedades de manera abstracta, debemos introducir la noción de espacio vectorial.
Para alcanzar la definición de un espacio vectorial, debemos combinar los conceptos que venimos viendo hasta ahora de <a href="https://es.wikipedia.org/wiki/Cuerpo_(matem%C3%A1ticas)">Campo</a>, vector y las operaciones de <a href="https://es.wikipedia.org/wiki/Adici%C3%B3n_(matem%C3%A1ticas)">adición</a>; y multiplicación por <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalares</a>. De esta forma un espacio vectorial, $V$, sobre un <a href="https://es.wikipedia.org/wiki/Cuerpo_(matem%C3%A1ticas)">Campo</a>, $F$, va a ser un conjunto en el que están definidas las operaciones de <a href="https://es.wikipedia.org/wiki/Adici%C3%B3n_(matem%C3%A1ticas)">adición</a> y multiplicación por <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalares</a>, tal que para cualquier par de elementos $x$ e $y$ en $V$, existe un elemento único $x + y$ en $V$, y para cada elemento $\alpha$ en $F$ y cada elemento $x$ en $V$, exista un único elemento $\alpha x$ en $V$, de manera que se cumplan las siguientes condiciones:
Para todo $x, y$ en $V$, $x + y = y + x$ (conmutatividad de la adición).
Para todo $x, y, z$ en $V$, $(x + y) + z = x + (y + z)$. (<a href="https://es.wikipedia.org/wiki/Asociatividad_(%C3%A1lgebra)">asociatividad</a> de la adición).
Existe un elemento en $V$ llamado $0$ tal que $x + 0 = x$ para todo $x$ en $V$.
Para cada elemento $x$ en $V$, existe un elemento $y$ en $V$ tal que $x + y = 0$.
Para cada elemento $x$ en $V$, $1 x = x$.
Para cada par, $\alpha, \beta$ en $F$ y cada elemento $x$ en $V$, $(\alpha \beta) x = \alpha (\beta x)$.
Para cada elemento $\alpha$ en $F$ y cada para de elementos $x, y$ en $V$, $\alpha(x + y) = \alpha x + \alpha y$.
Para cada par de elementos $\alpha, \beta$ en $F$ y cada elemento $x$ en $V$, $(\alpha + \beta)x = \alpha x + \beta x$.
Los espacios vectoriales más comunes son $\mathbb{R}^2$, el cual representa el plano de 2 dimensiones y consiste de todos los pares ordenados de los números reales:
$$\mathbb{R}^2 = {(x, y): x, y \in \mathbb{R}}$$
y $\mathbb{R}^3$, que representa el espacio ordinario de 3 dimensiones y consiste en todos los tríos ordenados de los números reales:
$$\mathbb{R}^3 = {(x, y, z): x, y, z \in \mathbb{R}}$$
Una de las grandes bellezas del Álgebra lineal es que podemos fácilmente pasar a trabajar sobre espacios de $n$ dimensiones, $\mathbb{R}^n$!
Tampoco tenemos porque quedarnos con solo los números reales, ya que la definición que dimos de un espacio vectorial reside sobre un <a href="https://es.wikipedia.org/wiki/Cuerpo_(matem%C3%A1ticas)">Campo</a>; y los <a href="https://es.wikipedia.org/wiki/Cuerpo_(matem%C3%A1ticas)">campos</a> pueden estar representados por números complejos. Por tanto también podemos tener espacios vectoriales $\mathbb{C}^2, \mathbb{C}^3, \dots, \mathbb{C}^n$.
Subespacios
Normalmente, en el estudio de cualquier estructura algebraica es interesante examinar subconjuntos que tengan la misma estructura que el conjunto que esta siendo considerado. Así, dentro de los espacios vectoriales, podemos tener subespacios vectoriales, los cuales son un subconjunto que cumplen con las mismas propiedades que el espacio vectorial que los contiene. De esta forma, $\mathbb{R}^3$ representa un subespacio del espacio vectorial $\mathbb{R}^n$.
Independencia lineal
La independencia lineal es un concepto aparentemente simple con consecuencias que se extienden profundamente en muchos aspectos del análisis. Si deseamos entender cuando una matriz puede ser invertible, o cuando un sistema de ecuaciones lineales tiene una única solución, o cuando una estimación por mínimos cuadrados se define de forma única, la idea fundamental más importante es la de independencia lineal de vectores.
Dado un conjunto finito de vectores $x_1, x_2, \dots, x_n$ se dice que los mismos son linealmente independientes, si y solo si, los únicos <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalares</a> $\alpha_1, \alpha_2, \dots, \alpha_n$ que satisfacen la ecuación:
$$\alpha_1 x_1 + \alpha_2 x_2 + \dots + \alpha_n x_n = 0$$
son todos ceros, $\alpha_1 = \alpha_2 = \dots = \alpha_n = 0$.
En caso de que esto no se cumpla, es decir, que existe una solución a la ecuación de arriba en que no todos los <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalares</a> son ceros, a esta solución se la llama no trivial y se dice que los vectores son linealmente dependientes.
Para ilustrar la definición y que quede más clara, veamos algunos ejemplos. Supongamos que queremos determinar si los siguientes vectores son linealmente independientes:
$$\begin{split}x_1 = \left[ \begin{array}{c} 1.2 \ 1.1 \ \end{array}
\right] \ \ \ x_2 = \left[ \begin{array}{c} -2.2 \ 1.4 \
\end{array}
\right]\end{split}$$
Para lograr esto, deberíamos resolver el siguiente sistema de ecuaciones y verificar si la única solución es aquella en que los <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalares</a> sean ceros.
$$\begin{split}\alpha_1
\left[
\begin{array}{c}
1.2 \
1.1 \
\end{array}
\right] + \alpha_2
\left[
\begin{array}{c}
-2.2 \
1.4 \
\end{array}
\right]\end{split} = 0
$$
Para resolver este sistema de ecuaciones, podemos recurrir a la ayuda de Python.
End of explanation
# Sympy para resolver el sistema de ecuaciones lineales
a1, a2, a3 = sympy.symbols('a1, a2, a3')
A = sympy.Matrix(( (3, 3, 3, 0), (2, 2, 2, 0), (2, 1, 0, 0), (3, 2, 1, 0) ))
A
sympy.solve_linear_system(A, a1, a2, a3)
Explanation: Como podemos ver, tanto por la solución numérica como por la solución gráfica, estos vectores son linealmente independientes, ya que la única solución a la ecuación $\alpha_1 x_1 + \alpha_2 x_2 + \dots + \alpha_n x_n = 0$, es aquella en que los <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalares</a> son cero.
Determinemos ahora si por ejemplo, los siguientes vectores en $\mathbb{R}^4$ son linealmente independientes: ${(3, 2, 2, 3), (3, 2, 1, 2), (3, 2, 0, 1)}$. Aquí, ahora deberíamos resolver la siguiente ecuación:
$$\alpha_1 (3, 2, 2, 3) +\alpha_2 (3, 2, 1, 2) + \alpha_3 (3, 2, 0, 1) = (0, 0, 0, 0)$$
Para resolver este sistema de ecuaciones que no es cuadrado (tiene 4 ecuaciones y solo 3 incógnitas); podemos utilizar SymPy.
End of explanation
A = sympy.Matrix(( (1, 1, 1, 0), (-2, 1, 1, 0), (-1, 2, 0, 0) ))
A
sympy.solve_linear_system(A, a1, a2, a3)
Explanation: Como vemos, esta solución es no trivial, ya que por ejemplo existe la solución $\alpha_1 = 1, \ \alpha_2 = -2 , \ \alpha_3 = 1$ en la que los <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalares</a> no son ceros. Por lo tanto este sistema es linealmente dependiente.
Por último, podríamos considerar si los siguientes polinomios son linealmente independientes: $1 -2x -x^2$, $1 + x$, $1 + x + 2x^2$. En este caso, deberíamos resolver la siguiente ecuación:
$$\alpha_1 (1 − 2x − x^2) + \alpha_2 (1 + x) + \alpha_3 (1 + x + 2x^2) = 0$$
y esta ecuación es equivalente a la siguiente:
$$(\alpha_1 + \alpha_2 + \alpha_3 ) + (−2 \alpha_1 + \alpha_2 + \alpha_3 )x + (−\alpha_1 + 2 \alpha_2 )x^2 = 0$$
Por lo tanto, podemos armar el siguiente sistema de ecuaciones:
$$\alpha_1 + \alpha_2 + \alpha_3 = 0, \
-2 \alpha_1 + \alpha_2 + \alpha_3 = 0, \
-\alpha_1 + 2 \alpha_2 = 0.
$$
El cual podemos nuevamente resolver con la ayuda de SymPy.
End of explanation
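Antes de continuar, podemos comprobar numéricamente (esbozo con NumPy) la solución no trivial hallada para los vectores de $\mathbb{R}^4$:
x1 = np.array([3, 2, 2, 3])
x2 = np.array([3, 2, 1, 2])
x3 = np.array([3, 2, 0, 1])
1 * x1 - 2 * x2 + 1 * x3  # da el vector cero, confirmando la dependencia lineal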
# Espacio nulo de una matriz
A = sympy.Matrix(((1, 5, 7), (0, 0, 9)))
A
# Calculando el espacio nulo
x = A.nullspace()
x
# Comprobando la solución
A_aum = sympy.Matrix(((1, 5, 7, 0), (0, 0, 9, 0)))
sympy.solve_linear_system(A_aum, a1, a2, a3)
# Comprobación con numpy
A = np.array([[1, 5, 7],
[0, 0, 9]])
x = np.array([[-5],
[1],
[0]])
A.dot(x)
Explanation: Como vemos, todos los <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">escalares</a> son ceros, por lo tanto estos polinomios son linealmente independientes.
Espacio nulo, espacio columna y espacio fila
Un termino particularmente relacionado con la independencia lineal es el de <a href="https://es.wikipedia.org/wiki/N%C3%BAcleo_(matem%C3%A1tica)">espacio nulo o núcleo</a>. El <a href="https://es.wikipedia.org/wiki/N%C3%BAcleo_(matem%C3%A1tica)">espacio nulo</a> de una <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> $A$, el cual lo vamos a expresar como $N(A)$, va a consistir de todas las soluciones a la ecuación fundamental $Ax = 0$. Por supuesto, una solución inmediata a esta ecuación es el caso de $x = 0$, que ya vimos que establece la independencia lineal. Esta solución solo va a ser la única que exista para los casos de matrices invertibles. Pero en el caso de las matrices singulares (aquellas que no son invertibles, que tienen <a href="https://es.wikipedia.org/wiki/Determinante_(matem%C3%A1tica)">determinante</a> igual a cero), van a existir soluciones que no son cero para la ecuación $Ax = 0$. El conjunto de todas estas soluciones, va a representar el <a href="https://es.wikipedia.org/wiki/N%C3%BAcleo_(matem%C3%A1tica)">espacio nulo</a>.
Para encontrar el <a href="https://es.wikipedia.org/wiki/N%C3%BAcleo_(matem%C3%A1tica)">espacio nulo</a> también nos podemos ayudar de SymPy.
End of explanation
# A.rref() forma escalonada.
A = sympy.Matrix( [[2,-3,-8, 7],
[-2,-1,2,-7],
[1 ,0,-3, 6]])
A.rref() # [0, 1, 2] es la ubicación de las pivot.
# Espacio columna
[ A[:,c] for c in A.rref()[1] ]
# Espacio fila
[ A.rref()[0][r,:] for r in A.rref()[1] ]
Explanation: Otro espacio de suma importancia es el espacio columna. El espacio columna, $C(A)$, consiste en todas las combinaciones lineales de las columnas de una <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a> $A$. Estas combinaciones son los posibles vectores $Ax$. Este espacio es fundamental para resolver la ecuación $Ax = b$; ya que para resolver esta ecuación debemos expresar a $b$ como una combinación de columnas. El sistema $Ax = b$, va a tener solución solamente si $b$ esta en el espacio columna de $A$. Como las <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matrices</a> tienen la forma $m \times n$, sus columnas tienen $m$ componentes ($n$ son las filas). Por lo tanto el espacio columna es un subespacio de $\mathbb{R}^m$ y no $\mathbb{R}^n$.
Por último, el otro espacio que conforma los espacios fundamentales de una <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a>, es el espacio fila, el cual esta constituido por las combinaciones lineales de las filas de una <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a>.
Para obtener estos espacios, nuevamente podemos recurrir a SymPy. Para poder obtener estos espacios, primero vamos a tener que obtener la forma escalonada de la <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matriz</a>, la cual es la forma a la que arribamos luego del proceso de eliminación.
End of explanation
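SymPy también expone métodos directos para obtener estos espacios; un esbozo (los nombres columnspace, rowspace y nullspace corresponden a la API de SymPy):
A = sympy.Matrix([[ 2, -3, -8,  7],
                  [-2, -1,  2, -7],
                  [ 1,  0, -3,  6]])
A.columnspace(), A.rowspace(), A.nullspace()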
# Calculando el rango con SymPy
A = sympy.Matrix([[1, 1, 2, 4],
[1, 2, 2, 5],
[1, 3, 2, 6]])
A
# Rango con SymPy
A.rank()
# Rango con numpy
A = np.array([[1, 1, 2, 4],
[1, 2, 2, 5],
[1, 3, 2, 6]])
np.linalg.matrix_rank(A)
Explanation: Rank
Another concept that is also tied to linear independence is the <a href="https://es.wikipedia.org/wiki/Rango_(%C3%A1lgebra_lineal)">rank</a>. The numbers of rows and columns give the size of a <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matrix</a>, but they do not necessarily represent the true size of the linear system: if, for example, a matrix $A$ contains two equal rows, the second row disappears during elimination. The true size of $A$ is given by its rank. The rank of a matrix is the maximum number of columns (respectively rows) that are linearly independent. For example, if we have the following 3 x 4 matrix:
$$A = \begin{bmatrix}1 & 1 & 2 & 4 \\ 1 & 2 & 2 & 5 \\ 1 & 3 & 2 & 6\end{bmatrix}$$
We can see that the third column $(2, 2, 2)$ is a multiple of the first and that the fourth column $(4, 5, 6)$ is the sum of the first three columns. The rank of $A$ is therefore equal to 2, since the third and fourth columns can be eliminated.
Of course, the rank can also be computed with the help of Python.
End of explanation
# <!-- collapse=True -->
# Computing the length of a vector
# it forms a right triangle
ax = move_spines()
ax.set_xlim(-6, 6)
ax.set_ylim(-6, 6)
ax.grid()
v = np.array([4, 6])
vect_fig(v, "blue")
a = ax.vlines(x=v[0], ymin=0, ymax = 6, linestyle='--', color='g')
Explanation: A useful application of computing the rank of a <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matrix</a> is determining the number of solutions of a system of linear equations, following the statement of the Rouché–Frobenius theorem. The system has at least one solution if the <a href="https://es.wikipedia.org/wiki/Rango_(%C3%A1lgebra_lineal)">rank</a> of the coefficient matrix equals the rank of the augmented matrix; in that case it has exactly one solution if that rank equals the number of unknowns.
The norm and orthogonality
If we want to know the length of a vector, all we need is the famous Pythagorean theorem. In the plane $\mathbb{R}^2$, the length of a vector $v=\begin{bmatrix}a \\ b \end{bmatrix}$ equals the distance from the origin $(0, 0)$ to the point $(a, b)$. Thanks to the Pythagorean theorem, this distance is easy to compute and equals $\sqrt{a^2 + b^2}$, as can be seen in the following figure:
End of explanation
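A small numerical sketch of the Rouché–Frobenius criterion described above; the system here is an illustrative example, not one taken from the original notebook.
import numpy as np

A = np.array([[1., 2.],
              [2., 4.],
              [3., 6.]])
b = np.array([[1.], [2.], [3.]])

# Ax = b has at least one solution iff rank(A) == rank([A | b]);
# the solution is unique when that common rank equals the number of unknowns
rank_A = np.linalg.matrix_rank(A)
rank_Ab = np.linalg.matrix_rank(np.hstack([A, b]))
rank_A, rank_Ab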
# <!-- collapse=True -->
# Orthogonal vectors
ax = move_spines()
ax.set_xlim(-6, 6)
ax.set_ylim(-6, 6)
ax.grid()
vecs = [np.array([4, 6]), np.array([-3, 2])]
for v in vecs:
vect_fig(v, "blue")
a = ax.plot([-3, 4], [2, 6], linestyle='--', color='g')
# checking their inner product
v = np.array([4, 6])
w = np.array([-3, 2])
v.dot(w)
Explanation: In this definition we can see that $a^2 + b^2 = v \cdot v$, so we are now in a position to define what in linear algebra is known as the norm.
The length or norm of a vector $v = \begin{bmatrix} v_1 \\ v_2 \\ \vdots \\ v_n \end{bmatrix}$ in $\mathbb{R}^n$ is the non-negative number $||v||$ defined by:
$$||v|| = \sqrt{v \cdot v} = \sqrt{v_1^2 + v_2^2 + \dots + v_n^2}$$
That is, the norm of a vector equals the square root of the sum of the squares of its components.
Orthogonality
The concept of perpendicularity is fundamental in geometry. Carried over to vectors in $\mathbb{R}^n$, this concept is called <a href="https://es.wikipedia.org/wiki/Ortogonalidad_(matem%C3%A1ticas)">orthogonality</a>.
Two vectors $v$ and $w$ in $\mathbb{R}^n$ are orthogonal to each other if their inner product is zero, that is, $v \cdot w = 0$.
Geometrically, we can picture it as follows:
End of explanation
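As a quick check of the norm definition above, here is a minimal sketch using the same vector $v = (4, 6)$ as in the earlier figure; np.linalg.norm is the built-in shortcut.
import numpy as np

v = np.array([4, 6])

# Norm straight from the definition sqrt(v . v) ...
np.sqrt(v.dot(v))

# ... and with NumPy's helper; both give sqrt(52)
np.linalg.norm(v)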
# checking the orthogonality of the set
v1 = np.array([2, 1, -1])
v2 = np.array([0, 1, 1])
v3 = np.array([1, -1, 1])
v1.dot(v2), v2.dot(v3), v1.dot(v3)
Explanation: A set of vectors in $\mathbb{R}^n$ is <a href="https://es.wikipedia.org/wiki/Ortogonalidad_(matem%C3%A1ticas)">orthogonal</a> if every pair of distinct vectors in the set is orthogonal, that is:
$v_i \cdot v_j = 0$ for all $i, j = 1, 2, \dots, k$ with $i \ne j$.
For example, if we have the following set of vectors in $\mathbb{R}^3$:
$$v_1 = \begin{bmatrix} 2 \\ 1 \\ -1\end{bmatrix} \quad
v_2 = \begin{bmatrix} 0 \\ 1 \\ 1\end{bmatrix} \quad
v_3 = \begin{bmatrix} 1 \\ -1 \\ 1\end{bmatrix}$$
In this case, we should check that:
$$v_1 \cdot v_2 = 0, \quad
v_2 \cdot v_3 = 0, \quad
v_1 \cdot v_3 = 0$$
End of explanation
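The next explanation points out that an orthogonal set is necessarily linearly independent; here is a minimal sketch verifying this for the same three vectors.
import numpy as np

v1 = np.array([2, 1, -1])
v2 = np.array([0, 1, 1])
v3 = np.array([1, -1, 1])

# Stack the vectors as columns; rank 3 (full rank) confirms that this
# orthogonal set is linearly independent
M = np.column_stack([v1, v2, v3])
np.linalg.matrix_rank(M)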
# Determinant with SymPy
A = sympy.Matrix( [[1, 2, 3],
[2,-2, 4],
[2, 2, 5]] )
A.det()
# Determinant with NumPy
A = np.array([[1, 2, 3],
[2,-2, 4],
[2, 2, 5]] )
np.linalg.det(A)
# Determinant as a linear function of a row
A[0] = A[0:1]*5
np.linalg.det(A)
# sign change of the determinant when two rows are swapped
A = sympy.Matrix( [[2,-2, 4],
[1, 2, 3],
[2, 2, 5]] )
A.det()
Explanation: As we can see, this set is <a href="https://es.wikipedia.org/wiki/Ortogonalidad_(matem%C3%A1ticas)">orthogonal</a>. One of the main advantages of working with orthogonal sets of vectors is that they are necessarily linearly independent.
The concept of orthogonality is one of the most important and useful in linear algebra, and it arises in many practical situations, above all when we want to compute distances.
Determinant
The <a href="https://es.wikipedia.org/wiki/Determinante_(matem%C3%A1tica)">determinant</a> is a special number that can be computed for square matrices. This number tells us many things about the <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matrix</a>. For example, it tells us whether the matrix is invertible: if the determinant is zero, the matrix is not invertible. When the matrix is invertible, the determinant of $A^{-1}$ is $1/(\det A)$. The determinant can also be useful for computing areas.
To obtain the determinant of a matrix we take the sum of the products along the diagonals of the matrix in one direction minus the sum of the products along the diagonals in the other direction. It is written with the symbol $|A|$ or $\det A$.
Some of its properties to keep in mind are:
a. The determinant of the identity matrix is 1: $\det I = 1$.
b. A matrix $A$ is singular (has no inverse) if its determinant is zero.
c. The determinant changes sign when two columns (or rows) are swapped.
d. If two rows of a matrix $A$ are equal, then its determinant is zero.
e. If some row of the matrix $A$ is all zeros, then the determinant is zero.
f. The transpose $A^T$ has the same determinant as $A$.
g. The determinant of $AB$ equals the determinant of $A$ multiplied by the determinant of $B$: $\det (AB) = \det A \cdot \det B$.
h. The determinant is a linear function of each row taken separately: if we multiply a single row by $\alpha$, the determinant is also multiplied by $\alpha$.
Let's see how we can obtain the determinant with the help of Python.
End of explanation
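A short numerical check of properties f and g listed above; a minimal sketch where the second matrix B is an arbitrary example, not taken from the original notebook.
import numpy as np

A = np.array([[1, 2, 3],
              [2, -2, 4],
              [2, 2, 5]])
B = np.array([[2, 0, 1],
              [1, 3, 0],
              [0, 1, 1]])

# Property f: det(A^T) == det(A)
np.linalg.det(A.T), np.linalg.det(A)

# Property g: det(AB) == det(A) * det(B)
np.linalg.det(A @ B), np.linalg.det(A) * np.linalg.det(B)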
# Eigenvalues with NumPy
A = np.array([[3, 2],
[7, -2]])
x, v = np.linalg.eig(A)
# x: eigenvalues, v: eigenvectors
x, v
# Eigenvalues with SymPy
A = sympy.Matrix([[3, 2],
[7, -2]])
# Eigenvalues
A.eigenvals()
# Eigenvectors
A.eigenvects()
# checking the solution Ax = λx
# here x is an eigenvector and v its eigenvalue
x = A.eigenvects()[0][2][0]
v = A.eigenvects()[0][0]
# Ax == vx
A*x, v*x
Explanation: Eigenvalues and eigenvectors
When we solve linear equations of the form $Ax = b$ we are working with static problems. But what if we want to work with dynamic problems? It is in that kind of situation that eigenvalues and eigenvectors have their greatest importance.
Suppose we have a square $n \times n$ matrix $A$. A natural question to ask about $A$ is: is there a nonzero vector $x$ for which $Ax$ is a <a href="https://es.wikipedia.org/wiki/Escalar_(matem%C3%A1tica)">scalar</a> multiple of $x$? If we put this question into mathematical language, we arrive at the following equation:
$$Ax = \lambda x$$
When this equation holds and $x$ is not zero, we say that $\lambda$ is an eigenvalue of $A$ and that $x$ is its corresponding eigenvector.
Many problems in science lead to eigenvalue problems, in which the main question is: what are the eigenvalues of a given <a href="https://es.wikipedia.org/wiki/Matriz_(matem%C3%A1ticas)">matrix</a>, and what are their corresponding eigenvectors? One area where this theory is very useful is systems of linear differential equations.
Computing eigenvalues
So far so good, but given a square $n \times n$ matrix $A$, how can we obtain its eigenvalues?
We can start by observing that the equation $Ax = \lambda x$ is equivalent to $(A - \lambda I)x = 0$. Since we are interested in solutions of this equation that are nonzero, the matrix $A - \lambda I$ must be singular (not invertible), so its <a href="https://es.wikipedia.org/wiki/Determinante_(matem%C3%A1tica)">determinant</a> must be zero, $\det (A - \lambda I) = 0$. We can therefore use this equation to find the eigenvalues of $A$. In particular, we can form the characteristic polynomial of the matrix $A$, which has degree $n$ and therefore $n$ roots, which means we will find $n$ eigenvalues.
Something to keep in mind is that even if the matrix $A$ is real, we must be prepared to find complex eigenvalues.
To make this clearer, let's see an example of how to compute eigenvalues. Suppose we have the following matrix:
$$A = \begin{bmatrix} 3 & 2 \\ 7 & -2 \end{bmatrix}$$
Its characteristic polynomial is:
$$p(\lambda) = \det (A - \lambda I) = \det \begin{bmatrix}3 - \lambda & 2 \\ 7 & -2-\lambda\end{bmatrix} = (3 - \lambda)(-2-\lambda) - 14 = \lambda^2 - \lambda - 20 = (\lambda - 5)(\lambda + 4)$$
The eigenvalues of $A$ are therefore $5$ and $-4$.
Of course, we can also obtain them much more easily with the help of Python.
End of explanation |
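The characteristic-polynomial calculation above can also be reproduced symbolically; a minimal sketch with the same matrix:
import sympy

lam = sympy.symbols('lambda')
A = sympy.Matrix([[3, 2],
                  [7, -2]])

# det(A - lambda*I) expanded as the characteristic polynomial ...
p = A.charpoly(lam).as_expr()
p

# ... whose roots are the eigenvalues 5 and -4
sympy.solve(p, lam)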
14,709 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Contest entry by Wouter Kimman
Strategy
Step1: First steps, reading in and exploring the data are the same as Brendon's steps
Step2: 1) Prediction from training set using all wells
Let's do a first shot with random forests.
First we cheat and see how awesome we would do if the test data was not from an independent well
Step3: scale the data
Step4: 2) Prediction of Blind well
Step5: The prediction performs much much beter if the all data is included in the training,
compared to blind wells. Shouldn't be that much a surprise but doesn't this suggest some wells
are not representative of the others
Step6: This is the benchmark to beat
Step7: Basic statistics by well
Step8: 4 ) Select a feature from 1 well and play with this
Step9: Train for the test data | Python Code:
from numpy.fft import rfft
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
import plotly.plotly as py
import pandas as pd
import timeit
from sqlalchemy.sql import text
from sklearn import tree
from sklearn import cross_validation
from sklearn.cross_validation import train_test_split
from sklearn import metrics
from sklearn.cross_validation import cross_val_score
from sklearn.tree import export_graphviz
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
#import sherlock.filesystem as sfs
#import sherlock.database as sdb
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
Explanation: Contest entry by Wouter Kimman
Strategy:
Trying some pre-processing with simple random forest..hopefully preprocessing as important as type of classifier
The problem has a smell of descision trees to me, since most predictions of the neighboring
facies are very accurate.
End of explanation
filename = 'training_data.csv'
training_data0 = pd.read_csv(filename)
training_data0.head()
Explanation: First steps, reading in and exploring the data are the same as Brendon's steps:
End of explanation
correct_facies_labels = training_data0['Facies'].values
feature_vectors = training_data0.drop(['Formation', 'Well Name', 'Depth','Facies','FaciesLabels'], axis=1)
feature_vectors.describe()
Explanation: 1) Prediction from training set using all wells
Let's do a first shot with random forests.
First we cheat and see how awesome we would do if the test data was not from an independent well
End of explanation
scaler = preprocessing.StandardScaler().fit(feature_vectors)
scaled_features = scaler.transform(feature_vectors)
X_train, X_test, y_train, y_test = train_test_split(scaled_features, correct_facies_labels, test_size=0.2, random_state=0)
rf = RandomForestClassifier(max_depth = 15,n_estimators=200,max_features=None)
#rf = RandomForestClassifier()
rf.fit(X_train, y_train)
predicted_random_forest = rf.predict(X_test)
print "prediction from random forest:"
print metrics.accuracy_score(list(y_test), predicted_random_forest)
print "f1 score:"
print metrics.f1_score(list(y_test), predicted_random_forest,average = 'weighted')
training_data=training_data0.copy()
Explanation: scale the data:
End of explanation
#remove 1 well
blind = training_data[training_data['Well Name'] == 'SHANKLE']
training_data = training_data[training_data['Well Name'] != 'SHANKLE']
correct_facies_labels = training_data['Facies'].values
feature_vectors = training_data.drop(['Formation', 'Well Name', 'Depth','Facies','FaciesLabels'], axis=1)
scaler = preprocessing.StandardScaler().fit(feature_vectors)
scaled_features = scaler.transform(feature_vectors)
X_train, dum1, y_train, dum2 = train_test_split(scaled_features, correct_facies_labels, test_size=0.2, random_state=0)
rf.fit(X_train, y_train)
# get the blind well
correct_facies_labels = blind['Facies'].values
feature_vectors = blind.drop(['Formation', 'Well Name', 'Depth','Facies','FaciesLabels'], axis=1)
scaler = preprocessing.StandardScaler().fit(feature_vectors)
scaled_features = scaler.transform(feature_vectors)
predicted_random_forest = rf.predict(scaled_features)
print "All training data different from test well"
print "prediction from random forest:"
print metrics.accuracy_score(correct_facies_labels, predicted_random_forest)
print "f1 score:"
print metrics.f1_score(correct_facies_labels, predicted_random_forest,average = 'weighted')
Explanation: 2) Prediction of Blind well
End of explanation
from sklearn.metrics import confusion_matrix
from classification_utilities import display_cm, display_adj_cm
#conf = confusion_matrix(correct_facies_labels, predicted_gradboost)
conf = confusion_matrix(correct_facies_labels, predicted_random_forest)
display_cm(conf, facies_labels, hide_zeros=True)
Explanation: The prediction performs much better if all the data is included in the training,
compared to blind wells. That shouldn't be much of a surprise, but doesn't this suggest that some wells
are not representative of the others?
End of explanation
temp_1=training_data.groupby('Formation').mean()
temp_2=training_data.groupby('Facies').mean()
#temp_3=training_data.groupby('Facies').count()
temp_2
Explanation: This is the benchmark to beat : 0.44 using rf, (slightly higher for gradient boost)
3) Data exploration
Basic statistics by facies:
End of explanation
temp_4=training_data.groupby('Well Name')
#temp_4.describe()
#temp_5=training_data.groupby('Well Name').count()
#temp_5=training_data.groupby('Well Name').max()
temp_5=training_data.groupby('Well Name').mean()
temp_5
Explanation: Basic statistics by well:
End of explanation
xx0 = list(training_data0.Facies)
#xx1 = list(training_data0.DeltaPHI)
xx1 = list(training_data0.GR)
x_min1=np.roll(xx1, 1)
x_min2=np.roll(xx1, 2)
x_min3=np.roll(xx1, 3)
scale=0.5
#b, a = signal.butter(2, 0.125, analog=False)
b, a = signal.butter(2, 0.09, btype='low', analog=False)
b, a = signal.butter(2, 0.2, btype='high', analog=False)
xx1=xx1-np.mean(xx1)
xx_fil = signal.filtfilt(b, a, xx1)
xx_mf= signal.medfilt(xx1,15)
xx_grad=np.gradient(xx1)
fig, ax = plt.subplots(figsize=(30, 20))
plt.plot(scale*xx1, color='black', label='Original Delta PHI')
#plt.plot(scale*xx_grad, color='blue', label='derivative')
#plt.plot(scale*xx_fil, color='red', label='low pass filter')
#plt.plot(scale*xx_fil, color='red', label='high pass filter')
plt.plot(scale*xx_mf, color='blue', label='median filter')
#plt.plot(x_min1, color='yellow', label='1 sample shift')
#xlim([500 800])
plt.plot(xx0, color='green', label='Facies')
ax.set_xlim(400,700)
#plt.plot(sig_lf, color='#cc0000', label='lfilter')
plt.legend(loc="best")
plt.show()
def magic(df):
df1=df.copy()
b, a = signal.butter(2, 0.2, btype='high', analog=False)
feats00=['GR','ILD_log10','DeltaPHI','PHIND','PE','NM_M','RELPOS']
feats01=['GR','DeltaPHI','PHIND']
    for ii in feats00:
df1[ii]=df[ii]
name1=ii + '_1'
name2=ii + '_2'
name3=ii + '_3'
name4=ii + '_4'
xx1 = list(df[ii])
xx_mf= signal.medfilt(xx1,9)
x_min3=np.roll(xx_mf, 3)
xx1a=xx1-np.mean(xx1)
xx_fil = signal.filtfilt(b, a, xx1)
xx_grad=np.gradient(xx1a)
if ii in feats01:
df1[name1]=x_min3
df1[name2]=xx_fil
df1[name3]=xx_grad
df1[name4]=xx_mf
return df1
#del training_data1
df=training_data0.copy()
training_data1=magic(df)
x=rf.feature_importances_
kolummen = feature_vectors.columns.tolist()
mask=x>0.025
mask=x>0.035
#mask=x>0.025
x1=x[mask]
#kols=kolummen[mask]
kols=[]
kols_out=[]
count=0
for name in kolummen:
if mask[count]==True:
kols.append(name)
else:
kols_out.append(name)
count+=1
fig, ax = plt.subplots(figsize=(30, 20))
## the data
N = len(kols)
#N = len(kolummen)-18
#X=gradboost.feature_importances_
#X=rf.feature_importances_
X=x1
## necessary variables
ind = np.arange(N) # the x locations for the groups
width = 0.30 # the width of the bars
fsize=16
## the bars
rects1 = ax.bar(ind, X, width,
color='black')
# axes and labels
ax.set_xlim(-width,len(ind)+width)
#ax.set_ylim(0,45)
ax.set_xlabel('feature', fontsize=fsize)
ax.set_ylabel('importance', fontsize=fsize)
ax.set_title('feature importance', fontsize=fsize)
#xTickMarks = ['Group'+str(i) for i in range(1,6)]
xTickMarks = kols
ax.set_xticks(ind+width)
xtickNames = ax.set_xticklabels(xTickMarks, fontsize=fsize)
plt.setp(xtickNames, rotation=45, fontsize=fsize)
## add a legend
#ax.legend( (rects1[0], rects2[0]), ('Men', 'Women') )
print count
print N
plt.show()
training_data1a = training_data1.drop(kols_out, axis=1)
training_data1a.head()
def run_test(remove_well, df_train):
#df_test=training_data0
df_test=training_data1
#---------------------------------
#df_train=training_data1a
#df_train=training_data2
#df_test=df_test.drop(kols_out, axis=1)
#---------------------------------
#df_train=training_data0
#df_train=training_data1
#df_train=df_train.drop(kols_out, axis=1)
#training_data1a = training_data1.drop(kols_out, axis=1)
blind = df_test[df_test['Well Name'] == remove_well]
training_data = df_train[df_train['Well Name'] != remove_well]
correct_facies_labels_train = training_data['Facies'].values
feature_vectors = training_data.drop(['Formation', 'Well Name', 'Depth','Facies','FaciesLabels'], axis=1)
scaler = preprocessing.StandardScaler().fit(feature_vectors)
#scaled_features_train = scaler.transform(feature_vectors)
scaled_features_train = feature_vectors
rf = RandomForestClassifier(max_depth = 15, n_estimators=600)
#rf = RandomForestClassifier()
rf.fit(scaled_features_train, correct_facies_labels_train)
# get the blind well
correct_facies_labels = blind['Facies'].values
feature_vectors = blind.drop(['Formation', 'Well Name', 'Depth','Facies','FaciesLabels'], axis=1)
scaler = preprocessing.StandardScaler().fit(feature_vectors)
#scaled_features = scaler.transform(feature_vectors)
scaled_features =feature_vectors
predicted_random_forest = rf.predict(scaled_features)
#print "All training data different from test well"
#print "prediction from random forest:"
#print metrics.accuracy_score(correct_facies_labels, predicted_random_forest)
#printnt "f1 score:"
#print metrics.f1_score(correct_facies_labels, predicted_random_forest,average = None)
#print "average"
out_f1=metrics.f1_score(correct_facies_labels, predicted_random_forest,average = 'micro')
return out_f1
#print
# 5-Fold Cross validation
#print "3-Fold Cross validation"
#cv_scores = cross_val_score(rf, scaled_features, correct_facies_labels, cv=4, scoring='f1_macro')
#avg_cv_score = np.mean(cv_scores)
#print cv_scores
#avg_cv_score
#df_train=training_data1a
df_train=training_data1
wells=['CHURCHMAN BIBLE','SHANKLE','NOLAN','NEWBY','Recruit F9' ,'CROSS H CATTLE','LUKE G U','SHRIMPLIN']
av_all=[]
for remove_well in wells:
all=[]
print("well : %s, f1 for different runs:" % (remove_well))
for ii in range(3):
out_f1=run_test(remove_well,df_train)
all.append(out_f1)
av1=np.mean(all)
av_all.append(av1)
print("average f1 is %f, 2*std is %f" % (av1, 2*np.std(all)) )
print("overall average f1 is %f" % (np.mean(av_all)))
#rf = RandomForestClassifier(max_depth = 1, max_features= 'sqrt', n_estimators=50, oob_score = True)
rfc = RandomForestClassifier(max_depth = 9, max_features= 'sqrt', n_estimators=250)
#rf = RandomForestClassifier()
#rf.fit(scaled_features_train, correct_facies_labels_train)
param_grid = {
'max_depth' : [5,6,7,8,9],
'n_estimators': [150, 250, 350, 600]
}
# 'max_features': ['auto', 'sqrt', 'log2']
#}
CV_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv= 5)
#CV_rfc.fit(X, y)
CV_rfc.fit(scaled_features_train, correct_facies_labels_train)
print CV_rfc.best_params_
Explanation: 4 ) Select a feature from 1 well and play with this
End of explanation
filename = 'training_data.csv'
training_data = pd.read_csv(filename)
filename = 'validation_data_nofacies.csv'
test_data = pd.read_csv(filename)
test_data.head()
training_data['Well Name'] = training_data['Well Name'].astype('category')
training_data['Formation'] = training_data['Formation'].astype('category')
training_data['Well Name'].unique()
facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D','PS', 'BS']
training_data.loc[:,'FaciesLabels'] = training_data.apply(lambda row: label_facies(row, facies_labels), axis=1)
#preprocessing
test_data1=magic(test_data)
training_data1=magic(training_data)
def predict_final(test_well, training_data,test_data):
blind = test_data[test_data['Well Name'] == test_well]
correct_facies_labels_train = training_data['Facies'].values
feature_vectors_train = training_data.drop(['Formation', 'Well Name', 'Depth','Facies','FaciesLabels'], axis=1)
rf = RandomForestClassifier(max_depth = 15, n_estimators=600)
rf.fit(feature_vectors_train, correct_facies_labels_train)
# the blind well
feature_vectors_blind = blind.drop(['Formation', 'Well Name', 'Depth'], axis=1)
predicted_random_forest = rf.predict(feature_vectors_blind)
#out_f1=metrics.f1_score(correct_facies_labels, predicted_random_forest,average = 'micro')
return predicted_random_forest
test_well='STUART'
predicted_stu=predict_final(test_well, training_data1, test_data1)
test_well='CRAWFORD'
predicted_craw=predict_final(test_well, training_data1, test_data1)
predicted_stu
predicted_craw
Explanation: Train for the test data
End of explanation |
14,710 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Dual CRISPR Screen Analysis
Count Plots
Amanda Birmingham, CCBB, UCSD ([email protected])
Instructions
To run this notebook reproducibly, follow these steps
Step1: Matplotlib Display
Step2: CCBB Library Imports
Step3: Automated Set-Up
Step4: Count File Suffixes
Step5: Count Plots Functions
Step6: Individual fastq Plots
Step7: Individual Sample Plots
Step8: Combined Samples Plots | Python Code:
g_timestamp = ""
g_dataset_name = "20160510_A549"
g_count_alg_name = "19mer_1mm_py"
g_fastq_counts_dir = '/Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/data/interim/20160510_D00611_0278_BHK55CBCXX_A549'
g_fastq_counts_run_prefix = "19mer_1mm_py_20160615223822"
g_collapsed_counts_dir = "/Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/data/processed/20160510_A549"
g_collapsed_counts_run_prefix = "20160510_A549_19mer_1mm_py_20160616101309"
g_combined_counts_dir = ""
g_combined_counts_run_prefix = ""
g_plots_dir = ""
g_plots_run_prefix = ""
g_code_location = "/Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/src/python"
Explanation: Dual CRISPR Screen Analysis
Count Plots
Amanda Birmingham, CCBB, UCSD ([email protected])
Instructions
To run this notebook reproducibly, follow these steps:
1. Click Kernel > Restart & Clear Output
2. When prompted, click the red Restart & clear all outputs button
3. Fill in the values for your analysis for each of the variables in the Input Parameters section
4. Click Cell > Run All
<a name = "input-parameters"></a>
Input Parameters
End of explanation
%matplotlib inline
Explanation: Matplotlib Display
End of explanation
import sys
sys.path.append(g_code_location)
Explanation: CCBB Library Imports
End of explanation
# %load -s describe_var_list /Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/src/python/ccbbucsd/utilities/analysis_run_prefixes.py
def describe_var_list(input_var_name_list):
description_list = ["{0}: {1}\n".format(name, eval(name)) for name in input_var_name_list]
return "".join(description_list)
from ccbbucsd.utilities.analysis_run_prefixes import check_or_set, get_run_prefix, get_timestamp
g_timestamp = check_or_set(g_timestamp, get_timestamp())
g_collapsed_counts_dir = check_or_set(g_collapsed_counts_dir, g_fastq_counts_dir)
g_collapsed_counts_run_prefix = check_or_set(g_collapsed_counts_run_prefix, g_fastq_counts_run_prefix)
g_combined_counts_dir = check_or_set(g_combined_counts_dir, g_collapsed_counts_dir)
g_combined_counts_run_prefix = check_or_set(g_combined_counts_run_prefix, g_collapsed_counts_run_prefix)
g_plots_dir = check_or_set(g_plots_dir, g_combined_counts_dir)
g_plots_run_prefix = check_or_set(g_plots_run_prefix,
get_run_prefix(g_dataset_name, g_count_alg_name, g_timestamp))
print(describe_var_list(['g_timestamp','g_collapsed_counts_dir', 'g_collapsed_counts_run_prefix',
'g_combined_counts_dir', 'g_combined_counts_run_prefix', 'g_plots_dir',
'g_plots_run_prefix']))
from ccbbucsd.utilities.files_and_paths import verify_or_make_dir
verify_or_make_dir(g_collapsed_counts_dir)
verify_or_make_dir(g_combined_counts_dir)
verify_or_make_dir(g_plots_dir)
Explanation: Automated Set-Up
End of explanation
# %load -s get_counts_file_suffix /Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/src/python/ccbbucsd/malicrispr/construct_counter.py
def get_counts_file_suffix():
return "counts.txt"
# %load -s get_collapsed_counts_file_suffix,get_combined_counts_file_suffix /Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/src/python/ccbbucsd/malicrispr/count_combination.py
def get_collapsed_counts_file_suffix():
return "collapsed.txt"
def get_combined_counts_file_suffix():
return "counts_combined.txt"
Explanation: Count File Suffixes
End of explanation
# %load /Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/src/python/ccbbucsd/malicrispr/count_plots.py
# third-party libraries
import matplotlib.pyplot
import numpy
import pandas
# ccbb libraries
from ccbbucsd.utilities.analysis_run_prefixes import strip_run_prefix
from ccbbucsd.utilities.files_and_paths import build_multipart_fp, get_file_name_pieces, get_filepaths_by_prefix_and_suffix
# project-specific libraries
from ccbbucsd.malicrispr.count_files_and_dataframes import get_counts_df
__author__ = "Amanda Birmingham"
__maintainer__ = "Amanda Birmingham"
__email__ = "[email protected]"
__status__ = "prototype"
DEFAULT_PSEUDOCOUNT = 1
def get_boxplot_suffix():
return "boxplots.png"
def make_log2_series(input_series, pseudocount_val):
revised_series = input_series + pseudocount_val
log2_series = revised_series.apply(numpy.log2)
nan_log2_series = log2_series.replace([numpy.inf, -numpy.inf], numpy.nan)
return nan_log2_series.dropna().reset_index(drop=True)
# note that .reset_index(drop=True) is necessary as matplotlib boxplot function (perhaps among others)
# throws an error if the input series doesn't include an item with index 0--which can be the case if
# that first item was NaN and was dropped, and series wasn't reindexed.
def show_and_save_histogram(output_fp, title, count_data):
matplotlib.pyplot.figure(figsize=(20,20))
matplotlib.pyplot.hist(count_data)
matplotlib.pyplot.title(title)
matplotlib.pyplot.xlabel("log2(raw counts)")
matplotlib.pyplot.ylabel("Frequency")
matplotlib.pyplot.savefig(output_fp)
matplotlib.pyplot.show()
def show_and_save_boxplot(output_fp, title, samples_names, samples_data, rotation_val=0):
fig = matplotlib.pyplot.figure(1, figsize=(20,20))
ax = fig.add_subplot(111)
bp = ax.boxplot(samples_data)
ax.set_xticklabels(samples_names, rotation=rotation_val)
ax.set_xlabel("samples")
ax.set_ylabel("log2(raw counts)")
matplotlib.pyplot.title(title)
fig.savefig(output_fp, bbox_inches='tight')
matplotlib.pyplot.show()
def plot_raw_counts(input_dir, input_run_prefix, counts_suffix, output_dir, output_run_prefix, boxplot_suffix):
counts_fps_for_run = get_filepaths_by_prefix_and_suffix(input_dir, input_run_prefix, counts_suffix)
for curr_counts_fp in counts_fps_for_run:
_, curr_sample, _ = get_file_name_pieces(curr_counts_fp)
stripped_sample = strip_run_prefix(curr_sample, input_run_prefix)
count_header, curr_counts_df = get_counts_df(curr_counts_fp, input_run_prefix)
curr_counts_df.rename(columns={count_header:stripped_sample}, inplace=True)
count_header = stripped_sample
log2_series = make_log2_series(curr_counts_df[count_header], DEFAULT_PSEUDOCOUNT)
title = " ".join([input_run_prefix, count_header, "with pseudocount", str(DEFAULT_PSEUDOCOUNT)])
output_fp_prefix = build_multipart_fp(output_dir, [count_header, input_run_prefix])
boxplot_fp = output_fp_prefix + "_" + boxplot_suffix
show_and_save_boxplot(boxplot_fp, title, [count_header], log2_series)
hist_fp = output_fp_prefix + "_" + "hist.png"
show_and_save_histogram(hist_fp, title, log2_series)
def plot_combined_raw_counts(input_dir, input_run_prefix, combined_suffix, output_dir, output_run_prefix, boxplot_suffix):
output_fp = build_multipart_fp(output_dir, [output_run_prefix, boxplot_suffix])
combined_counts_fp = build_multipart_fp(input_dir, [input_run_prefix, combined_suffix])
combined_counts_df = pandas.read_table(combined_counts_fp)
samples_names = combined_counts_df.columns.values[1:] # TODO: remove hardcode
samples_data = []
for curr_name in samples_names:
log2_series = make_log2_series(combined_counts_df[curr_name], DEFAULT_PSEUDOCOUNT)
samples_data.append(log2_series.tolist())
title = " ".join([input_run_prefix, "all samples", "with pseudocount", str(DEFAULT_PSEUDOCOUNT)])
show_and_save_boxplot(output_fp, title, samples_names, samples_data, 90)
Explanation: Count Plots Functions
End of explanation
from ccbbucsd.utilities.files_and_paths import summarize_filenames_for_prefix_and_suffix
print(summarize_filenames_for_prefix_and_suffix(g_fastq_counts_dir, g_fastq_counts_run_prefix, get_counts_file_suffix()))
# this call makes one boxplot per raw fastq
plot_raw_counts(g_fastq_counts_dir, g_fastq_counts_run_prefix, get_counts_file_suffix(), g_plots_dir,
g_plots_run_prefix, get_boxplot_suffix())
Explanation: Individual fastq Plots
End of explanation
print(summarize_filenames_for_prefix_and_suffix(g_collapsed_counts_dir, g_collapsed_counts_run_prefix,
get_collapsed_counts_file_suffix()))
plot_raw_counts(g_collapsed_counts_dir, g_collapsed_counts_run_prefix, get_collapsed_counts_file_suffix(),
g_plots_dir, g_plots_run_prefix, get_boxplot_suffix())
Explanation: Individual Sample Plots
End of explanation
print(summarize_filenames_for_prefix_and_suffix(g_combined_counts_dir, g_combined_counts_run_prefix,
get_combined_counts_file_suffix()))
plot_combined_raw_counts(g_combined_counts_dir, g_combined_counts_run_prefix, get_combined_counts_file_suffix(),
g_plots_dir, g_plots_run_prefix, get_boxplot_suffix())
Explanation: Combined Samples Plots
End of explanation |
14,711 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Income dataset
https
Step1: Merged income, zipcode, and station id for final dataframe | Python Code:
income = pd.read_excel("../data/unique/ACS_14_5YR_B19013.xls")
income = income.loc[8:]
income.head()
income = income.drop(['Unnamed: 1', 'Unnamed: 2', 'Unnamed: 3'], axis=1)
income = income.rename(columns={'B19013: MEDIAN HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2014 INFLATION-ADJUSTED DOLLARS) - Universe: Households': 'Zip_Code', 'Unnamed: 4': 'Median_Househould_Income', '$b': 'b'})
zips = []
for elem in income['Zip_Code']:
zips.append(str(elem))
zips2 = []
for elem in zips:
zips2.append(elem[6:])
income['Zip_Code'] = zips2
income['Zip_Code'] = pd.to_numeric(income['Zip_Code'])
income.head()
income["Zip"] = income["Zip_Code"].dropna().astype('int')
income.drop('Zip_Code', axis=1, inplace=True)
Explanation: Income dataset
https://factfinder.census.gov/faces/nav/jsf/pages/searchresults.xhtml?refresh=t
The US Census generates a 'Median Household Income in the Past 12 Months (In 2014 Inflation-Adjusted Dollars)' from data collected by the American Community Survey. This report is based on 5-year estimates, from 2010-2014. The report I pulled is the most recent income-related information available by the US Census.
* I'm currently reading the data collection methodology for this project, so will update the group about how the data was collected when done.
End of explanation
stations = pd.read_csv('../data/processed/stations.csv')
# left join of stations and income_zips
stations_income = stations.merge(income, how='inner', on='Zip')
stations_income.head()
print(type(stations_income))
print(len(stations_income))
stations_income.to_csv("../data/processed/stations-income.csv")
Explanation: Merged income, zipcode, and station id for final dataframe
End of explanation |
14,712 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Requirement
Step1: Sending a mail is, with the proper library, a piece of cake...
Step2: ... but if we take it a little further, we can connect our doorbell project to the sending of mail!
APPKEY is the Application Key for a (free) http | Python Code:
MAIL_SERVER = "mail.****.com"
FROM_ADDRESS = "noreply@****.com"
TO_ADDRESS = "my_friend@****.com"
Explanation: Requirement:
For sending mail you need an outgoing mail server (that, in the case of this script, also needs to allow unauthenticated outgoing communication). Fill out the required credentials in the folowing variables:
End of explanation
from sender import Mail
mail = Mail(MAIL_SERVER)
mail.fromaddr = ("Secret admirer", FROM_ADDRESS)
mail.send_message("Raspberry Pi has a soft spot for you", to=TO_ADDRESS, body="Hi sweety! Grab a smoothie?")
Explanation: Sending a mail is, with the proper library, a piece of cake...
End of explanation
APPKEY = "******"
mail.fromaddr = ("Your doorbell", FROM_ADDRESS)
mail_to_addresses = {
"Donald Duck":"dd@****.com",
"Maleficent":"mf@****.com",
"BigBadWolf":"bw@****.com"
}
def on_message(sender, channel, message):
mail_message = "{}: Call for {}".format(channel, message)
print(mail_message)
mail.send_message("Raspberry Pi alert!", to=mail_to_addresses[message], body=mail_message)
import ortc
oc = ortc.OrtcClient()
oc.cluster_url = "http://ortc-developers.realtime.co/server/2.1"
def on_connected(sender):
print('Connected')
oc.subscribe('doorbell', True, on_message)
oc.set_on_connected_callback(on_connected)
oc.connect(APPKEY)
Explanation: ... but if we take it a little further, we can connect our doorbell project to the sending of mail!
APPKEY is the Application Key for a (free) http://www.realtime.co/ "Realtime Messaging Free" subscription.
See "104 - Remote deurbel - Een cloud API gebruiken om berichten te sturen" voor meer gedetailleerde info. info.
End of explanation |
14,713 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Tuning BM25 parameters
We tune BM25 parameters on a per-field basis including doc2query expansions and bigrammed text fields. These values are used later when optimizing more complex queries.
Step1: Baseline evaluation
Step2: Optimization
Step3: Best parameters
Step4: Conclusion
If you'd like to reset all parameters to their defaults, you can do so with the following code. However if you want to stick with the optimal values as shown above, you should skip running this. | Python Code:
%load_ext autoreload
%autoreload 2
import importlib
import os
import sys
from copy import deepcopy
from elasticsearch import Elasticsearch
from skopt.plots import plot_objective
# project library
sys.path.insert(0, os.path.abspath('..'))
import qopt
importlib.reload(qopt)
from qopt.notebooks import evaluate_mrr100_dev_templated, optimize_bm25_mrr100_templated, set_bm25_params
from qopt.optimize import Config, set_bm25_parameters
# use a local Elasticsearch or Cloud instance (https://cloud.elastic.co/)
es = Elasticsearch('http://localhost:9200')
# set the parallelization parameter `max_concurrent_searches` for the Rank Evaluation API calls
max_concurrent_searches = 10
index = 'msmarco-document.doc2query'
template_id = 'query'
# no query params
query_params = {}
# field names
field_names = [
'url',
'title', 'title.bigrams',
'body', 'body.bigrams',
'expansions', 'expansions.bigrams'
]
similarity_names = [f"bm25-{x.replace('.', '-')}" for x in field_names]
similarity_name_by_field = { field: similarity for field, similarity in zip(field_names, similarity_names) }
# default Elasticsearch BM25 params
default_bm25_params = {'k1': 1.2, 'b': 0.75}
# base template for tuning
base_templates = [{
"id": template_id,
"template": {
"lang": "mustache",
"source": { "query": {} }
}
}]
def reset_all():
for similarity in similarity_names:
set_bm25_parameters(es, index, name=similarity, **default_bm25_params)
def for_all_fields(message, fn, existing_results=None):
_results = {}
for field, similarity in zip(field_names, similarity_names):
print(f"{message}: {field}")
if not existing_results:
params = default_bm25_params
else:
params = existing_results[field][1]
set_bm25_parameters(es, index, name=similarity, **params)
_templates = deepcopy(base_templates)
_templates[0]['template']['source']['query']['match'] = { field: { "query": "{{query_string}}" } }
_results[field] = fn(_templates, similarity)
return _results
Explanation: Tuning BM25 parameters
We tune BM25 parameters on a per-field basis including doc2query expansions and bigrammed text fields. These values are used later when optimizing more complex queries.
End of explanation
%%time
_ = for_all_fields(
"Dev set evaluation",
fn=lambda templates, similarity: evaluate_mrr100_dev_templated(es, max_concurrent_searches, index, templates, template_id, query_params),
)
Explanation: Baseline evaluation
End of explanation
%%time
results = for_all_fields(
"Optimization",
fn=lambda templates, similarity: optimize_bm25_mrr100_templated(es, max_concurrent_searches, index, templates, template_id, query_params,
config_space=Config.parse({
'method': 'bayesian',
'num_iterations': 40,
'num_initial_points': 20,
'space': {
'k1': { 'low': 0.0, 'high': 5.0 },
'b': { 'low': 0.3, 'high': 1.0 },
}
}),
name=similarity),
)
for field, (_, _, _, metadata) in results.items():
_ = plot_objective(metadata, sample_source='result')
%%time
_ = for_all_fields(
"Dev set evaluation",
fn=lambda templates, similarity: evaluate_mrr100_dev_templated(es, max_concurrent_searches, index, templates, template_id, query_params),
existing_results=results,
)
Explanation: Optimization
End of explanation
best_params = [(x, best) for x, (_, best, _, _) in results.items()]
best_params
set_bm25_params(es, index, best_params)
Explanation: Best parameters
End of explanation
reset_all()
# best params from all previous runs with current fields and analyzers
[
('url', {'k1': 0.33066956222950633, 'b': 0.9589101032169087}), # 0.2201
('title', {'k1': 0.34885436112727763, 'b': 1.0}), # 0.2354
('title.bigrams', {'k1': 1.2, 'b': 0.75}), # 0.1295
('body', {'k1': 3.0128735487205525, 'b': 0.8200709176657588}), # 0.2645
('body.bigrams', {'k1': 1.9100199633100623, 'b': 0.7336619962002098}), # 0.2045
('expansions', {'k1': 4.870954366799399, 'b': 0.9249613913608172}), # 0.3220
('expansions.bigrams', {'k1': 1.2, 'b': 0.75}) # 0.2837
]
Explanation: Conclusion
If you'd like to reset all parameters to their defaults, you can do so with the following code. However if you want to stick with the optimal values as shown above, you should skip running this.
End of explanation |
14,714 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
!!! D . R . A . F . T !!!
Luminance
The Luminance $L_v$ is the quantity defined by the formula
Step1: Note
Step2: Note
Step3: ASTM D1535-08$^{\epsilon 1}$ (2008) Method
Since 1943, the reference white used for the Munsell Renotation System has changed.
As a result the quintic-parabola function from Newhall, Nickerson, and Judd (1943) has been adjusted
Step4: Note
Step5: CIE 1976 Method
The CIE $L^a^b^$ approximately uniform colourspace defined in 1976 computes the luminance* $Y$ quantity as follows
Step6: Note
Step7: Fairchild and Wyble (2010) Method
Step8: Fairchild and Chen (2011) Method | Python Code:
import colour
colour.utilities.filter_warnings(True, False)
sorted(colour.LUMINANCE_METHODS.keys())
Explanation: !!! D . R . A . F . T !!!
Luminance
The Luminance $L_v$ is the quantity defined by the formula: <a name="back_reference_1"></a><a href="#reference_1">[1]</a>
$$
\begin{equation}
L_v=\cfrac{d\Phi_v}{dAcos\theta d\Omega}
\end{equation}
$$
where $d\Phi_v$ is the luminous flux transmitted by an elementary beam passing through the given point and propagating in the solid angle, $d\Omega$, containing the given direction. $dA$ is the area of a section of that beam containing the given point. $\theta$ is the angle between the normal to that section and the direction of the beam.
$L_v$ unit is candela per square metre (or nits) $cd\cdot m^{-2}=lm\cdot m^{-2}\cdot sr^{-1}$.
Colour defines the following luminance computation methods:
End of explanation
colour.colorimetry.luminance_Newhall1943(3.74629715382)
Explanation: Note: 'astm2008' and 'cie1976' are convenient aliases for respectively 'ASTM D1535-08' and 'CIE 1976'.
Newhall, Nickerson, and Judd (1943) Method
Newhall, Nickerson, and Judd (1943) fitted a quintic-parabola function to the adjusted Munsell-Sloan-Godlove reflectances, the resulting equation computing luminance $R_Y$ as function of Munsell value $V$ is expressed as follows: <a name="back_reference_2"></a><a href="#reference_2">[2]</a>
$$
\begin{equation}
R_Y=1.2219V-0.23111V^2+0.23951V^3-0.021009V^4+0.0008404V^5
\end{equation}
$$
See Also: The Munsell Renotation System notebook for in-depth information about the Munsell Renotation System.
The colour.luminance_Newhall1943 definition is used to compute luminance $R_Y$:
End of explanation
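As a quick cross-check of the quintic parabola above, the polynomial can be evaluated directly; a minimal sketch, where 3.74629715382 is the same Munsell value used in the surrounding calls.
V = 3.74629715382
coefficients = [1.2219, -0.23111, 0.23951, -0.021009, 0.0008404]

# R_Y = 1.2219 V - 0.23111 V^2 + 0.23951 V^3 - 0.021009 V^4 + 0.0008404 V^5
R_Y = sum(c * V ** (i + 1) for i, c in enumerate(coefficients))
R_Y  # should agree with colour.colorimetry.luminance_Newhall1943(V)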
colour.colorimetry.luminance(3.74629715382, method='Newhall 1943')
Explanation: Note: Input Munsell value $V$ is in domain [0, 10], output luminance $R_Y$ is in domain [0, 100].
The colour.luminance definition is implemented as a wrapper for various luminance computation methods:
End of explanation
colour.colorimetry.luminance_ASTMD153508(3.74629715382)
Explanation: ASTM D1535-08$^{\epsilon 1}$ (2008) Method
Since 1943, the reference white used for the Munsell Renotation System has changed.
As a result the quintic-parabola function from Newhall, Nickerson, and Judd (1943) has been adjusted: Each coefficient of the function has been multiplied by 0.975, the reflectance factor of magnesium oxide with respect to the perfect reflecting diffuser and then rounded to five digits.
The updated equation for computing luminance $Y$ as function of the Munsell value $V$ is expressed as follows: <a name="back_reference_3"></a><a href="#reference_3">[3]</a>
$$
\begin{equation}
Y=1.1914V-0.22533V^2+0.23352V^3-0.020484V^4+0.00081939V^5
\end{equation}
$$
See Also: The Munsell Renotation System notebook for in-depth information about the Munsell Renotation System.
The colour.luminance_ASTMD153508 definition is used to compute luminance $Y$:
End of explanation
colour.luminance(3.74629715382, method='ASTM D1535-08')
colour.luminance(3.74629715382, method='astm2008')
Explanation: Note: Input Munsell value $V$ is in domain [0, 10], output luminance $Y$ is in domain [0, 100].
Using the colour.luminance wrapper definition:
End of explanation
colour.colorimetry.luminance_CIE1976(37.9856290977)
Explanation: CIE 1976 Method
The CIE $L^*a^*b^*$ approximately uniform colourspace defined in 1976 computes the *luminance* $Y$ quantity as follows: <a name="back_reference_4"></a><a href="#reference_4">[4]</a>
$$
\begin{equation}
Y=\begin{cases}Y_n\biggl(\cfrac{L^*+16}{116}\biggr)^3 & for\ L^*>\kappa\epsilon\\
Y_n\biggl(\cfrac{L^*}{\kappa}\biggr) & for\ L^*<=\kappa\epsilon
\end{cases}
\end{equation}
$$
where $Y_n$ is the reference white luminance.
with
$$
\begin{equation}
\begin{aligned}
\epsilon&\ =\begin{cases}0.008856 & Actual\ CIE\ Standard\
216\ /\ 24389 & Intent\ of\ the\ CIE\ Standard
\end{cases}\
\kappa&\ =\begin{cases}903.3 & Actual\ CIE\ Standard\
24389\ /\ 27 & Intent\ of\ the\ CIE\ Standard
\end{cases}
\end{aligned}
\end{equation}
$$
The original $\epsilon$ and $\kappa$ constants values have been shown to exhibit discontinuity at the junction point of the two functions grafted together to create the Lightness $L^*$ function. <a name="back_reference_5"></a><a href="#reference_5">[5]</a>
Colour uses the rational values instead of the decimal values for these constants.
See Also: The CIE $L^*a^*b^*$ Colourspace notebook for in-depth information about the CIE $L^*a^*b^*$ colourspace.
The colour.luminance_CIE1976 definition is used to compute Luminance $Y$:
End of explanation
colour.luminance(37.9856290977, method='CIE 1976')
colour.luminance(37.9856290977, method='cie1976')
Explanation: Note: Input Lightness $L^*$ and $Y_n$ are in domain [0, 100], output *luminance* $Y$ is in domain [0, 100].
Using the colour.luminance wrapper definition:
End of explanation
colour.colorimetry.luminance_Fairchild2010(24.902290269546651, 1.836)
colour.luminance(24.902290269546651, method='Fairchild 2010', epsilon=1.836)
Explanation: Fairchild and Wyble (2010) Method
End of explanation
colour.colorimetry.luminance_Fairchild2011(26.459509817572265, 0.710)
colour.luminance(26.459509817572265, method='Fairchild 2011', epsilon=0.710)
Explanation: Fairchild and Chen (2011) Method
End of explanation |
14,715 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<center><img src="src/ipyleaflet.svg" width="50%"></center>
Repository
Step1: Layers
Marker
Step2: Heatmap layer
Step3: Velocity
Step4: Controls
Step5: Clean | Python Code:
from ipyleaflet import Map, basemaps, basemap_to_tiles
center = (52.204793, 360.121558)
m = Map(
layers=(basemap_to_tiles(basemaps.NASAGIBS.ModisTerraTrueColorCR, "2018-11-12"), ),
center=center,
zoom=4
)
m
Explanation: <center><img src="src/ipyleaflet.svg" width="50%"></center>
Repository: https://github.com/jupyter-widgets/ipyleaflet
Installation:
conda install -c conda-forge ipyleaflet
Base map
End of explanation
from ipyleaflet import Marker, Icon
icon = Icon(icon_url='https://leafletjs.com/examples/custom-icons/leaf-red.png', icon_size=[38, 95], icon_anchor=[22,94])
mark = Marker(location=center, icon=icon, rotation_origin='22px 94px')
m.add_layer(mark)
import time
for _ in range(40):
mark.rotation_angle += 15
time.sleep(0.1)
Explanation: Layers
Marker
End of explanation
from ipywidgets import Button, IntSlider, link
from ipyleaflet import Heatmap
from random import gauss
import time
center = (37.09, -103.66)
zoom = 5
def create_random_data(length):
"Return a list of some random lat/lon/value triples."
return [[gauss(center[0], 2),
gauss(center[1], 4),
gauss(700, 300)] for i in range(length)]
m.center = center
m.zoom = zoom
heat = Heatmap(locations=create_random_data(1000), radius=20, blur=10)
m.add_layer(heat)
def generate(_):
heat.locations = create_random_data(1000)
button = Button(description='Generate data', button_style='success')
button.on_click(generate)
button
m
slider = IntSlider(min=10, max=30, value=heat.radius)
link((slider, 'value'), (heat, 'radius'))
slider
Explanation: Heatmap layer
End of explanation
from ipyleaflet import Velocity
import xarray as xr
center = (0, 0)
zoom = 4
m2 = Map(center=center, zoom=zoom, interpolation='nearest', basemap=basemaps.CartoDB.DarkMatter)
m2
ds = xr.open_dataset('src/wind-global.nc')
display_options = {
'velocityType': 'Global Wind',
'displayPosition': 'bottomleft',
'displayEmptyString': 'No wind data'
}
wind = Velocity(data=ds,
zonal_speed='u_wind',
meridional_speed='v_wind',
latitude_dimension='lat',
longitude_dimension='lon',
velocity_scale=0.01,
max_velocity=20,
display_options=display_options)
m2.add_layer(wind)
Explanation: Velocity
End of explanation
from ipyleaflet import Map, basemaps, basemap_to_tiles, SplitMapControl
m = Map(center=(42.6824, 365.581), zoom=5)
right_layer = basemap_to_tiles(basemaps.NASAGIBS.ModisTerraTrueColorCR, "2017-11-11")
left_layer = basemap_to_tiles(basemaps.NASAGIBS.ModisAquaBands721CR, "2017-11-11")
control = SplitMapControl(left_layer=left_layer, right_layer=right_layer)
m.add_control(control)
m
Explanation: Controls
End of explanation
from ipywidgets import Widget
Widget.close_all()
Explanation: Clean
End of explanation |
14,716 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
FMI Hirlam, MET Norway HARMONIE and NCEP GFS comparison demo
In this demo notebook we provide short comparison of using three different weather forecast models
Step1: Import datahub parsing library
Step2: Now we define hirlam and harmonie namespaces. Add server address and our API key.
<font color='red'>Please add your API key below
Step4: One can easily see what kind of variables are available in given dataset by just calling methods
Step5: Take GFS for area of HARMONIE
Step6: Dataset extent and resolution
Get some arbitrary field for demonstration, we use 2m temperature and as you can see, variable names may actually differ a lot between datasets. Please note that "get_tds_field" method is just for getting arbitrary preview image, if you wan't to query data for specific time and reftime, please refer to examples for our raster API (shown in other notebooks referenced to above) or use THREDDS server link given in dataset detail pages.
Extent
The easiest way to show dataset extent is to plot it on a map with proper projection. We do not show GFS here, because, well, it is global.
Step7: Resolution
Let's zoom in a little to illustrate difference in resolutions. By plotting the gridded data as a mesh, one can easily get the grid size from the figures. Plot's given for the Norwegian coast.
Step8: Can you guess which model is on which map by just looking at these images?
Forecast for a single location
First, get point data for all datasets for given variable and for as long time range as the forecast goes. | Python Code:
%matplotlib notebook
import numpy as np
print ('numpy version is ', np.__version__)
import matplotlib.pyplot as plt
import mpl_toolkits.basemap
print ('mpl_toolkits.basemap version is ', mpl_toolkits.basemap.__version__)
from mpl_toolkits.basemap import Basemap
import warnings
import datetime
import dateutil.parser
import matplotlib
print ('Matplotlib version is ',matplotlib.__version__)
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
import xarray as xr
Explanation: FMI Hirlam, MET Norway HARMONIE and NCEP GFS comparison demo
In this demo notebook we provide short comparison of using three different weather forecast models:
GFS -- http://data.planetos.com/datasets/noaa_gfs_pgrb2_global_forecast_recompute_0.25degree
HIRLAM -- http://data.planetos.com/datasets/fmi_hirlam_surface
HARMONIE -- http://data.planetos.com/datasets/metno_harmonie_metcoop
You can get more information about the datasets by opening links to their detail pages, but their main difference is that GFS is a global, medium range weather forecast model with lower resolution, and HIRLAM and HARMONIE are limited area models, meaning they cover only small part of the globe, but provide higher resolution of all forecasted field, in return.
First we compare the datasets by showing their spatial coverages, then we demonstrate their resolutions by showing forecast field as a discrete grid (so one can see the difference in grid cell size and resolved surface details) and finally we demonstrate plotting weather forecast for the same variable from three models.
We try to keep this demo short, but in case you are interested in creating a more interactive notebook, please refer to our other examples:
https://github.com/planet-os/demos/blob/master/notebooks/PlanetOS_WAve_Models.ipynb
https://github.com/planet-os/notebooks/blob/master/api-examples/GFS_public_full_demo_main.ipynb
Unlike previous notebooks, we have moved most of the parsing code to external library dh_py_access, which you should get automatically if you get this notebook by cloning the git repository.
If you have any questions, contact our team at https://data.planetos.com
At first, let's import some modules. If you do not have them, download them (ie. using pip or conda).
If you encounter some errors, make sure you have the same numpy, basemap and matplotlib versions.
End of explanation
from API_client.python.lib.dataset import dataset
import dh_py_access.lib.datahub as datahub
from dh_py_access import package_api
# from dh_py_access.lib.dataset import dataset as dataset
# import dh_py_access.lib.datahub as datahub
# from dh_py_access import package_api
Explanation: Import datahub parsing library
End of explanation
server = 'http://api.planetos.com/v1/datasets/'
API_key = open('APIKEY').read().strip()
dh=datahub.datahub_main(API_key)
fmi_hirlam_surface=dataset('fmi_hirlam_surface',dh)
metno_harmonie_metcoop=dataset('metno_harmonie_metcoop',dh)
gfs=dataset('noaa_gfs_pgrb2_global_forecast_recompute_0.25degree',dh)
Explanation: Now we define hirlam and harmonie namespaces. Add server address and our API key.
<font color='red'>Please add your API key below:</font>
End of explanation
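# Added helper (an assumption, not part of the original demo): the demo reads the key from
# a plain-text file named APIKEY. If that file does not exist yet, create it and paste your
# own Planet OS API key in place of the placeholder below.
import os
if not os.path.exists('APIKEY'):
    with open('APIKEY', 'w') as f:
        f.write('REPLACE-WITH-YOUR-PLANETOS-API-KEY')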
sample_var_names = {fmi_hirlam_surface:'Temperature_height_above_ground',
metno_harmonie_metcoop:'air_temperature_2m',
gfs:'tmp_m'}
today = datetime.datetime.today()
day_ago = today - datetime.timedelta(days=1)
reftime_start = datetime.datetime.strftime(day_ago, '%Y-%m-%dT') + '11:00:00'
reftime_end = datetime.datetime.strftime(day_ago, '%Y-%m-%dT') + '13:00:00'
def get_max_coverage_package(dataset, area_name, varfilter = 'temp'):
    """Download full coverage for limited area datasets."""
coords = dataset.get_dataset_boundaries()
ds_west = np.amin([i[0] for i in coords])
ds_east = np.amax([i[0] for i in coords])
ds_south = np.amin([i[1] for i in coords])
ds_north = np.amax([i[1] for i in coords])
temperature_variable = sample_var_names[dataset]
assert len(temperature_variable) >= 1, "something wrong {0}".format(temperature_variable)
assert type(temperature_variable) == str
return package_api.package_api(dh,dataset.datasetkey,temperature_variable,ds_west,ds_east,ds_south,ds_north,area_name=area_name)
area_name = 'maximum_04'
package_harmonie = get_max_coverage_package(metno_harmonie_metcoop, area_name=area_name)
package_fmi_hirlam = get_max_coverage_package(fmi_hirlam_surface, area_name=area_name)
package_harmonie.make_package()
package_fmi_hirlam.make_package()
package_harmonie.download_package()
package_fmi_hirlam.download_package()
data_harmonie = xr.open_dataset(package_harmonie.get_local_file_name())
data_fmi_hirlam = xr.open_dataset(package_fmi_hirlam.get_local_file_name(),decode_cf=False)
Explanation: One can easily see what kind of variables are available in given dataset by just calling methods:
long_names -- gives a long human readable name for variable, which is unfortunately not standardised in any way
standard_names -- gives variable names as defined in CF convention standard name table http://cfconventions.org/standard-names.html
variable_names -- names by which you can actually query data from the API
on a given dataset instance.
End of explanation
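# Added illustration (not part of the original demo): once the packages have been downloaded
# and opened with xarray above, the available variables can also be listed directly from the
# xarray datasets.
print(list(data_harmonie.data_vars))
print(list(data_fmi_hirlam.data_vars))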
left = np.amin(data_harmonie['longitude'].data)
right = np.amax(data_harmonie['longitude'].data)
bottom = np.amin(data_harmonie['latitude'].data)
top = np.amax(data_harmonie['latitude'].data)
package_gfs = package_api.package_api(dh,gfs.datasetkey,sample_var_names[gfs],left,right,bottom,top,area_name=area_name)
package_gfs.make_package()
package_gfs.download_package()
data_gfs = xr.open_dataset(package_gfs.get_local_file_name(),decode_cf=False)
Explanation: Take GFS for the area covered by HARMONIE
End of explanation
m = Basemap(projection='ortho',lon_0=10,lat_0=50,resolution='l')
hir_x,hir_y=np.meshgrid(data_fmi_hirlam['lon'],data_fmi_hirlam['lat'])
X_hir,Y_hir=m(hir_x,hir_y)
fig=plt.figure()
plt.subplot(221)
air2d = data_fmi_hirlam[sample_var_names[fmi_hirlam_surface]][0,0,:,:]
air2d = np.ma.masked_where(air2d>500,air2d)
# plot the (masked) HIRLAM temperature field on its own grid to show the dataset extent
m.pcolormesh(X_hir, Y_hir, air2d)
m.drawcoastlines()
plt.subplot(222)
harm_x,harm_y=np.meshgrid(data_harmonie.longitude,data_harmonie.latitude)
X_harm,Y_harm=m(harm_x,harm_y)
m.pcolormesh(X_harm,Y_harm,data_harmonie[sample_var_names[metno_harmonie_metcoop]][0,0,:,:])
m.drawcoastlines()
plt.colorbar()
Explanation: Dataset extent and resolution
Get some arbitrary field for demonstration; we use 2 m temperature, and as you can see, variable names may differ a lot between datasets. Please note that the "get_tds_field" method is just for getting an arbitrary preview image; if you want to query data for a specific time and reftime, please refer to the examples for our raster API (shown in the other notebooks referenced above) or use the THREDDS server link given on the dataset detail pages.
Extent
The easiest way to show dataset extent is to plot it on a map with proper projection. We do not show GFS here, because, well, it is global.
End of explanation
lon1,lon2 = 5,7
lat1,lat2 = 58,59
m2 = Basemap(projection='merc',llcrnrlat=lat1,urcrnrlat=lat2,\
llcrnrlon=lon1,urcrnrlon=lon2,lat_ts=58,resolution='i')
fig=plt.figure(figsize=(8,8))
plt.subplot(221)
## we cannot use .sel() method on hirlam data because
##it was opened with decode_cf=False
## which was because it contains both missing_value and fill_value, see https://github.com/pydata/xarray/issues/1749
x1 = np.argmin(np.abs(data_fmi_hirlam.lon-360-lon1)).data
x2 = np.argmin(np.abs(data_fmi_hirlam.lon-360-lon2)).data+1
y1 = np.argmin(np.abs(data_fmi_hirlam.lat-lat1)).data
y2 = np.argmin(np.abs(data_fmi_hirlam.lat-lat2)).data+1
height = int(np.argmin(np.abs(data_fmi_hirlam.height_above_ground-2)).data)
hir_x,hir_y=np.meshgrid(data_fmi_hirlam.lon[x1:x2].data,data_fmi_hirlam.lat[y1:y2].data)
X,Y=m2(hir_x-360,hir_y)
air2d_hirlam=data_fmi_hirlam.variables[sample_var_names[fmi_hirlam_surface]].isel(time=0,height_above_ground=height,lon=slice(x1,x2),lat=slice(y1,y2))
m2.pcolormesh(X,Y,air2d_hirlam)
m2.drawcoastlines()
plt.colorbar()
plt.subplot(222)
X,Y=m2(harm_x,harm_y)
air2d_harm = data_harmonie[sample_var_names[metno_harmonie_metcoop]].isel(time=0).sel(height1=2,longitude=slice(lon1,lon2),latitude=slice(lat1,lat2))
X,Y=m2(air2d_harm.longitude.data,air2d_harm.latitude.data)
m2.pcolormesh(X,Y,air2d_harm)
m2.drawcoastlines()
plt.colorbar()
plt.subplot(223)
ggg = data_gfs[sample_var_names[gfs]].isel(time1=0).sel(height_above_ground2=2,lon=slice(lon1,lon2),lat=slice(lat2,lat1))
x,y=np.meshgrid(ggg.lon,ggg.lat)
X,Y=m2(x,y)
m2.pcolormesh(X,Y,ggg)
m2.drawcoastlines()
plt.colorbar()
Explanation: Resolution
Let's zoom in a little to illustrate the difference in resolutions. By plotting the gridded data as a mesh, one can easily read the grid size off the figures. The plots are given for the Norwegian coast.
End of explanation
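# Added sketch (not in the original notebook): besides reading the grid size off the figures,
# the nominal grid spacing can be estimated from the coordinate arrays, assuming (as here)
# regular lon/lat grids.
print('HARMONIE dlon ~', float(np.diff(data_harmonie.longitude.data).mean()))
print('HIRLAM   dlon ~', float(np.diff(data_fmi_hirlam.lon.data).mean()))
print('GFS      dlon ~', float(np.diff(data_gfs.lon.data).mean()))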
longitude= 25.60
latitude = 58.36
ds = dataset('noaa_rbsn_timeseries',dh)
obs_data = ds.get_station_data_as_pandas(['26233'],variables='temperature',start = reftime_start)
sample_point_data = [(k,k.get_json_data_in_pandas(**{'var':v,'lon':longitude,'lat':latitude,'count':1000,'reftime_start':reftime_start,'reftime_end':reftime_end})) for k,v in sample_var_names.items()]
fig = plt.figure(figsize=(11,6))
for ddd in sample_point_data:
zlevels = [2.]
for i in zlevels:
pdata = np.array(ddd[1][ddd[1]['z']==i][sample_var_names[ddd[0]]],dtype=np.float) - 273.15
if np.sum(np.isnan(pdata)) != pdata.shape[0]:
time = ddd[1][ddd[1]['z']==i]['time']
if 'gfs' in ddd[0].datasetkey:
time = time[:-95]
pdata = pdata[:-95]
plt.plot(time, pdata, label = ddd[0].datasetkey)
plt.plot(obs_data['26233'].index,obs_data['26233']['temperature'].values,label = 'observations')
plt.legend()
plt.grid()
fig.autofmt_xdate()
plt.title('2m temperature forecast in different weather models')
plt.show()
Explanation: Can you guess which model is on which map by just looking at these images?
Forecast for a single location
First, get point data for all datasets for a given variable and for as long a time range as the forecast goes.
End of explanation |
14,717 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Features for Trajectory Recommendation
Features
Load Data
Compute POI Info
Construct Travelling Sequences
Compute Some Sequence Statistics
Compute Transition Probabilities
Basic Definitions
Transition Probabilities between POI Categories
Transition Probabilities between POI Popularity Classes
Transition Probabilities between POI Pair Distance Classes
Compute Trajectory Likelihood (For tuning the discretization strategy)
Log Likelihood of Actual Trajectories
Log Likelihood of Enumerated Trajectories
Compare the Log Likelihood of Actual and Enumerated Trajectories
Compute the F1-score of Enumerated Trajectories
<a id='sec1'></a>
1. Features
POI Features ?
POI category
POI popularity
POI location
Transition Features
POI category (a transition matrix between different categories)
POI popularity (a transition matrix between different class of popularity)
POI pair distance (a transition matrix between different class of distance)
Computation Steps
First compute the above features using a set of travelling sequences,
Then compute the log likelihood of (actual/enumerated) sequences using POI category/popularity transition matrix and POI pair distance transition matrix and make comparison.
<a id='sec2'></a>
2. Load Data
Step1: <a id='sec2.1'></a>
2.1 Compute POI Info
Compute POI (Longitude, Latitude) as the average coordinates of the assigned photos.
Step2: Extract POI category and visiting frequency.
Step3: <a id='sec2.2'></a>
2.2 Construct Travelling Sequences
Step4: <a id='sec2.3'></a>
2.3 Compute Some Sequence Statistics
Step5: Sequences with length {3, 4, 5}
Step6: <a id='sec3'></a>
3. Compute Transition Probabilities
<a id='sec3.1'></a>
3.1 Basic Definitions
$\text{Pr}(\text{POI}_i \to \text{POI}_j)$
Step7: <a id='sec3.3'></a>
3.3 Transition Probabilities between POI Popularity Classes
We model transition probabilities between POI popularities, i.e.
$\text{Pr}(\text{Pop}_{\text{POI}_i} \to \text{Pop}_{\text{POI}_j})$
after discretizing POI popularities.
3.3.1 Discretize POI Popularity
What are the general criteria for data discretization? See the Discretization of continuous features article on Wikipedia.
~~TODO
Step8: It could be seen from the above plot that discretization based on equal frequency (quantiles) performs better than that based on equal width, to balance the complexity and accuracy, we choose "quantile, nbins=9".
Step9: Quantile based bins (equal frequency)
Step10: Equal width bins
Step11: Another Equal frequency bins
Step12: 3.3.2 Compute Transition Probabilities
Step14: <a id='sec3.4'></a>
3.4 Transition Probabilities between POI Pair Distance Classes
We model transition probabilities between different POI pair distances, i.e.
$\text{Pr}(\text{Dist}_{\text{POI}_{i-1} \to \text{POI}_i} \to \text{Dist}_{\text{POI}_{i} \to \text{POI}_j})$
after discretizing POI pair distances.
TODO
Step15: 3.4.2 Discretize POI Pair Distance
We use the same metrics as described above to choose a discretization strategy for POI pair distances.
<table>
<tr><td><b>descretization strategy</b></td><td><b>actSeqRank(Top%) Toronto (smaller is better)</b></td><td><b>rankedTop5(%) Toronto (larger is better)</b></td><td><b>actSeqRank(Top%) Glasgow</b></td><td><b>rankedTop5(%) Glasgow</b></td></tr>
<tr><td>quantile, nbins=2</td><td>mean
Step16: Remove rows that contain NaN and plot the curve, we choose quantile, nbins=10 to balance the complexity and accuracy.
Step17: Quantile based bins (equal frequency)
Step18: Equal width bins
Step19: Another Equal frequency bins
Step20: 3.4.3 Compute Transition Probabilities
Use POI pair that is observed in dataset to compute the transition matrix between different "class" of distances.
Step21: <a id='sec4'></a>
4. Compute Trajectory Likelihood (For tuning the discretization strategy)
Log likelihood of trajectory $[\text{POI}_1, \text{POI}_2, \dots, \text{POI}_i, \dots, \text{POI}_N]$ is defined as
\begin{align}
\text{logl} =&
\sum_{i=1}^{N-1} \log(\text{Pr}(\text{Cat}_{\text{POI}_i} \to \text{Cat}_{\text{POI}_{i+1}})) +
\sum_{i=1}^{N-1} \log(\text{Pr}(\text{Pop}_{\text{POI}_i} \to \text{Pop}_{\text{POI}_{i+1}})) +
\sum_{i=2}^{N-1} \log(\text{Pr}(\text{Dist}_{\text{POI}_{i-1} \to \text{POI}_i} \to
\text{Dist}_{\text{POI}_{i} \to \text{POI}_{i+1}})) \\
& + \log(\text{Pr}(\text{POI}_1))
\end{align}
where $\text{Pr}(\text{POI}_1)$ is the prior of $\text{POI}_1$ and we assume $\text{Pr}(\text{POI}_1)=1.0$; a base-10 logarithm is used here.
Step22: Simple check.
Step23: <a id='sec4.1'></a>
4.1 Log Likelihood of Actual Trajectories
To save computation power, we consider unique travelling sequences only (i.e. treat sequences with the same list of POI and visiting order of different users as one sequence) as no user-specific features are used here.
Step24: <a id='sec4.2'></a>
4.2 Log Likelihood of Enumerated Trajectories
Compute log likelihood of enumerated trajectories for all unique actual sequences of length {3, 4, 5}.
Step25: Enumerate trajectories of the same (start, end) and length (3, 4 or 5) with respect to an actual sequence.
Step26: Compute the log likelihood of enumerated trajectories.
Step27: <a id='sec4.3'></a>
4.3 Compare the Log Likelihood of Actual and Enumerated Trajectories
Compare the log likelihood between actual sequences $S_a$ and the one of highest log likelihood among enumerated sequences with respect to $S_a$ as well as the log likelihood rank of $S_a$. | Python Code:
%matplotlib inline
import os
import re
import math
import random
import pickle
import pandas as pd
import numpy as np
import scipy.stats
#from numba import jit
from datetime import datetime
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
nfeatures = 8 # number of features
EPS = 1e-12 # smooth, deal with 0 probability
random.seed(123456789) # control random choice when splitting training/testing set
data_dir = 'data/data-ijcai15'
#fvisit = os.path.join(data_dir, 'userVisits-Osak.csv')
#fcoord = os.path.join(data_dir, 'photoCoords-Osak.csv')
#fvisit = os.path.join(data_dir, 'userVisits-Glas.csv')
#fcoord = os.path.join(data_dir, 'photoCoords-Glas.csv')
#fvisit = os.path.join(data_dir, 'userVisits-Edin.csv')
#fcoord = os.path.join(data_dir, 'photoCoords-Edin.csv')
fvisit = os.path.join(data_dir, 'userVisits-Toro.csv')
fcoord = os.path.join(data_dir, 'photoCoords-Toro.csv')
suffix = fvisit.split('-')[-1].split('.')[0]
visits = pd.read_csv(fvisit, sep=';')
coords = pd.read_csv(fcoord, sep=';')
# merge data frames according to column 'photoID'
assert(visits.shape[0] == coords.shape[0])
traj = pd.merge(visits, coords, on='photoID')
traj.head()
num_photo = traj['photoID'].unique().shape[0]
num_user = traj['userID'].unique().shape[0]
num_poi = traj['poiID'].unique().shape[0]
num_seq = traj['seqID'].unique().shape[0]
pd.DataFrame({'#photo': num_photo, '#user': num_user, '#poi': num_poi, '#seq': num_seq, \
'#photo/user': num_photo/num_user, '#seq/user': num_seq/num_user}, index=[str(suffix)])
#plt.figure(figsize=[15, 5])
#plt.xlabel('Longitude')
#plt.ylabel('Latitude')
#plt.scatter(traj['photoLon'], traj['photoLat'], marker='+')
Explanation: Features for Trajectory Recommendation
Features
Load Data
Compute POI Info
Construct Travelling Sequences
Compute Some Sequence Statistics
Compute Transition Probabilities
Basic Definitions
Transition Probabilities between POI Categories
Transition Probabilities between POI Popularity Classes
Transition Probabilities between POI Pair Distance Classes
Compute Trajectory Likelihood (For tuning the discretization strategy)
Log Likelihood of Actual Trajectories
Log Likelihood of Enumerated Trajectories
Compare the Log Likelihood of Actual and Enumerated Trajectories
Compute the F1-score of Enumerated Trajectories
<a id='sec1'></a>
1. Features
POI Features ?
POI category
POI popularity
POI location
Transition Features
POI category (a transition matrix between different categories)
POI popularity (a transition matrix between different class of popularity)
POI pair distance (a transition matrix between different class of distance)
Computation Steps
First compute the above features using a set of travelling sequences,
Then compute the log likelihood of (actual/enumerated) sequences using POI category/popularity transition matrix and POI pair distance transition matrix and make comparison.
<a id='sec2'></a>
2. Load Data
End of explanation
poi_coords = traj[['poiID', 'photoLon', 'photoLat']].groupby('poiID').mean()
poi_coords.reset_index(inplace=True)
poi_coords.rename(columns={'photoLon':'poiLon', 'photoLat':'poiLat'}, inplace=True)
Explanation: <a id='sec2.1'></a>
2.1 Compute POI Info
Compute POI (Longitude, Latitude) as the average coordinates of the assigned photos.
End of explanation
poi_catfreq = traj[['poiID', 'poiTheme', 'poiFreq']].groupby('poiID').first()
poi_catfreq.reset_index(inplace=True)
poi_all = pd.merge(poi_catfreq, poi_coords, on='poiID')
poi_all.set_index('poiID', inplace=True)
#poi_all.to_csv(fpoi, index=True)
Explanation: Extract POI category and visiting frequency.
End of explanation
seq_all = traj[['userID', 'seqID', 'poiID', 'dateTaken']].copy().groupby(['userID', 'seqID', 'poiID'])\
.agg([np.min, np.max, np.size])
seq_all.columns = seq_all.columns.droplevel()
seq_all.reset_index(inplace=True)
seq_all.rename(columns={'amin':'arrivalTime', 'amax':'departureTime', 'size':'#photo'}, inplace=True)
seq_all['poiDuration(sec)'] = seq_all['departureTime'] - seq_all['arrivalTime']
#seq_all.head()
seq_user = seq_all[['userID', 'seqID', 'poiID']].copy().groupby(['userID', 'seqID']).agg(np.size)
seq_user.reset_index(inplace=True)
seq_user.rename(columns={'size':'seqLen'}, inplace=True)
seq_user.set_index('seqID', inplace=True)
#seq_user.head()
Explanation: <a id='sec2.2'></a>
2.2 Construct Travelling Sequences
End of explanation
seq_len = seq_all[['seqID', 'poiID']].copy().groupby('seqID').agg(np.size)
seq_len.reset_index(inplace=True)
seq_len.rename(columns={'poiID':'seqLen'}, inplace=True)
#seq_len.head()
seq_stats = seq_all[['seqID', '#photo', 'poiDuration(sec)']].copy().groupby('seqID').agg(np.sum)
seq_stats.reset_index(inplace=True)
#seq_stats.rename(columns={'poiDuration(sec)':'totalPoiDuration(sec)'}, inplace=True)
seq_stats = pd.merge(seq_len, seq_stats, on='seqID')
seq_stats['poiDuration(sec)'] /= 60
seq_stats.rename(columns={'poiDuration(sec)':'totalPoiDuration(min)'}, inplace=True)
seq_stats.set_index('seqID', inplace=True)
#seq_stats.head()
#ax = seq_stats['seqLen'].hist(bins=50)
#ax.set_xlabel('sequence length')
#ax.set_ylim([0.1, 1e4])
#ax.set_yscale('log')
#ax = seq_stats['#photo'].hist(bins=50)
#ax.set_xlabel('#photo for sequence')
#ax.set_ylim([0.1, 1e4])
#ax.set_yscale('log')
#ax = seq_stats['totalPoiDuration(min)'].hist(bins=100)
#ax.set_xlabel('totalPoiDuration(min)')
#ax.set_ylim([0.1, 1e4])
#ax.set_yscale('log')
#ax.set_xscale('log')
Explanation: <a id='sec2.3'></a>
2.3 Compute Some Sequence Statistics
End of explanation
#seq_stats = seq_stats[seq_stats['seqLen'].isin({3, 4, 5})]
#ax = seq_stats['totalPoiDuration(min)'].hist(bins=50)
#ax.set_xlabel('totalPoiDuration(min)')
#ax.set_ylim([0.1, 1e4])
#ax.set_yscale('log')
def extract_seq(seqid, seq_all):
seqi = seq_all[seq_all['seqID'] == seqid].copy()
seqi.sort(columns=['arrivalTime'], ascending=True, inplace=True)
return seqi['poiID'].tolist()
Explanation: Sequences with length {3, 4, 5}
End of explanation
def calc_poi_cat_transmat(seqid_set, poi_all, seq_all):
poi_cats = poi_all['poiTheme'].unique().tolist()
poi_cats.sort()
poi_cat_transmat = pd.DataFrame(data=np.zeros((len(poi_cats), len(poi_cats)), dtype=np.float), \
index=poi_cats, columns=poi_cats)
for seqid in seqid_set:
seq = extract_seq(seqid, seq_all)
for j in range(len(seq)-1):
poi1 = seq[j]
poi2 = seq[j+1]
cat1 = poi_all.loc[poi1, 'poiTheme']
cat2 = poi_all.loc[poi2, 'poiTheme']
poi_cat_transmat.loc[cat1, cat2] += 1
return poi_cat_transmat
def normalise_transmat(transmat):
assert(isinstance(transmat, pd.DataFrame))
for row in range(transmat.index.shape[0]):
nonzeroidx = np.nonzero(transmat.iloc[row])[0].tolist()
if len(nonzeroidx) < transmat.columns.shape[0]:
minv = np.min(transmat.iloc[row, nonzeroidx])
EPS = 0.1 * minv # row-specific smooth factor
#zeroidx = list(set(range(len(transmat.columns))) - set(nonzeroidx))
#transmat.iloc[row, zeroidx] = EPS
transmat.iloc[row] += EPS
rowsum = np.sum(transmat.iloc[row])
assert(rowsum > 0)
transmat.iloc[row] /= rowsum
return transmat
poi_cat_transmat = calc_poi_cat_transmat(seq_all['seqID'].unique(), poi_all, seq_all)
poi_cat_transmat
poi_cat_transmat = normalise_transmat(poi_cat_transmat)
poi_cat_transmat
poi_cat_transmat_log = np.log10(poi_cat_transmat)
poi_cat_transmat_log
Explanation: <a id='sec3'></a>
3. Compute Transition Probabilities
<a id='sec3.1'></a>
3.1 Basic Definitions
$\text{Pr}(\text{POI}_i \to \text{POI}_j)$:
the transition probability from $\text{POI}_i$ to $\text{POI}_j$
$\text{Pr}(\text{Cat}_i \to \text{Cat}_j)$:
the transition probability from a POI of category $\text{Cat}_i$ to a POI of category $\text{Cat}_j$
$\text{Pr}(\text{Pop}_i \to \text{Pop}_j)$:
the transition probability from a POI of Popularity class $\text{Pop}_i$ to a POI of Popularity class $\text{Pop}_j$
$\text{Pr}(\text{Dist}_i \to \text{Dist}_j)$:
the transition probability from a POI-POI pair with distance (between the two) class $\text{Dist}_i$ to a POI-POI pair with distance (between the two) class $\text{Dist}_j$
By design, $\text{Pr}(\text{POI}_i \to \text{POI}_j)$ should be bigger
if any of $\text{Pr}(\text{Cat}_i \to \text{Cat}_j)$, $\text{Pr}(\text{Pop}_i \to \text{Pop}_j)$ and
$\text{Pr}(\text{Dist}_i \to \text{Dist}_j)$ becomes bigger (if other factors stay the same).
So how to combine these probabilities?
Both addition and multiplication seem to be able to serve this purpose, so what is the difference?
The Addition Case
For the addition case,
\begin{equation}
\text{Pr}(\text{POI}_i \to \text{POI}_j) = \frac{
\text{Pr}(\text{Cat}_{\text{POI}_i} \to \text{Cat}_{\text{POI}_j}) +
\text{Pr}(\text{Pop}_{\text{POI}_i} \to \text{Pop}_{\text{POI}_j}) +
\text{Pr}(\text{Dist}_{\text{POI}_{i-1} \to \text{POI}_i} \to \text{Dist}_{\text{POI}_{i} \to \text{POI}_j})
}
{Z_i}
\end{equation}
where $\text{POI}_{i-1}$ is the direct predecessor of $\text{POI}_i$ in a trajectory and $Z_i$ is a normalizing constant.
The Multiplication Case
For the multiplication case,
\begin{equation}
\text{Pr}(\text{POI}_i \to \text{POI}_j) = \frac{
\text{Pr}(\text{Cat}_{\text{POI}_i} \to \text{Cat}_{\text{POI}_j}) \times
\text{Pr}(\text{Pop}_{\text{POI}_i} \to \text{Pop}_{\text{POI}_j}) \times
\text{Pr}(\text{Dist}_{\text{POI}_{i-1} \to \text{POI}_i} \to \text{Dist}_{\text{POI}_{i} \to \text{POI}_j})
}
{Z_i}
\end{equation}
similarly, $\text{POI}_{i-1}$ is the direct predecessor of $\text{POI}_i$ in a trajectory and $Z_i$ is again a normalizing constant.
The Difference between Addition and Multiplication
It is important to note that, by design, $\text{Pr}(\text{POI}_i \to \text{POI}_j)$ should be very small
if any of $\text{Pr}(\text{Cat}_i \to \text{Cat}_j)$, $\text{Pr}(\text{Pop}_i \to \text{Pop}_j)$ and
$\text{Pr}(\text{Dist}_i \to \text{Dist}_j)$ is very small; in the extreme case, if any of the three probabilities is $0$, then $\text{Pr}(\text{POI}_i \to \text{POI}_j)$ should be $0$,
because the event "Transition from POI$_i$ to POI$_j$" is impossible.
From the equation of the addition case, it is clear that the addition rule contradicts the above fact while the multiplication rule is consistent with it.
Intuitively, the addition rule could make an unlikely event become much more likely, specifically,
make an impossible event become possible.
<a id='sec3.2'></a>
3.2 Transition Probabilities between POI Categories
We model transition probabilities between POI categories, i.e.
$\text{Pr}(\text{Cat}_{\text{POI}_i} \to \text{Cat}_{\text{POI}_j})$.
We count the number of transitions first, then normalise each row, taking care of zeros by adding a small number to each cell (i.e. $0.1$ times the minimum non-zero value of that row) whenever the row contains a zero cell.
End of explanation
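# Added illustration (not in the original notebook): why the multiplication rule is preferred.
# If one of the three factors is 0 (an impossible transition), the product is 0 as required,
# while the sum can still be large (normalisation over candidate POIs does not change this).
p_cat, p_pop, p_dist = 0.8, 0.9, 0.0
print('additive combination      :', p_cat + p_pop + p_dist)
print('multiplicative combination:', p_cat * p_pop * p_dist)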
rank_mean_Toro = [18.496, 16.049, 16.478, 16.811, 15.049, 15.831, 15.567, 14.556, 14.398, 14.874, 13.491, 13.528, \
13.279, 12.784, 24.761, 21.841, 23.636, 20.154, 19.129, 16.922, 18.240, 17.507, 18.196, 17.711, \
17.389, 15.820, 15.681, 15.712, 15.977]
rank_std_Toro = [24.746, 19.873, 21.002, 22.159, 18.722, 20.140, 19.811, 17.937, 17.694, 18.915, 16.760, 16.964, \
15.960, 15.796, 29.356, 28.366, 29.252, 26.713, 25.530, 20.619, 22.739, 22.659, 23.321, 23.398, \
22.718, 20.856, 19.559, 19.794, 20.373]
rank_mean_Glas = [16.328, 16.188, 15.130, 14.316, 14.581, 14.777, 15.019, 14.255, 13.771, 13.568, 11.464, 12.416, \
12.596, 12.209, 23.705, 23.225, 19.416, 19.201, 19.907, 17.030, 19.977, 18.183, 18.158, 15.555, \
15.174, 15.184, 12.922, 14.274, 20.427]
rank_std_Glas = [19.763, 18.117, 18.643, 17.294, 17.522, 17.175, 17.101, 16.184, 16.043, 15.522, 13.896, 13.081, \
14.259, 13.527, 24.304, 25.065, 22.059, 23.250, 23.132, 19.898, 23.118, 22.388, 21.773, 19.722, \
17.188, 18.837, 15.119, 16.828, 21.596]
rank_top5_Toro = [49.25, 49.25, 49.62, 47.74, 48.12, 48.87, 50.75, 51.88, 50.75, 51.88, 54.51, 54.14, \
53.38, 56.77, 43.23, 47.74, 45.11, 46.99, 46.99, 47.37, 45.49, 48.50, 46.99, 49.25, \
49.62, 53.01, 51.13, 51.13, 49.62]
rank_top5_Glas = [55, 52, 58, 61, 58, 59, 56, 64, 62, 63, 66, 63, 65, 66, 47, 47, 53, 54, 53, 57, 54, 55, 52, 60, 60, \
61, 63, 61, 50]
#xlabels = ['qbins='+str(x) for x in range(2, 16)]
#xlabels.extend(['ebins='+str(x) for x in range(2, 16)])
#xlabels.append('another')
xlabels = [x for x in range(2, len(rank_mean_Toro)+2)]
plt.figure(figsize=[15, 10])
plt.xlim([0, len(rank_mean_Toro)+2])
plt.ylim([-20, 100])
plt.errorbar(xlabels, rank_mean_Toro, rank_std_Toro, linestyle='--', marker='s', label='errorbar_Toronto')
plt.errorbar(xlabels, rank_mean_Glas, rank_std_Glas, linestyle='--', marker='s', label='errorbar_Glasgow')
plt.plot(xlabels, rank_top5_Toro, linestyle='--', marker='s', label='top5_Toronto')
plt.plot(xlabels, rank_top5_Glas, linestyle='--', marker='s', label='top5_Glasgow')
plt.legend()
#idx = 10
idx = 7
plt.annotate('choose', xy=(xlabels[idx], rank_top5_Glas[idx]), xytext=(xlabels[idx], rank_top5_Glas[idx]+15), \
arrowprops=dict(facecolor='green', shrink=0.1))
Explanation: <a id='sec3.3'></a>
3.3 Transition Probabilities between POI Popularity Classes
We model transition probabilities between POI popularities, i.e.
$\text{Pr}(\text{Pop}_{\text{POI}_i} \to \text{Pop}_{\text{POI}_j})$
after discretizing POI popularities.
3.3.1 Discretize POI Popularity
What are the general criteria for data discretization? See the Discretization of continuous features article on Wikipedia.
~~TODO: Improve the discretization using Fayyad's minimum description length principle (MDLP) described by this paper~~.
NOTE: MDLP is a supervised discretization method which requires some sort of class labels that are not available here. We need an unsupervised discretization method, well-known ones including equal width, equal frequency, clustering based, etc.
Try different discretization strategies and choose the best one using the following metrics:
actSeqRank(Top%) = $100 \times \frac{\text{rank of actual sequence}~S_a}{\text{number of enumerated sequences w.r.t.}~S_a}$
rankedTop5(%) = $100 \times \frac{\sum{\delta(\text{rank of actual sequence} \le 5)}}
{\text{number of actual sequences}}$, where $\delta(\text{True}) = 1$ and $\delta(\text{False}) = 0$
i.e. the percentage of actual sequence $S_a$ ranked top 5 among all enumerated sequences with respect to $S_a$.
<table>
<tr><td><b>discretization strategy</b></td><td><b>actSeqRank(Top%) Toronto (smaller is better)</b></td><td><b>rankedTop5(%) Toronto (larger is better)</b></td><td><b>actSeqRank(Top%) Glasgow</b></td><td><b>rankedTop5(%) Glasgow</b></td></tr>
<tr><td>quantile, nbins=2</td><td>mean: 18.496, std: 24.746</td><td>131/266 = 49.25%</td><td>mean: 16.328, std: 19.763</td><td>55/100 = 55.00%</td></tr>
<tr><td>quantile, nbins=3</td><td>mean: 16.049, std: 19.873</td><td>131/266 = 49.25%</td><td>mean: 16.188, std: 18.117</td><td>52/100 = 52.00%</td></tr>
<tr><td>quantile, nbins=4</td><td>mean: 16.478, std: 21.002</td><td>132/266 = 49.62%</td><td>mean: 15.130, std: 18.643</td><td>58/100 = 58.00%</td></tr>
<tr><td>quantile, nbins=5</td><td>mean: 16.811, std: 22.159</td><td>127/266 = 47.74%</td><td>mean: 14.316, std: 17.294</td><td>61/100 = 61.00%</td></tr>
<tr><td>quantile, nbins=6</td><td>mean: 15.049, std: 18.722</td><td>128/266 = 48.12%</td><td>mean: 14.581, std: 17.522</td><td>58/100 = 58.00%</td></tr>
<tr><td>quantile, nbins=7</td><td>mean: 15.831, std: 20.140</td><td>130/266 = 48.87%</td><td>mean: 14.777, std: 17.175</td><td>59/100 = 59.00%</td></tr>
<tr><td>quantile, nbins=8</td><td>mean: 15.567, std: 19.811</td><td>135/266 = 50.75%</td><td>mean: 15.019, std: 17.101</td><td>56/100 = 56.00%</td></tr>
<tr><td>quantile, nbins=9</td><td>mean: 14.556, std: 17.937</td><td>138/266 = 51.88%</td><td>mean: 14.255, std: 16.184</td><td>64/100 = 64.00%</td></tr>
<tr><td>quantile, nbins=10</td><td>mean: 14.398, std: 17.694</td><td>135/266 = 50.75%</td><td>mean: 13.771, std: 16.043</td><td>62/100 = 62.00%</td></tr>
<tr><td>quantile, nbins=11</td><td>mean: 14.874, std: 18.915</td><td>138/266 = 51.88%</td><td>mean: 13.568, std: 15.522</td><td>63/100 = 63.00%</td></tr>
<tr><td>quantile, nbins=12</td><td>mean: 13.491, std: 16.760</td><td>145/266 = 54.51%</td><td>mean: 11.464, std: 13.896</td><td>66/100 = 66.00%</td></tr>
<tr><td>quantile, nbins=13</td><td>mean: 13.528, std: 16.964</td><td>144/266 = 54.14%</td><td>mean: 12.416, std: 13.081</td><td>63/100 = 63.00%</td></tr>
<tr><td>quantile, nbins=14</td><td>mean: 13.279, std: 15.960</td><td>142/266 = 53.38%</td><td>mean: 12.596, std: 14.259</td><td>65/100 = 65.00%</td></tr>
<tr><td>quantile, nbins=15</td><td>mean: 12.784, std: 15.796</td><td>151/266 = 56.77%</td><td>mean: 12.209, std: 13.527</td><td>66/100 = 66.00%</td></tr>
<tr><td>equalWidth, nbins=2</td><td>mean: 24.761, std: 29.356</td><td>115/266 = 43.23%</td><td>mean: 23.705, std: 24.304</td><td>47/100 = 47.00%</td></tr>
<tr><td>equalWidth, nbins=3</td><td>mean: 21.841, std: 28.366</td><td>127/266 = 47.74%</td><td>mean: 23.225, std: 25.065</td><td>47/100 = 47.00%</td></tr>
<tr><td>equalWidth, nbins=4</td><td>mean: 23.636, std: 29.252</td><td>120/266 = 45.11%</td><td>mean: 19.416, std: 22.059</td><td>53/100 = 53.00%</td></tr>
<tr><td>equalWidth, nbins=5</td><td>mean: 20.154, std: 26.713</td><td>125/266 = 46.99%</td><td>mean: 19.201, std: 23.250</td><td>54/100 = 54.00%</td></tr>
<tr><td>equalWidth, nbins=6</td><td>mean: 19.129, std: 25.530</td><td>125/266 = 46.99%</td><td>mean: 19.907, std: 23.132</td><td>53/100 = 53.00%</td></tr>
<tr><td>equalWidth, nbins=7</td><td>mean: 16.922, std: 20.619</td><td>126/266 = 47.37%</td><td>mean: 17.030, std: 19.898</td><td>57/100 = 57.00%</td></tr>
<tr><td>equalWidth, nbins=8</td><td>mean: 18.240, std: 22.739</td><td>121/266 = 45.49%</td><td>mean: 19.977, std: 23.118</td><td>54/100 = 54.00%</td></tr>
<tr><td>equalWidth, nbins=9</td><td>mean: 17.507, std: 22.659</td><td>129/266 = 48.50%</td><td>mean: 18.183, std: 22.388</td><td>55/100 = 55.00%</td></tr>
<tr><td>equalWidth, nbins=10</td><td>mean: 18.196, std: 23.321</td><td>125/266 = 46.99%</td><td>mean: 18.158, std: 21.773</td><td>52/100 = 52.00%</td></tr>
<tr><td>equalWidth, nbins=11</td><td>mean: 17.711, std: 23.398</td><td>131/266 = 49.25%</td><td>mean: 15.555, std: 19.722</td><td>60/100 = 60.00%</td></tr>
<tr><td>equalWidth, nbins=12</td><td>mean: 17.389, std: 22.718</td><td>132/266 = 49.62%</td><td>mean: 15.174, std: 17.188</td><td>60/100 = 60.00%</td></tr>
<tr><td>equalWidth, nbins=13</td><td>mean: 15.820, std: 20.856</td><td>141/266 = 53.01%</td><td>mean: 15.184, std: 18.837</td><td>61/100 = 61.00%</td></tr>
<tr><td>equalWidth, nbins=14</td><td>mean: 15.681, std: 19.559</td><td>136/266 = 51.13%</td><td>mean: 12.922, std: 15.119</td><td>63/100 = 63.00%</td></tr>
<tr><td>equalWidth, nbins=15</td><td>mean: 15.712, std: 19.794</td><td>136/266 = 51.13%</td><td>mean: 14.274, std: 16.828</td><td>61/100 = 61.00%</td></tr>
<tr><td>another, bins=[0, 500, 1500, 10000]</td><td>mean: 15.977, std: 20.373</td><td>132/266 = 49.62%</td><td>mean: 20.427, std: 21.596</td><td>50/100 = 50.00%</td></tr>
</table>
End of explanation
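# Added sketch (not in the original notebook): the two model-selection metrics defined above,
# computed from a list of (rank of actual sequence, number of enumerated sequences) pairs;
# `rank_pairs` is a hypothetical variable name.
def act_seq_rank_top_percent(rank_pairs):
    return [100.0 * rank / n_enum for rank, n_enum in rank_pairs]

def ranked_top5_percent(rank_pairs):
    return 100.0 * sum(1 for rank, _ in rank_pairs if rank <= 5) / len(rank_pairs)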
poi_all['poiFreq'].get_values()
poi_all['poiFreq'].describe()
poi_all['poiFreq'].quantile([.25, .5, .75]).tolist()
ax = poi_all['poiFreq'].hist(bins=10)
ax.set_xlabel('POI Popularity')
ax.set_ylabel('#POI')
#plt.plot(np.ones(poi_all.index.shape[0]), np.sqrt(poi_all['poiFreq']), marker='+')
Explanation: It can be seen from the above plot that discretization based on equal frequency (quantiles) performs better than discretization based on equal width; to balance complexity and accuracy, we choose "quantile, nbins=9".
End of explanation
nbins = 9
quantiles = np.round(np.linspace(0, 1, nbins+1), 2)[1:-1]
quantiles
bins_qt = [0]
bins_qt.extend(poi_all['poiFreq'].quantile(quantiles))
bins_qt.append(poi_all['poiFreq'].max() + 1)
bins_qt
Explanation: Quantile based bins (equal frequency)
End of explanation
#nbins = 15
#inter = round((poi_all['poiFreq'].max() + 1) / nbins)
#bins_ew = [x*inter for x in range(nbins)]
#bins_ew.append(poi_all['poiFreq'].max() + 1)
#bins_ew
Explanation: Equal width bins
End of explanation
#bins = np.linspace(0, 10000, 11)
#bins = np.logspace(0, 4, 5)
#bins = [1, 100, 500, 1000, 2000, 5000]
#bins_ef = [0, 500, 1500, 10000]
bins_pop = bins_qt
#bins_pop = bins_ew
#bins_pop = bins_ef
ax = poi_all['poiFreq'].hist(bins=bins_pop)
ax.set_xlabel('POI Popularity')
ax.set_ylabel('#POI')
ax.set_xscale('log')
poi_all['popClass'] = np.digitize(poi_all['poiFreq'].get_values(), bins_pop)
#poi_all
Explanation: Another Equal frequency bins
End of explanation
def calc_poi_pop_transmat(seqid_set, poi_all, seq_all):
pop_class = poi_all['popClass'].unique().tolist()
pop_class.sort()
poi_pop_transmat = pd.DataFrame(data=np.zeros((len(pop_class), len(pop_class)), dtype=np.float), \
index=pop_class, columns=pop_class)
for seqid in seqid_set:
seq = extract_seq(seqid, seq_all)
for j in range(len(seq)-1):
poi1 = seq[j]
poi2 = seq[j+1]
pc1 = poi_all.loc[poi1, 'popClass']
pc2 = poi_all.loc[poi2, 'popClass']
poi_pop_transmat.loc[pc1, pc2] += 1
return poi_pop_transmat
poi_pop_transmat = calc_poi_pop_transmat(seq_all['seqID'].unique(), poi_all, seq_all)
poi_pop_transmat
poi_pop_transmat = normalise_transmat(poi_pop_transmat)
poi_pop_transmat
poi_pop_transmat_log = np.log10(poi_pop_transmat)
poi_pop_transmat_log
Explanation: 3.3.2 Compute Transition Probabilities
End of explanation
def calc_dist(longitude1, latitude1, longitude2, latitude2):
    """Calculate the distance (unit: km) between two places on earth."""
# convert degrees to radians
lon1 = math.radians(longitude1)
lat1 = math.radians(latitude1)
lon2 = math.radians(longitude2)
lat2 = math.radians(latitude2)
radius = 6371.009 # mean earth radius is 6371.009km, en.wikipedia.org/wiki/Earth_radius#Mean_radius
# The haversine formula, en.wikipedia.org/wiki/Great-circle_distance
dlon = math.fabs(lon1 - lon2)
dlat = math.fabs(lat1 - lat2)
return 2 * radius * math.asin( math.sqrt( \
(math.sin(0.5*dlat))**2 + math.cos(lat1) * math.cos(lat2) * (math.sin(0.5*dlon))**2 ))
def calc_obs_poipair_distmat(seqid_set, poi_all, seq_all):
poi_distmat = pd.DataFrame(data=np.full((poi_all.shape[0], poi_all.shape[0]), np.nan, dtype=np.float), \
index=poi_all.index, columns=poi_all.index)
for seqid in seqid_set:
seq = extract_seq(seqid, seq_all)
if len(seq) < 2: continue
for j in range(len(seq)-1):
poi1 = seq[j]
poi2 = seq[j+1]
if np.isnan(poi_distmat.loc[poi1, poi2]):
dist = calc_dist(poi_all.loc[poi1, 'poiLon'], poi_all.loc[poi1, 'poiLat'], \
poi_all.loc[poi2, 'poiLon'], poi_all.loc[poi2, 'poiLat'])
poi_distmat.loc[poi1, poi2] = dist
poi_distmat.loc[poi2, poi1] = dist
return poi_distmat
poi_distmat = calc_obs_poipair_distmat(seq_all['seqID'].unique(), poi_all, seq_all)
#poi_distmat
Explanation: <a id='sec3.4'></a>
3.4 Transition Probabilities between POI Pair Distance Classes
We model transition probabilities between different POI pair distances, i.e.
$\text{Pr}(\text{Dist}_{\text{POI}_{i-1} \to \text{POI}_i} \to \text{Dist}_{\text{POI}_{i} \to \text{POI}_j})$
after discretizing POI pair distances.
TODO: Improve the distance calculation using Google maps distance API with different travel modes demonstrated here.
3.4.1 Compute POI Pair Distance
Compute POI-pair distance if the pair is observed in dataset.
End of explanation
rank_mean_Toro = [15.156, 14.917, 14.389, 13.645, 14.299, 12.689, 12.996, 12.510, 12.467, 12.548, 11.980, \
12.170, 11.384, 11.444, 10.932, 10.991, 10.836, 15.110]
rank_std_Toro = [16.911, 17.484, 17.527, 16.550, 17.550, 14.674, 15.606, 14.946, 14.549, 14.758, 13.883, \
13.983, 12.787, 12.888, 12.621, 12.950, 12.383, 16.204]
rank_mean_Glas = [14.354, 14.450, 14.255, 14.085, 13.156, 12.755, 11.716, 11.355, 11.181, 10.214, 10.041, \
9.345, 9.008, 8.613, 8.553, 8.025, 7.922, 14.937]
rank_std_Glas = [16.541, 16.173, 16.184, 16.159, 14.436, 13.681, 13.003, 12.893, 12.107, 10.872, 10.895, \
9.759, 9.444, 9.052, 9.336, 8.502, 8.702, 16.630]
rank_top5_Toro = [45.11, 48.87, 52.26, 55.64, 51.88, 52.63, 53.76, 52.63, 54.89, 53.76, 55.26, 52.63, 57.52, \
56.77, 58.65, 58.65, 57.14, 43.98]
rank_top5_Glas = [60, 60, 64, 63, 63, 62, 64, 66, 68, 68, 73, 73, 73, 75, 74, 79, 79, 60]
xlabels = [x for x in range(2, len(rank_mean_Toro)+2)]
plt.figure(figsize=[15, 10])
plt.xlim([0, len(rank_mean_Toro)+2])
plt.ylim([-20, 100])
plt.errorbar(xlabels, rank_mean_Toro, rank_std_Toro, linestyle='--', marker='s', label='errorbar_Toronto')
plt.errorbar(xlabels, rank_mean_Glas, rank_std_Glas, linestyle='--', marker='s', label='errorbar_Glasgow')
plt.plot(xlabels, rank_top5_Toro, linestyle='--', marker='s', label='top5_Toronto')
plt.plot(xlabels, rank_top5_Glas, linestyle='--', marker='s', label='top5_Glasgow')
plt.legend()
#idx = 10
idx = 8
plt.annotate('choose', xy=(xlabels[idx], rank_top5_Glas[idx]), xytext=(xlabels[idx], rank_top5_Glas[idx]+15), \
arrowprops=dict(facecolor='green', shrink=0.1))
Explanation: 3.4.2 Discretize POI Pair Distance
We use the same metrics as described above to choose a discretization strategy for POI pair distances.
<table>
<tr><td><b>discretization strategy</b></td><td><b>actSeqRank(Top%) Toronto (smaller is better)</b></td><td><b>rankedTop5(%) Toronto (larger is better)</b></td><td><b>actSeqRank(Top%) Glasgow</b></td><td><b>rankedTop5(%) Glasgow</b></td></tr>
<tr><td>quantile, nbins=2</td><td>mean: 15.156, std: 16.911</td><td>120/266 = 45.11%</td><td>mean: 14.354, std: 16.541</td><td>60/100 = 60.00%</td></tr>
<tr><td>quantile, nbins=3</td><td>mean: 14.917, std: 17.484</td><td>130/266 = 48.87%</td><td>mean: 14.450, std: 16.173</td><td>60/100 = 60.00%</td></tr>
<tr><td>quantile, nbins=4</td><td>mean: 14.389, std: 17.527</td><td>139/266 = 52.26%</td><td>mean: 14.255, std: 16.184</td><td>64/100 = 64.00%</td></tr>
<tr><td>quantile, nbins=5</td><td>mean: 13.645, std: 16.550</td><td>148/266 = 55.64%</td><td>mean: 14.085, std: 16.159</td><td>63/100 = 63.00%</td></tr>
<tr><td>quantile, nbins=6</td><td>mean: 14.299, std: 17.550</td><td>138/266 = 51.88%</td><td>mean: 13.156, std: 14.436</td><td>63/100 = 63.00%</td></tr>
<tr><td>quantile, nbins=7</td><td>mean: 12.689, std: 14.674</td><td>140/266 = 52.63%</td><td>mean: 12.755, std: 13.681</td><td>62/100 = 62.00%</td></tr>
<tr><td>quantile, nbins=8</td><td>mean: 12.996, std: 15.606</td><td>143/266 = 53.76%</td><td>mean: 11.716, std: 13.003</td><td>64/100 = 64.00%</td></tr>
<tr><td>quantile, nbins=9</td><td>mean: 12.510, std: 14.946</td><td>140/266 = 52.63%</td><td>mean: 11.355, std: 12.893</td><td>66/100 = 66.00%</td></tr>
<tr><td>quantile, nbins=10</td><td>mean: 12.467, std: 14.549</td><td>146/266 = 54.89%</td><td>mean: 11.181, std: 12.107</td><td>68/100 = 68.00%</td></tr>
<tr><td>quantile, nbins=11</td><td>mean: 12.548, std: 14.758</td><td>143/266 = 53.76%</td><td>mean: 10.214, std: 10.872</td><td>68/100 = 68.00%</td></tr>
<tr><td>quantile, nbins=12</td><td>mean: 11.980, std: 13.883</td><td>147/266 = 55.26%</td><td>mean: 10.041, std: 10.895</td><td>73/100 = 73.00%</td></tr>
<tr><td>quantile, nbins=13</td><td>mean: 12.170, std: 13.983</td><td>140/266 = 52.63%</td><td>mean: 9.345, std: 9.759</td><td>73/100 = 73.00%</td></tr>
<tr><td>quantile, nbins=14</td><td>mean: 11.384, std: 12.787</td><td>153/266 = 57.52%</td><td>mean: 9.008, std: 9.444</td><td>73/100 = 73.00%</td></tr>
<tr><td>quantile, nbins=15</td><td>mean: 11.444, std: 12.888</td><td>151/266 = 56.77%</td><td>mean: 8.613, std: 9.052</td><td>75/100 = 75.00%</td></tr>
<tr><td>quantile, nbins=16</td><td>mean: 10.932, std: 12.621</td><td>156/266 = 58.65%</td><td>mean: 8.553, std: 9.336</td><td>74/100 = 74.00%</td></tr>
<tr><td>quantile, nbins=17</td><td>mean: 10.991, std: 12.950</td><td>156/266 = 58.65%</td><td>mean: 8.025, std: 8.502</td><td>79/100 = 79.00%</td></tr>
<tr><td>quantile, nbins=18</td><td>mean: 10.728, std: 12.285</td><td>155/266 = 58.27%</td><td>EMPTY ROW 13</td><td>NaN</td></tr>
<tr><td>quantile, nbins=19</td><td>mean: 10.754, std: 12.368</td><td>150/266 = 56.39%</td><td>EMPTY ROW 11</td><td>NaN</td></tr>
<tr><td>quantile, nbins=20</td><td>mean: 10.836, std: 12.383</td><td>152/266 = 57.14%</td><td>mean: 7.922, std: 8.702</td><td>79/100 = 79.00%</td></tr>
<tr><td>quantile, nbins=21</td><td>mean: 10.579, std: 12.301</td><td>162/266 = 60.90%</td><td>EMPTY ROW 15</td><td>NaN</td></tr>
<tr><td>equalWidth, nbins=2</td><td>EMPTY LAST BIN</td><td>NaN</td><td>EMPTY LAST BIN</td><td>NaN</td></tr>
<tr><td>equalWidth, nbins=3</td><td>EMPTY LAST BIN</td><td>NaN</td><td>EMPTY LAST TWO BIN</td><td>NaN</td></tr>
<tr><td>equalWidth, nbins=4</td><td>EMPTY LAST TWO BIN</td><td>NaN</td><td>EMPTY LAST TWO BIN</td><td>NaN</td></tr>
<tr><td>another, bins=[0, 2, 5, 100]</td><td>mean: 15.110, std: 16.204</td><td>117/266 = 43.98%</td><td>mean: 14.937, std: 16.630</td><td>60/100 = 60.00%</td></tr>
</table>
End of explanation
#distdata = pd.Series([x for x in np.unique(poi_distmat.get_values().flatten()) if not np.isnan(x)])
distdata = pd.Series([poi_distmat.iloc[x, y] \
for x in range(poi_distmat.index.shape[0]) \
for y in range(x+1, poi_distmat.index.shape[0]) \
if not np.isnan(poi_distmat.iloc[x, y])])
distdata.describe()
ax = distdata.hist(bins=20)
ax.set_xlabel('POI-Pair Distance (km)')
ax.set_ylabel('#POI-Pair')
Explanation: Remove the rows that contain NaN and plot the curve; we choose "quantile, nbins=10" to balance complexity and accuracy.
End of explanation
nbins = 10
quantiles = np.round(np.linspace(0, 1, nbins+1), 2)[1:-1]
quantiles
bins_qt = [0]
bins_qt.extend(distdata.quantile(quantiles))
bins_qt.append(10*round(distdata.max()))
bins_qt
Explanation: Quantile based bins (equal frequency)
End of explanation
#nbins = 4
#inter = round((round(distdata.max()) + 1) / nbins)
#maxdist = 30 # Toronto, maximum distance among all POI pairs
#maxdist = 46 # Glasgow
#inter = round(maxdist / nbins)
#bins_ew = [x*inter for x in range(nbins)]
#bins_ew.append(maxdist)
#bins_ew
Explanation: Equal width bins
End of explanation
#bins = np.linspace(0, 10, 7)
#bins = np.logspace(0, 2, 4)
#bins = [0, 1, 2, 3, 10]
#bins_a = [0, 2, 5, 100] # walk, ride, drive
#bins_ef = [0, 1.15, 2.25, 100]
bins_dist = bins_qt
#bins_dist = bins_ew
#bins_dist = bins_ef
#bins_dist = bins_a
ax = distdata.ix[np.nonzero(distdata)].hist(bins=bins_dist)
ax.set_xlabel('POI-Pair Distance (km)')
ax.set_ylabel('#POI-Pair')
ax.set_xscale('log')
poi_distclass_mat = pd.DataFrame(data=np.zeros((poi_all.shape[0], poi_all.shape[0]), dtype=np.int), \
index=poi_all.index, columns=poi_all.index)
for i in range(poi_all.index.shape[0]):
poi1 = poi_all.index[i]
for j in range(i+1, poi_all.index.shape[0]):
poi2 = poi_all.index[j]
dc = None
if np.isnan(poi_distmat.loc[poi1, poi2]):
dist = calc_dist(poi_all.loc[poi1, 'poiLon'], poi_all.loc[poi1, 'poiLat'], \
poi_all.loc[poi2, 'poiLon'], poi_all.loc[poi2, 'poiLat'])
dc = np.digitize([dist], bins_dist)[0]
else:
dc = np.digitize([poi_distmat.loc[poi1, poi2]], bins_dist)[0]
assert(dc is not None)
poi_distclass_mat.loc[poi1, poi2] = dc
poi_distclass_mat.loc[poi2, poi1] = dc
poi_distclass_mat
Explanation: Another Equal frequency bins
End of explanation
def calc_poipair_dist_transmat(seqid_set, poi_all, seq_all, poi_distclass_mat, bins_dist):
dist_class = list(range(1, len(bins_dist)))
poipair_dist_transmat = pd.DataFrame(data=np.zeros((len(dist_class), len(dist_class)), dtype=np.float), \
index=dist_class, columns=dist_class)
for seqid in seqid_set:
seq = extract_seq(seqid, seq_all)
if len(seq) < 3: continue
for j in range(1, len(seq)-1):
poi1 = seq[j-1]
poi2 = seq[j]
poi3 = seq[j+1]
dc1 = poi_distclass_mat.loc[poi1, poi2]
dc2 = poi_distclass_mat.loc[poi2, poi3]
poipair_dist_transmat.loc[dc1, dc2] += 1
return poipair_dist_transmat
poipair_dist_transmat = calc_poipair_dist_transmat(seq_all['seqID'].unique(), poi_all, seq_all, \
poi_distclass_mat, bins_dist)
poipair_dist_transmat
poipair_dist_transmat = normalise_transmat(poipair_dist_transmat)
poipair_dist_transmat
poipair_dist_transmat_log = np.log10(poipair_dist_transmat)
poipair_dist_transmat_log
Explanation: 3.4.3 Compute Transition Probabilities
Use the POI pairs that are observed in the dataset to compute the transition matrix between different "classes" of distances.
End of explanation
def calc_seq_loglikelihood(seq, poi_all, poi_cat_transmat_log, poi_pop_transmat_log, \
poi_distclass_mat, poipair_dist_transmat_log):
assert(len(seq) > 1)
cat1 = poi_all.loc[seq[0], 'poiTheme']
cat2 = poi_all.loc[seq[1], 'poiTheme']
pc1 = poi_all.loc[seq[0], 'popClass']
pc2 = poi_all.loc[seq[1], 'popClass']
logL = poi_cat_transmat_log.loc[cat1, cat2] + poi_pop_transmat_log.loc[pc1, pc2]
for j in range(1, len(seq)-1):
poi1 = seq[j-1]
poi2 = seq[j]
poi3 = seq[j+1]
cat2 = poi_all.loc[poi2, 'poiTheme']
cat3 = poi_all.loc[poi3, 'poiTheme']
pc2 = poi_all.loc[poi2, 'popClass']
pc3 = poi_all.loc[poi3, 'popClass']
dc12 = poi_distclass_mat.loc[poi1, poi2]
dc23 = poi_distclass_mat.loc[poi2, poi3]
logL += poi_cat_transmat_log.loc[cat2, cat3] + poi_pop_transmat_log.loc[pc2, pc3]
#print(seq, dc12, dc23)
logL += poipair_dist_transmat_log.loc[dc12, dc23]
return logL
Explanation: <a id='sec4'></a>
4. Compute Trajectory Likelihood (For tuning the discretization strategy)
Log likelihood of trajectory $[\text{POI}_1, \text{POI}_2, \dots, \text{POI}_i, \dots, \text{POI}_N]$ is defined as
\begin{align}
\text{logl} =&
\sum_{i=1}^{N-1} \log(\text{Pr}(\text{Cat}_{\text{POI}_i} \to \text{Cat}_{\text{POI}_{i+1}})) +
\sum_{i=1}^{N-1} \log(\text{Pr}(\text{Pop}_{\text{POI}_i} \to \text{Pop}_{\text{POI}_{i+1}})) +
\sum_{i=2}^{N-1} \log(\text{Pr}(\text{Dist}_{\text{POI}_{i-1} \to \text{POI}_i} \to
\text{Dist}_{\text{POI}_{i} \to \text{POI}_{i+1}})) \\
& + \log(\text{Pr}(\text{POI}_1))
\end{align}
where $\text{Pr}(\text{POI}_1)$ is the prior of $\text{POI}_1$ and we assume $\text{Pr}(\text{POI}_1)=1.0$; a base-10 logarithm is used here.
End of explanation
seq1 = [10, 21, 28, 22]
d12 = calc_dist(poi_all.loc[10,'poiLon'], poi_all.loc[10,'poiLat'], poi_all.loc[21,'poiLon'], poi_all.loc[21, 'poiLat'])
d23 = calc_dist(poi_all.loc[21,'poiLon'], poi_all.loc[21,'poiLat'], poi_all.loc[28,'poiLon'], poi_all.loc[28, 'poiLat'])
d34 = calc_dist(poi_all.loc[28,'poiLon'], poi_all.loc[28,'poiLat'], poi_all.loc[22,'poiLon'], poi_all.loc[22, 'poiLat'])
print(d12, d23, d34)
print(bins_dist)
s1 = poi_cat_transmat_log.loc[poi_all.loc[10, 'poiTheme'], poi_all.loc[21, 'poiTheme']] + \
poi_cat_transmat_log.loc[poi_all.loc[21, 'poiTheme'], poi_all.loc[28, 'poiTheme']] + \
poi_cat_transmat_log.loc[poi_all.loc[28, 'poiTheme'], poi_all.loc[22, 'poiTheme']] + \
poi_pop_transmat_log.loc[poi_all.loc[10, 'popClass'], poi_all.loc[21, 'popClass']] + \
poi_pop_transmat_log.loc[poi_all.loc[21, 'popClass'], poi_all.loc[28, 'popClass']] + \
poi_pop_transmat_log.loc[poi_all.loc[28, 'popClass'], poi_all.loc[22, 'popClass']]
s2 = poipair_dist_transmat_log.loc[np.digitize([d12], bins_dist)[0], np.digitize([d23], bins_dist)[0]] + \
poipair_dist_transmat_log.loc[np.digitize([d23], bins_dist)[0], np.digitize([d34], bins_dist)[0]]
print(s1+s2)
calc_seq_loglikelihood([10, 21, 28, 22], poi_all, poi_cat_transmat_log, poi_pop_transmat_log, \
poi_distclass_mat, poipair_dist_transmat_log)
def parse_seqstr(seqstr):
term = re.sub('[ \[\]]', '', seqstr).split(',')
return [int(x) for x in term]
Explanation: Simple check.
End of explanation
unique_seq = dict() # seq -> [(seqid, userid)]
for seqid in sorted(seq_all['seqID'].unique().tolist()):
seq = extract_seq(seqid, seq_all)
if str(seq) not in unique_seq:
unique_seq[str(seq)] = [(seqid, seq_user.loc[seqid])]
else:
unique_seq[str(seq)].append((seqid, seq_user.loc[seqid]))
unique_seq345 = [parse_seqstr(x) for x in sorted(unique_seq.keys()) if len(x.split(',')) in {3,4,5}]
unique_seq345_logL = pd.DataFrame(data=np.zeros((len(unique_seq345), 2), dtype=np.float), \
index=[str(x) for x in unique_seq345], columns=['logLikelihood', 'seqLen'])
unique_seq345_logL.index.name = 'actSeq'
for seq in unique_seq345:
assert(len(seq) in {3,4,5})
logL = calc_seq_loglikelihood(seq, poi_all, poi_cat_transmat_log, poi_pop_transmat_log, \
poi_distclass_mat, poipair_dist_transmat_log)
unique_seq345_logL.loc[str(seq), 'logLikelihood'] = logL
unique_seq345_logL.loc[str(seq), 'seqLen'] = len(seq)
#print('Sequence %-20s Log likelihood: %.3f' % (str(seq), logL))
print(unique_seq345_logL.index.shape[0])
unique_seq345_logL.head()
unique_seq345_logL['seqLen'].hist(bins=10)
Explanation: <a id='sec4.1'></a>
4.1 Log Likelihood of Actual Trajectories
To save computation, we consider unique travelling sequences only (i.e. sequences from different users with the same list of POIs and the same visiting order are treated as one sequence), as no user-specific features are used here.
End of explanation
poi_list = poi_all.index.tolist()
#poi_list
Explanation: <a id='sec4.2'></a>
4.2 Log Likelihood of Enumerated Trajectories
Compute log likelihood of enumerated trajectories for all unique actual sequences of length {3, 4, 5}.
End of explanation
def enum_seq345(seq, poi_list):
assert(len(seq) in {3, 4, 5})
p0 = seq[0]
pN = seq[-1]
# enumerate sequences with length 3
if len(seq) == 3:
return [[p0, p, pN] \
for p in poi_list if p not in {p0, pN}]
# enumerate sequences with length 4
if len(seq) == 4:
return [[p0, p1, p2, pN] \
for p1 in poi_list if p1 not in {p0, pN} \
for p2 in poi_list if p2 not in {p0, p1, pN}]
# enumerate sequences with length 5
if len(seq) == 5:
return [[p0, p1, p2, p3, pN] \
for p1 in poi_list if p1 not in {p0, pN} \
for p2 in poi_list if p2 not in {p0, p1, pN} \
for p3 in poi_list if p3 not in {p0, p1, p2, pN}]
Explanation: Enumerate trajectories of the same (start, end) and length (3, 4 or 5) with respect to an actual sequence.
End of explanation
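# Added sketch (not in the original notebook): a generic version of enum_seq345 for any
# sequence length, using itertools.permutations over the intermediate POIs instead of
# hard-coded nested loops.
import itertools
def enum_seq_generic(seq, poi_list):
    p0, pN = seq[0], seq[-1]
    middle = [p for p in poi_list if p not in {p0, pN}]
    return [[p0] + list(m) + [pN] for m in itertools.permutations(middle, len(seq) - 2)]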
enum_logL_df = pd.DataFrame()
for seq in unique_seq345:
enum_seqs = enum_seq345(seq, poi_list)
df = pd.DataFrame(data=sorted([str(x) for x in enum_seqs]), columns=['enumSeq'])
df.set_index('enumSeq', inplace=True)
df['actSeq'] = str(seq)
enum_logL_df = enum_logL_df.append(df)
print(enum_logL_df.shape)
enum_logL_df.head()
t1 = datetime.now()
logL = Parallel(n_jobs=-2)(delayed(calc_seq_loglikelihood)\
(seq, poi_all, poi_cat_transmat_log, poi_pop_transmat_log, poi_distclass_mat, poipair_dist_transmat_log)\
for seq in [parse_seqstr(x) for x in enum_logL_df.index])
print('%d seconds used' % (datetime.now()-t1).total_seconds()) # 930 seconds
enum_logL_df['enumSeqLogLikelihood'] = logL
#enum_logL_df.head(23)
Explanation: Compute the log likelihood of enumerated trajectories.
End of explanation
df = pd.DataFrame(data=sorted([str(x) for x in unique_seq345]), columns=['actSeq'])
df.set_index('actSeq', inplace=True)
df['actSeqLogLikelihood'] = unique_seq345_logL.loc[df.index, 'logLikelihood']
df['enumSeq'] = ''
df['enumSeqLogLikelihood'] = 0
df['actSeqRank'] = 0
df['#enumSeq'] = 0
for seqstr in df.index:
sub_df = enum_logL_df[enum_logL_df['actSeq'] == seqstr].copy()
sub_df.reset_index(inplace=True)
sub_df.sort(columns=['enumSeqLogLikelihood'], ascending=False, inplace=True)
df.loc[seqstr, 'enumSeq'] = sub_df.iloc[0]['enumSeq']
df.loc[seqstr, 'enumSeqLogLikelihood'] = sub_df.iloc[0]['enumSeqLogLikelihood']
df.loc[seqstr, 'actSeqRank'] = 1 + np.nonzero(sub_df['enumSeq'] == seqstr)[0][0] # rank of actual sequence
df.loc[seqstr, '#enumSeq'] = sub_df.index.shape[0]
df['actSeqRank(Top%)'] = 100*df['actSeqRank']/df['#enumSeq']
#df
print('mean: %.3f, std: %.3f' % (round(df['actSeqRank(Top%)'].mean(),3), round(df['actSeqRank(Top%)'].std(),3)))
df['actSeqRank(Top%)'].describe()
ntop = np.nonzero(df['actSeqRank'] <= 5)[0].shape[0]
print('%d/%d = %.2f%%' % (ntop, df.index.shape[0], 100*ntop/df.index.shape[0]))
Explanation: <a id='sec4.3'></a>
4.3 Compare the Log Likelihood of Actual and Enumerated Trajectories
Compare the log likelihood between actual sequences $S_a$ and the one of highest log likelihood among enumerated sequences with respect to $S_a$ as well as the log likelihood rank of $S_a$.
End of explanation |
14,718 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
The use of watermark (above) is optional, and we use it to keep track of the changes while developing the tutorial material. (You can install this IPython extension via "pip install watermark". For more information, please see
Step1: (Note that NumPy arrays use 0-indexing just like other data structures in Python.)
Step2: $$\begin{bmatrix}
1 & 2 & 3 & 4 \
5 & 6 & 7 & 8
\end{bmatrix}^T
=
\begin{bmatrix}
1 & 5 \
2 & 6 \
3 & 7 \
4 & 8
\end{bmatrix}
$$
Step3: There is much, much more to know, but these few operations are fundamental to what we'll
do during this tutorial.
SciPy Sparse Matrices
We won't make very much use of these in this tutorial, but sparse matrices are very nice
in some situations. In some machine learning tasks, especially those associated
with textual analysis, the data may be mostly zeros. Storing all these zeros is very
inefficient, and representing in a way that only contains the "non-zero" values can be much more efficient. We can create and manipulate sparse matrices as follows
Step4: (You may have stumbled upon an alternative method for converting sparse to dense representations
Step5: Often, once an LIL matrix is created, it is useful to convert it to a CSR format
(many scikit-learn algorithms require CSR or CSC format)
Step6: The available sparse formats that can be useful for various problems are
Step7: There are many, many more plot types available. One useful way to explore these is by
looking at the matplotlib gallery.
You can test these examples out easily in the notebook | Python Code:
import numpy as np
# Setting a random seed for reproducibility
rnd = np.random.RandomState(seed=123)
# Generating a random array
X = rnd.uniform(low=0.0, high=1.0, size=(3, 5)) # a 3 x 5 array
print(X)
Explanation: The use of watermark (above) is optional, and we use it to keep track of the changes while developing the tutorial material. (You can install this IPython extension via "pip install watermark". For more information, please see: https://github.com/rasbt/watermark).
SciPy 2016 Scikit-learn Tutorial
01.2 Jupyter Notebooks
You can run a cell by pressing [shift] + [Enter] or by pressing the "play" button in the menu.
You can get help on a function or object by pressing [shift] + [tab] after the opening parenthesis function(
You can also get help by executing function?
Numpy Arrays
Manipulating numpy arrays is an important part of doing machine learning
(or, really, any type of scientific computation) in python. This will likely
be a short review for most. In any case, let's quickly go through some of the most important features.
End of explanation
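# Added example (not in the original tutorial): getting help on a function, either with the
# notebook's question-mark syntax or with plain Python's help().
# np.linspace?        # works inside Jupyter/IPython
help(np.linspace)     # works everywhere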
# Accessing elements
# get a single element
# (here: an element in the first row and column)
print(X[0, 0])
# get a row
# (here: 2nd row)
print(X[1])
# get a column
# (here: 2nd column)
print(X[:, 1])
# Transposing an array
print(X.T)
Explanation: (Note that NumPy arrays use 0-indexing just like other data structures in Python.)
End of explanation
# Creating a row vector
# of evenly spaced numbers over a specified interval.
y = np.linspace(0, 12, 5)
print(y)
# Turning the row vector into a column vector
print(y[:, np.newaxis])
# Getting the shape or reshaping an array
# Generating a random array
rnd = np.random.RandomState(seed=123)
X = rnd.uniform(low=0.0, high=1.0, size=(3, 5)) # a 3 x 5 array
print(X.shape)
print(X.reshape(5, 3))
# Indexing by an array of integers (fancy indexing)
indices = np.array([3, 1, 0])
print(indices)
X[:, indices]
Explanation: $$\begin{bmatrix}
1 & 2 & 3 & 4 \\
5 & 6 & 7 & 8
\end{bmatrix}^T
=
\begin{bmatrix}
1 & 5 \\
2 & 6 \\
3 & 7 \\
4 & 8
\end{bmatrix}
$$
End of explanation
from scipy import sparse
# Create a random array with a lot of zeros
rnd = np.random.RandomState(seed=123)
X = rnd.uniform(low=0.0, high=1.0, size=(10, 5))
print(X)
# set the majority of elements to zero
X[X < 0.7] = 0
print(X)
# turn X into a CSR (Compressed-Sparse-Row) matrix
X_csr = sparse.csr_matrix(X)
print(X_csr)
# Converting the sparse matrix to a dense array
print(X_csr.toarray())
Explanation: There is much, much more to know, but these few operations are fundamental to what we'll
do during this tutorial.
SciPy Sparse Matrices
We won't make very much use of these in this tutorial, but sparse matrices are very nice
in some situations. In some machine learning tasks, especially those associated
with textual analysis, the data may be mostly zeros. Storing all these zeros is very
inefficient, and representing in a way that only contains the "non-zero" values can be much more efficient. We can create and manipulate sparse matrices as follows:
End of explanation
# Create an empty LIL matrix and add some items
X_lil = sparse.lil_matrix((5, 5))
for i, j in np.random.randint(0, 5, (15, 2)):
X_lil[i, j] = i + j
print(X_lil)
print(type(X_lil))
X_dense = X_lil.toarray()
print(X_dense)
print(type(X_dense))
Explanation: (You may have stumbled upon an alternative method for converting sparse to dense representations: numpy.todense; toarray returns a NumPy array, whereas todense returns a NumPy matrix. In this tutorial, we will be working with NumPy arrays, not matrices; the latter are not supported by scikit-learn.)
The CSR representation can be very efficient for computations, but it is not
as good for adding elements. For that, the LIL (List-In-List) representation
is better:
End of explanation
X_csr = X_lil.tocsr()
print(X_csr)
print(type(X_csr))
Explanation: Often, once an LIL matrix is created, it is useful to convert it to a CSR format
(many scikit-learn algorithms require CSR or CSC format)
End of explanation
%matplotlib inline
import matplotlib.pyplot as plt
# Plotting a line
x = np.linspace(0, 10, 100)
plt.plot(x, np.sin(x));
# Scatter-plot points
x = np.random.normal(size=500)
y = np.random.normal(size=500)
plt.scatter(x, y);
# Showing images using imshow
# - note that origin is at the top-left by default!
x = np.linspace(1, 12, 100)
y = x[:, np.newaxis]
im = y * np.sin(x) * np.cos(y)
print(im.shape)
plt.imshow(im);
# Contour plots
# - note that origin here is at the bottom-left by default!
plt.contour(im);
# 3D plotting
from mpl_toolkits.mplot3d import Axes3D
ax = plt.axes(projection='3d')
xgrid, ygrid = np.meshgrid(x, y.ravel())
ax.plot_surface(xgrid, ygrid, im, cmap=plt.cm.jet, cstride=2, rstride=2, linewidth=0);
Explanation: The available sparse formats that can be useful for various problems are:
CSR (compressed sparse row)
CSC (compressed sparse column)
BSR (block sparse row)
COO (coordinate)
DIA (diagonal)
DOK (dictionary of keys)
LIL (list in list)
The scipy.sparse submodule also has a lot of functions for sparse matrices
including linear algebra, sparse solvers, graph algorithms, and much more.
matplotlib
Another important part of machine learning is the visualization of data. The most common
tool for this in Python is matplotlib. It is an extremely flexible package, and
we will go over some basics here.
Since we are using Jupyter notebooks, let us use one of IPython's convenient built-in "magic functions", the "matplotlib inline" mode, which will draw the plots directly inside the notebook.
End of explanation
# %load http://matplotlib.org/mpl_examples/pylab_examples/ellipse_collection.py
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import EllipseCollection
x = np.arange(10)
y = np.arange(15)
X, Y = np.meshgrid(x, y)
XY = np.hstack((X.ravel()[:,np.newaxis], Y.ravel()[:,np.newaxis]))
ww = X/10.0
hh = Y/15.0
aa = X*9
fig, ax = plt.subplots()
ec = EllipseCollection(ww, hh, aa, units='x', offsets=XY,
transOffset=ax.transData)
ec.set_array((X+Y).ravel())
ax.add_collection(ec)
ax.autoscale_view()
ax.set_xlabel('X')
ax.set_ylabel('y')
cbar = plt.colorbar(ec)
cbar.set_label('X+Y')
plt.show()
Explanation: There are many, many more plot types available. One useful way to explore these is by
looking at the matplotlib gallery.
You can test these examples out easily in the notebook: simply copy the Source Code
link on each page, and put it in a notebook using the %load magic.
For example:
End of explanation |
14,719 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Contents index
Correlation analysis --- the cov, diagonal, trace and corrcoef functions
Polynomial fitting --- the polyfit, polyval, roots and polyder functions
Computing on-balance volume --- the sign and piecewise functions
Simulating the trading process --- the vectorize and round functions
Data smoothing --- the hanning function
Step1: 1. Stock correlation analysis
In this example we use two sample data sets of closing prices. The first company is BHP Billiton (BHP), whose main business is oil, metal and diamond mining; the second is Vale (VALE), also a metal mining company, so parts of their business overlap. Let's analyse the correlation between their stocks.
Step2: Covariance describes how two variables vary together; it is essentially the correlation coefficient before normalisation.
Use the cov function to compute the covariance matrix of the stock returns
Step3: The correlation coefficient measures how strongly the two stocks are related. It ranges from -1 to 1, and the correlation of a data set with itself is 1. Use the corrcoef function to compute it.
Step4: The correlation matrix is symmetric about its diagonal: the correlation of BHP with VALE equals that of VALE with BHP. A coefficient of about 0.68 suggests the relationship is not especially strong.
Check whether the price movements of the two stocks are in sync
If their difference deviates from the mean difference by more than two standard deviations, we consider them out of sync
Step5: This shows that the last closing prices are not in sync, so we should not trade for now
Step6: 2. Polynomial fitting
NumPy's polyfit function fits a polynomial to a series of data points, whether or not they come from a continuous function.
Step7: Ideally, the smaller the difference between the BHP and VALE closing prices, the better. In the limit, the difference could reach 0 at some point. Use the roots function to find when the fitted polynomial reaches 0.
Step8: Finding the extrema
Step9: Plotting the fitted curve
Step10: 3. Computing the on-balance volume
Volume reflects the size of price moves. On-Balance Volume (OBV) is computed from the current close, the previous close and the current volume.
Take the previous day as the base period (its OBV can be taken as 0). If today's close is higher than yesterday's, today's OBV is the base OBV plus today's volume; otherwise the volume is subtracted.
So we need to multiply the volume by a sign determined by the change in closing price.
Step11: Use NumPy's sign function to return the sign of each element.
Step12: Use NumPy's piecewise function to get the sign of the array elements. piecewise evaluates values piece by piece according to the given conditions.
Step13: 4. Simulating the trading process
The vectorize function reduces the number of explicit loops in your program; NumPy's vectorize is the counterpart of Python's map. We use it to compute the profit of a single trading day.
Step14: We try to buy at a price slightly below the opening price. If that price is not within the day's range, the buy attempt fails and there is neither profit nor loss, so we return 0. Otherwise we sell at the day's close, and the profit is the difference between the buy and sell prices.
Step15: 5. Data smoothing
Noisy data is hard to work with, so we smooth it.
The hanning function is a weighted-cosine window function; we use it to smooth the array of stock returns.
For the discrete convolution function convolve, see the convolve function documentation
Step16: The lines in the figure cross; those crossing points may be turning points in the price trend, or at least show that the relationship between BHP and VALE has changed. Such turning points may occur often, and we could use them to predict future price movements. | Python Code:
%matplotlib inline
import numpy as np
from matplotlib.pyplot import plot
from matplotlib.pyplot import show
Explanation: Contents index
Correlation analysis --- the cov, diagonal, trace and corrcoef functions
Polynomial fitting --- the polyfit, polyval, roots and polyder functions
Computing on-balance volume --- the sign and piecewise functions
Simulating the trading process --- the vectorize and round functions
Data smoothing --- the hanning function
End of explanation
# First read the closing prices of the two stocks and compute the returns
bhp_cp = np.loadtxt('BHP.csv', delimiter=',', usecols=(6,), unpack=True)
vale_cp = np.loadtxt('VALE.csv', delimiter=',', usecols=(6,), unpack=True)
bhp_returns = np.diff(bhp_cp) / bhp_cp[:-1]
vale_returns = np.diff(vale_cp) / vale_cp[:-1]
Explanation: 1. Stock correlation analysis
In this example we use two sample data sets of closing prices. The first company is BHP Billiton (BHP), whose main business is oil, metal and diamond mining; the second is Vale (VALE), also a metal mining company, so parts of their business overlap. Let's analyse the correlation between their stocks.
End of explanation
covariance = np.cov(bhp_returns, vale_returns)
print 'Covariance:\n', covariance
# Look at the diagonal elements of the covariance matrix
print 'Covariance diagonal:\n', covariance.diagonal()
# Compute the trace of the matrix, i.e. the sum of the diagonal
print 'Covariance trace:\n', covariance.trace()
# The correlation coefficient is the covariance divided by the product of the standard deviations
print 'Correlation coefficient:\n', covariance / (bhp_returns.std() * vale_returns.std())
Explanation: Covariance describes how two variables vary together; it is essentially the correlation coefficient before normalisation.
Use the cov function to compute the covariance matrix of the stock returns
End of explanation
# Using corrcoef directly is more accurate
print 'Correlation coefficient:\n', np.corrcoef(bhp_returns, vale_returns)
Explanation: The correlation coefficient measures how strongly the two stocks are related. It ranges from -1 to 1, and the correlation of a data set with itself is 1. Use the corrcoef function to compute it.
End of explanation
difference = bhp_cp - vale_cp
avg = np.mean(difference)
dev = np.std(difference)
# Check whether the last closing prices are in sync
print "Out of sync : ", np.abs(difference[-1] - avg) > 2*dev
Explanation: The correlation matrix is symmetric about its diagonal: the correlation of BHP with VALE equals that of VALE with BHP. A coefficient of about 0.68 suggests the relationship is not especially strong.
Check whether the price movements of the two stocks are in sync
If their difference deviates from the mean difference by more than two standard deviations, we consider them out of sync
End of explanation
# Plot the return curves
t = np.arange(len(bhp_returns))
plot(t, bhp_returns, lw=1)
plot(t, vale_returns, lw=2)
show()
Explanation: This shows that the last closing prices are not in sync, so we should not trade for now
End of explanation
# Fit the difference of the two closing prices with a cubic polynomial
t = np.arange(len(bhp_cp))
poly = np.polyfit(t, bhp_cp-vale_cp, 3)
print "Polynomial fit\n", poly
# Use the resulting polynomial to extrapolate the next value
print "Next value: ", np.polyval(poly, t[-1]+1)
Explanation: 2. Polynomial fitting
NumPy's polyfit function fits a polynomial to a series of data points, whether or not they come from a continuous function.
End of explanation
print "Roots: ", np.roots(poly)
Explanation: Ideally, the smaller the difference between the BHP and VALE closing prices, the better. In the limit, the difference could reach 0 at some point. Use the roots function to find when the fitted polynomial reaches 0.
End of explanation
# The extrema are located where the derivative is zero
der = np.polyder(poly)
print "Dervative:\n", der
# 得到多项式导函数的系数
# 求出导函数的根,即找出原多项式函数的极值点
print "Extremas: ", np.roots(der)
# 通过argmax和argmin函数找到最大最小值点来检查结果
vals = np.polyval(poly, t)
print "Maximum index: ", np.argmax(vals)
print "Minimum index: ", np.argmin(vals)
Explanation: Finding the extrema
End of explanation
plot(t, bhp_cp-vale_cp)
plot(t, vals)
show()
Explanation: Plotting the fitted curve
End of explanation
cp, volume = np.loadtxt('BHP.csv', delimiter=',', usecols=(6,7), unpack=True)
change = np.diff(cp)
print "Change:", change
Explanation: 3. Computing the on-balance volume
Volume reflects the size of price moves. On-Balance Volume (OBV) is computed from the current close, the previous close and the current volume.
Take the previous day as the base period (its OBV can be taken as 0). If today's close is higher than yesterday's, today's OBV is the base OBV plus today's volume; otherwise the volume is subtracted.
So we need to multiply the volume by a sign determined by the change in closing price.
End of explanation
signs = np.sign(change)
print "Signs:\n", signs
Explanation: Use NumPy's sign function to return the sign of each element.
End of explanation
pieces = np.piecewise(change, [change<0, change>0], [-1,1])
print "Pieces:\n", pieces
# Check that the two results agree
print "Arrays equal?", np.array_equal(signs, pieces)
# The OBV value depends on the previous day's closing price
print "On balance volume: \n", volume[1:]*signs
Explanation: Use NumPy's piecewise function to get the sign of the array elements. piecewise evaluates values piece by piece according to the given conditions.
End of explanation
# Read in the data
# op is the opening price, hp is the highest price
# lp is the lowest price, cp is the closing price
op, hp, lp, cp = np.loadtxt('BHP.csv', delimiter=',', usecols=(3,4,5,6), unpack=True)
Explanation: 4. Simulating the trading process
The vectorize function reduces the number of explicit loops in your program; NumPy's vectorize is the counterpart of Python's map. We use it to compute the profit of a single trading day.
End of explanation
def calc_profit(op, high, low, close):
    # Buy at the opening price; how many shares are bought is ignored here
buy = op
if low < buy < high:
return (close-buy) / buy
else:
return 0
# Vectorize the function so that explicit loops can be avoided
func = np.vectorize(calc_profit)
profits = func(op, hp, lp, cp)
print 'Profits:\n', profits
# Select the trading days with non-zero profit and compute the mean
real_trades = profits[profits != 0]
print 'Number of trades:\n', len(real_trades), round(100.0 * len(real_trades)/len(cp), 2),"%"
print "Average profit/loss % :", round(np.mean(real_trades) * 100, 2)
# Select the profitable trading days and compute the average profit
winning_trades = profits[profits > 0]
print "Number of winning trades", len(winning_trades), round(100.0*len(winning_trades)/len(cp),2),"%"
print "Average profit %", round(np.mean(winning_trades) *100, 2)
# Select the losing trading days and compute the average loss
losing_trades = profits[profits < 0]
print "Number of winning trades", len(losing_trades), round(100.0*len(losing_trades)/len(cp),2),"%"
print "Average profit %", round(np.mean(losing_trades) *100, 2)
Explanation: We try to buy at a price slightly below the opening price. If that price is not within the day's range, the buy attempt fails and there is neither profit nor loss, so we return 0. Otherwise we sell at the day's close, and the profit is the difference between the buy and sell prices.
End of explanation
# Call the hanning function to compute the weights, generating a window of length N
# Here N is 8
N = 8
weights = np.hanning(N)
print "Weights:\n", weights
# First read the closing prices of the two stocks and compute the returns
bhp_cp = np.loadtxt('BHP.csv', delimiter=',', usecols=(6,), unpack=True)
vale_cp = np.loadtxt('VALE.csv', delimiter=',', usecols=(6,), unpack=True)
bhp_returns = np.diff(bhp_cp) / bhp_cp[:-1]
vale_returns = np.diff(vale_cp) / vale_cp[:-1]
# The convolve function performs discrete, linear convolution
smooth_bhp = np.convolve(weights/weights.sum(), bhp_returns)[N-1 : -N+1]
smooth_vale = np.convolve(weights/weights.sum(), vale_returns)[N-1 : -N+1]
from matplotlib.pyplot import legend
t = np.arange(N-1, len(bhp_returns))
plot(t, bhp_returns[N-1:], lw=1.0, label='bhp returns')
plot(t, smooth_bhp, lw=2.0, label='smooth bhp')
plot(t, vale_returns[N-1:], lw=1.0, label='vale returns')
plot(t, smooth_vale, lw=2.0, label='smooth vale')
legend(loc='best')
show()
Explanation: 5. Data smoothing
Noisy data is hard to work with, so we smooth it.
The hanning function is a weighted-cosine window function; we use it to smooth the array of stock returns.
For the discrete convolution function convolve, see the convolve function documentation
End of explanation
import matplotlib.pyplot as plt
# Fit the smoothed data with polynomials
K = 5
t = np.arange(N-1, len(bhp_returns))
poly_bhp = np.polyfit(t, smooth_bhp, K)
poly_vale = np.polyfit(t, smooth_vale, K)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, smooth_bhp, label="smooth bhp")
poly_bhp_value = np.polyval(poly_bhp, t)
ax1.plot(t, poly_bhp_value, label='poly bhp')
plt.legend()
ax2 = fig.add_subplot(212)
ax2.plot(t, smooth_vale, label="smooth vale")
poly_vale_value = np.polyval(poly_vale, t)
ax2.plot(t, poly_vale_value, label='poly vale')
plt.legend()
show()
# Get the x coordinates of the crossing points
# by taking the difference of the two polynomials and finding its roots
poly_sub = np.polysub(poly_bhp, poly_vale)
xpoints = np.roots(poly_sub)
print "Intersection points:", xpoints
# Check whether the roots are real numbers
# select picks out the real ones
reals = np.isreal(xpoints)
print "Real number?",reals
xpoints = np.select([reals], [xpoints])
xpoints = xpoints.real
print "Real intersection points:", xpoints
# Remove the zero elements
# trim_zeros strips leading and trailing zeros from a one-dimensional array
print "Sans 0s", np.trim_zeros(xpoints)
Explanation: The lines in the figure cross; those crossing points may be turning points in the price trend, or at least show that the relationship between BHP and VALE has changed. Such turning points may occur often, and we could use them to predict future price movements.
End of explanation |
14,720 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
An RNN model to generate sequences
RNN models can generate long sequences based on past data. This can be used to predict stock markets, temperatures, traffic or sales data based on past patterns. They can also be adapted to generate text. The quality of the prediction will depend on training data, network architecture, hyperparameters, the distance in time at which you are predicting and so on. But most importantly, it will depend on whether your training data contains examples of the behaviour patterns you are trying to predict.
<div class="alert alert-block alert-warning">
This is the solution file. The corresponding tutorial file is [01_RNN_generator_playground.ipynb](01_RNN_generator_playground.ipynb)
</div>
Step1: Generate fake dataset
Step2: Hyperparameters
Step3: Visualize training sequences
This is what the neural network will see during training.
Step4: The model definition
Step5: Instantiate the model
Step6: Inference
This is a generative model
Step7: Initialize Tensorflow session
This resets all neuron weights and biases to initial random values
Step8: The training loop
You can re-execute this cell to continue training | Python Code:
import math
import numpy as np
from matplotlib import pyplot as plt
import utils_prettystyle
import utils_batching
import utils_display
import tensorflow as tf
print("Tensorflow version: " + tf.__version__)
Explanation: An RNN model to generate sequences
RNN models can generate long sequences based on past data. This can be used to predict stock markets, temperatures, traffic or sales data based on past patterns. They can also be adapted to generate text. The quality of the prediction will depend on training data, network architecture, hyperparameters, the distance in time at which you are predicting and so on. But most importantly, it will depend on whether your training data contains examples of the behaviour patterns you are trying to predict.
<div class="alert alert-block alert-warning">
This is the solution file. The corresponding tutorial file is [01_RNN_generator_playground.ipynb](01_RNN_generator_playground.ipynb)
</div>
End of explanation
WAVEFORM_SELECT = 0  # select 0, 1 or 2
def create_time_series(datalen):
# good waveforms
frequencies = [(0.2, 0.15), (0.35, 0.3), (0.6, 0.55)]
freq1, freq2 = frequencies[WAVEFORM_SELECT]
noise = [np.random.random()*0.1 for i in range(datalen)]
x1 = np.sin(np.arange(0,datalen) * freq1) + noise
x2 = np.sin(np.arange(0,datalen) * freq2) + noise
x = x1 + x2
return x.astype(np.float32)
DATA_SEQ_LEN = 1024*128
data = create_time_series(DATA_SEQ_LEN)
plt.plot(data[:512])
plt.show()
Explanation: Generate fake dataset
End of explanation
NB_EPOCHS = 10
RNN_CELLSIZE = 80 # size of the RNN cells
N_LAYERS = 2 # number of stacked RNN cells (needed for tensor shapes but code must be changed manually)
SEQLEN = 32 # unrolled sequence length
BATCHSIZE = 32 # mini-batch size
DROPOUT_PKEEP = 0.7 # dropout: probability of neurons being kept (NOT dropped). Should be between 0.5 and 1.
Explanation: Hyperparameters
End of explanation
# The function dumb_minibatch_sequencer splits the data into batches of sequences sequentially.
for features, labels, epoch in utils_batching.dumb_minibatch_sequencer(data, BATCHSIZE, SEQLEN, nb_epochs=1):
break
print("Features shape: " + str(features.shape))
print("Labels shape: " + str(labels.shape))
print("Excerpt from first batch:")
utils_display.picture_this_7(features)
Explanation: Visualize training sequences
This is what the neural network will see during training.
End of explanation
def model_rnn_fn(features, Hin, labels, step, dropout_pkeep):
X = features
batchsize = tf.shape(X)[0]
seqlen = tf.shape(X)[1]
cells = [tf.nn.rnn_cell.GRUCell(RNN_CELLSIZE) for _ in range(N_LAYERS)]
# add dropout between the cell layers
cells[:-1] = [tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob = dropout_pkeep) for cell in cells[:-1]]
# but no dropout after last cell layer: a small (80->1) regresion layer does not like its inputs being dropped.
# a stacked RNN cell still works like an RNN cell
cell = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=False)
# X[BATCHSIZE, SEQLEN, 1], Hin[BATCHSIZE, RNN_CELLSIZE*N_LAYERS]
# the sequence unrolling happens here
Yn, H = tf.nn.dynamic_rnn(cell, X, initial_state=Hin)
# Yn[BATCHSIZE, SEQLEN, RNN_CELLSIZE]
Yn = tf.reshape(Yn, [batchsize*seqlen, RNN_CELLSIZE])
Yr = tf.layers.dense(Yn, 1) # Yr [BATCHSIZE*SEQLEN, 1]
Yr = tf.reshape(Yr, [batchsize, seqlen, 1]) # Yr [BATCHSIZE, SEQLEN, 1]
Yout = Yr[:,-1,:] # Last output Yout [BATCHSIZE, 1]
loss = tf.losses.mean_squared_error(Yr, labels) # labels[BATCHSIZE, SEQLEN, 1]
lr = 0.001 + tf.train.exponential_decay(0.01, step, 400, 0.5) # 0.001+0.01*0.5^(step/400)
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
train_op = optimizer.minimize(loss)
return Yout, H, loss, train_op
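# Illustrative shape note (an added aside with assumed example values, not from the tutorial):
# with BATCHSIZE=32 and SEQLEN=16, X entering model_rnn_fn is [32, 16, 1], Yn from dynamic_rnn
# is [32, 16, RNN_CELLSIZE], Yr is reshaped back to [32, 16, 1], and the returned state H is
# [32, RNN_CELLSIZE * N_LAYERS] because state_is_tuple=False concatenates the layer states.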
Explanation: The model definition
<div style="text-align: right; font-family: monospace">
X shape [BATCHSIZE, SEQLEN, 1]<br/>
Y shape [BATCHSIZE, SEQLEN, 1]<br/>
H shape [BATCHSIZE, RNN_CELLSIZE*NLAYERS]
</div>
When executed, this function instantiates the Tensorflow graph for our model.
End of explanation
tf.reset_default_graph() # restart model graph from scratch
# placeholder for inputs
Hin = tf.placeholder(tf.float32, [None, RNN_CELLSIZE * N_LAYERS])
features = tf.placeholder(tf.float32, [None, None, 1]) # [BATCHSIZE, SEQLEN, 1]
labels = tf.placeholder(tf.float32, [None, None, 1]) # [BATCHSIZE, SEQLEN, 1]
step = tf.placeholder(tf.int32)
dropout_pkeep = tf.placeholder(tf.float32)
# instantiate the model
Yout, H, loss, train_op = model_rnn_fn(features, Hin, labels, step, dropout_pkeep)
Explanation: Instantiate the model
End of explanation
def prediction_run(prime_data, run_length):
H_ = np.zeros([1, RNN_CELLSIZE * N_LAYERS]) # zero state initially
Yout_ = np.zeros([1, 1])
data_len = prime_data.shape[0]
# prime the state from data
if data_len > 0:
Yin = np.array(prime_data)
Yin = np.reshape(Yin, [1, data_len, 1]) # reshape as one sequence
feed = {Hin: H_, features: Yin, dropout_pkeep: 1.0} # no dropout during inference
Yout_, H_ = sess.run([Yout, H], feed_dict=feed)
# run prediction
# To generate a sequence, run a trained cell in a loop passing as input and input state
# respectively the output and output state from the previous iteration.
results = []
for i in range(run_length):
Yout_ = np.reshape(Yout_, [1, 1, 1]) # batch of a single sequence of a single vector with one element
feed = {Hin: H_, features: Yout_, dropout_pkeep: 1.0} # no dropout during inference
Yout_, H_ = sess.run([Yout, H], feed_dict=feed)
results.append(Yout_[0,0])
return np.array(results)
Explanation: Inference
This is a generative model: run one trained RNN cell in a loop
End of explanation
# first input state
Hzero = np.zeros([BATCHSIZE, RNN_CELLSIZE * N_LAYERS])
# variable initialization
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run([init])
Explanation: Initialize Tensorflow session
This resets all neuron weights and biases to initial random values
End of explanation
H_ = Hzero
losses = []
indices = []
for i, (next_features, next_labels, epoch) in enumerate(utils_batching.rnn_minibatch_sequencer(data, BATCHSIZE, SEQLEN, nb_epochs=NB_EPOCHS)):
next_features = np.expand_dims(next_features, axis=2) # model wants 3D inputs [BATCHSIZE, SEQLEN, 1]
next_labels = np.expand_dims(next_labels, axis=2)
feed = {Hin: H_, features: next_features, labels: next_labels, step: i, dropout_pkeep: DROPOUT_PKEEP}
Yout_, H_, loss_, _ = sess.run([Yout, H, loss, train_op], feed_dict=feed)
# print progress
if i%100 == 0:
print("epoch " + str(epoch) + ", batch " + str(i) + ", loss=" + str(np.mean(loss_)))
if i%10 == 0:
losses.append(np.mean(loss_))
indices.append(i)
plt.ylim(ymax=np.amax(losses[1:])) # ignore first value for scaling
plt.plot(indices, losses)
plt.show()
PRIMELEN=256
RUNLEN=512
OFFSET=20
RMSELEN=128
prime_data = data[OFFSET:OFFSET+PRIMELEN]
results = prediction_run(prime_data, RUNLEN)
utils_display.picture_this_8(data, prime_data, results, OFFSET, PRIMELEN, RUNLEN, RMSELEN)
Explanation: The training loop
You can re-execute this cell to continue training
End of explanation |
14,721 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Lesson 1 & 2
Step1: Set up Structure for VGG 16
Step2: Remove Last Dense Layer (1000 ImageNet Classes) and add a Dense Layer for 2 Classes
Step3: Train Cats vs. Dogs model on dataset in batches
Step4: Train last layer in updated VGG model (with a higher learning rate)
This is done in order to get the last layer's weights to be "in the ballpark" before retraining all dense layers
Step5: Re-train all dense layers excluding dropout (since we see there is underfitting of the dataset)
Step6: View predictions of Images in the Validation Set
Step7: Correct Images (Both Cats and Dogs)
Step8: Incorrect Images (Both Cats and Dogs)
Step9: Confident Cat Correct Classifications
Step10: Confident Dog Correct Classifications
Step11: Most Confident Dogs, but were Cats
Step12: Most Confident Cats, but were Dogs
Step13: Most Uncertain (i.e. probability close to 0.5) | Python Code:
import tensorflow as tf
#path = 'data/dogscats/sample'
path = 'data/dogscats/'
import os
import json
from glob import glob
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import image as mpimg
%matplotlib inline
from tensorflow.contrib.keras.python.keras.models import Model, Sequential
from tensorflow.contrib.keras.python.keras.layers import Conv2D, Dense, Flatten, Input, MaxPooling2D, Dropout
from tensorflow.contrib.keras.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.contrib.keras.python.keras.optimizers import Adam
Explanation: Lesson 1 & 2: Using Convolutional Neural Networks
Exercises from fast.ai
Author: Chris Shih
End of explanation
def vgg_preprocess(x):
x[:, :, 0] -= 103.939
x[:, :, 1] -= 116.779
x[:, :, 2] -= 123.68
return x
vgg = Sequential()
# Conv Block 1
vgg.add(Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1', input_shape=(224, 224, 3)))
vgg.add(Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2'))
vgg.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block1_pool'))
# Conv Block 2
vgg.add(Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1'))
vgg.add(Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2'))
vgg.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block2_pool'))
# Conv Block 3
vgg.add(Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1'))
vgg.add(Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2'))
vgg.add(Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3'))
vgg.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block3_pool'))
# Conv Block 4
vgg.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1'))
vgg.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2'))
vgg.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3'))
vgg.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block4_pool'))
# Conv Block 5
vgg.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1'))
vgg.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2'))
vgg.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3'))
vgg.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block5_pool'))
# Full Connected Layers
vgg.add(Flatten(name='flatten'))
vgg.add(Dense(4096, activation='relu', name='fc1'))
vgg.add(Dropout(0.5, name='fc1_drop'))
vgg.add(Dense(4096, activation='relu', name='fc2'))
vgg.add(Dropout(0.5, name='fc2_drop'))
vgg.add(Dense(1000, activation='softmax', name='predictions'))
# Load VGG Weights
vgg.load_weights('data/vgg16_tf.h5')
vgg.summary()
Explanation: Set up Structure for VGG 16
End of explanation
vgg.pop()
for layer in vgg.layers:
layer.trainable=False
vgg.add(Dense(2, activation='softmax', name='predictions'))
vgg.summary()
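# A quick sanity check one could add here (illustrative, not from the original notebook):
# only the newly added 2-way softmax layer should contribute trainable parameters now.
print(sum(layer.count_params() for layer in vgg.layers if layer.trainable))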
Explanation: Remove Last Dense Layer (1000 ImageNet Classes) and add a Dense Layer for 2 Classes
End of explanation
batch_size = 32
datagen = ImageDataGenerator(
preprocessing_function=vgg_preprocess,
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.15,
zoom_range=0.1,
channel_shift_range=10,
horizontal_flip=True)
batches = datagen.flow_from_directory(os.path.join(path,'train'), target_size=(224,224),
class_mode='categorical', shuffle=True, batch_size=batch_size)
val_batches = datagen.flow_from_directory(os.path.join(path,'valid'), target_size=(224,224),
class_mode='categorical', shuffle=True, batch_size=batch_size)
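# Aside (an alternative, not what the original does): augmenting validation images is
# uncommon; a plain generator with only the VGG preprocessing could be used instead, e.g.
val_datagen = ImageDataGenerator(preprocessing_function=vgg_preprocess)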
Explanation: Train Cats vs. Dogs model on dataset in batches
End of explanation
vgg.compile(optimizer=Adam(lr=0.001),loss='categorical_crossentropy',metrics=['accuracy'])
vgg.fit_generator(batches, steps_per_epoch=1000, epochs=2,
validation_data=val_batches, validation_steps=100, verbose=2)
vgg.fit_generator(batches, steps_per_epoch=1000, epochs=5,
validation_data=val_batches, validation_steps=100, verbose=2)
Explanation: Train last layer in updated VGG model (with a higher learning rate)
This is done in order to get the last layer's weights to be "in the ballpark" before retraining all dense layers
End of explanation
vgg.fit_generator(batches, steps_per_epoch=1000, epochs=3,
validation_data=val_batches, validation_steps=100, verbose=2)
vgg.save_weights('vgg_model_v2.h5')
vgg.load_weights('vgg_model_v2.h5')
Explanation: Re-train all dense layers excluding dropout (since we see there is underfitting of the dataset)
End of explanation
val_total = datagen.flow_from_directory(os.path.join(path,'valid'), target_size=(224, 224),
class_mode=None, batch_size=1)
val_data = np.concatenate([val_total.next() for i in range(val_total.n)])
preds = vgg.predict_classes(val_data, batch_size=batch_size, verbose=2)
probs = vgg.predict_proba(val_data, batch_size=batch_size, verbose=2)[:,0]
filenames = val_total.filenames
val_classes = val_total.classes
n_view = 4
def plot_cats_dogs(n_view, idx):
fig = plt.figure(figsize=(12,6))
for i in range(n_view):
fig.add_subplot(1, n_view, i+1)
plt.imshow(mpimg.imread(os.path.join(path,'valid',filenames[idx[i]])))
plt.title('prob: ' + str(probs[idx[i]]))
Explanation: View predictions of Images in the Validation Set
End of explanation
correct = np.where(preds==val_classes)[0]
np.random.shuffle(correct)
corr_idx = correct[:n_view]
plot_cats_dogs(n_view, corr_idx)
Explanation: Correct Images (Both Cats and Dogs)
End of explanation
incorrect = np.where(preds!=val_classes)[0]
np.random.shuffle(incorrect)
incorr_idx = incorrect[:n_view]
plot_cats_dogs(n_view, incorr_idx)
Explanation: Incorrect Images (Both Cats and Dogs)
End of explanation
correct_cats = np.where((preds==0) & (preds==val_classes))[0]
most_correct_cats = np.argsort(probs[correct_cats])[::-1][:n_view]
plot_cats_dogs(n_view, correct_cats[most_correct_cats])
Explanation: Confident Cat Correct Classifications
End of explanation
correct_dogs = np.where((preds==1) & (preds==val_classes))[0]
most_correct_dogs = np.argsort(probs[correct_dogs])[::-1][:n_view]
plot_cats_dogs(n_view, correct_dogs[most_correct_dogs])
Explanation: Confident Dog Correct Classifications
End of explanation
incorrect_dogs = np.where((preds==1) & (preds!=val_classes))[0]
most_incorrect_dogs = np.argsort(probs[incorrect_dogs])[::-1][:n_view]
plot_cats_dogs(n_view, incorrect_dogs[most_incorrect_dogs])
Explanation: Most Confident Dogs, but were Cats
End of explanation
incorrect_cats = np.where((preds==0) & (preds!=val_classes))[0]
most_incorrect_cats = np.argsort(probs[incorrect_cats])[::-1][:n_view]
plot_cats_dogs(n_view, incorrect_cats[most_incorrect_cats])
Explanation: Most Confident Cats, but were Dogs
End of explanation
most_uncertain = np.argsort(np.abs(probs-0.5))
plot_cats_dogs(n_view, most_uncertain)
Explanation: Most Uncertain (i.e. probability close to 0.5)
End of explanation |
14,722 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
San Francisco Crime Classification
Predict the category of crimes that occurred in the city by the bay
From 1934 to 1963, San Francisco was infamous for housing some of the world's most notorious criminals on the inescapable island of Alcatraz.
Today, the city is known more for its tech scene than its criminal past. But, with rising wealth inequality, housing shortages, and a proliferation of expensive digital toys riding BART to work, there is no scarcity of crime in the city by the bay.
From Sunset to SOMA, and Marina to Excelsior, this competition's dataset provides nearly 12 years of crime reports from across all of San Francisco's neighborhoods. Given time and location, you must predict the category of crime that occurred.
What we will do here
Training a machine learning model with scikit-learn
Use the K-nearest neighbors classification model
How does K-Nearest Neighbors (KNN) classification work?
Pick a value for K.
Search for the K observations in the training data that are "nearest" to the measurements of the crime category.
Use the most popular response value from the K nearest neighbors as the predicted response value for the unknown crime category.
Resources
Nearest Neighbors (user guide), KNeighborsClassifier (class documentation)
Step1: Time vs. Day by category
Step3: scikit-learn 4-step modeling pattern
Step 1
Step4: Logarithmic Loss
https
Step5: based on the log loss we can see that around 40 is optimal for k. Now lets predict using the test data
Step6: Can we do better?
Maybe we can try with more features than just lat and lon
But first, our data is not very useful as text. So let's map the strings to ints so that we can use them later | Python Code:
# Step 1 - importing classes we plan to use
import csv as csv
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
import seaborn as sns
# show plots inline
%matplotlib inline
#
# Preparing the data
#
data = pd.read_csv('../input/train.csv',parse_dates=['Dates'], dtype={"X": np.float64,"Y": np.float64}, )
# Add column containing day of week expressed in integer
dow = {
'Monday':0,
'Tuesday':1,
'Wednesday':2,
'Thursday':3,
'Friday':4,
'Saturday':5,
'Sunday':6
}
data['DOW'] = data.DayOfWeek.map(dow)
# Add column containing time of day
data['Hour'] = pd.to_datetime(data.Dates).dt.hour
# display the first 5 rows
data.head()
Explanation: San Francisco Crime Classification
Predict the category of crimes that occurred in the city by the bay
From 1934 to 1963, San Francisco was infamous for housing some of the world's most notorious criminals on the inescapable island of Alcatraz.
Today, the city is known more for its tech scene than its criminal past. But, with rising wealth inequality, housing shortages, and a proliferation of expensive digital toys riding BART to work, there is no scarcity of crime in the city by the bay.
From Sunset to SOMA, and Marina to Excelsior, this competition's dataset provides nearly 12 years of crime reports from across all of San Francisco's neighborhoods. Given time and location, you must predict the category of crime that occurred.
What we will do here
Training a machine learning model with scikit-learn
Use the K-nearest neighbors classification model
How does K-Nearest Neighbors (KNN) classification work?
Pick a value for K.
Search for the K observations in the training data that are "nearest" to the measurements of the crime category.
Use the most popular response value from the K nearest neighbors as the predicted response value for the unknown crime category.
Resources
Nearest Neighbors (user guide), KNeighborsClassifier (class documentation)
End of explanation
# Retrieve categories list
cats = pd.Series(data.Category.values.ravel()).unique()
cats.sort()
#
# First, take a look at the total of all categories
#
plt.figure(1,figsize=(8,4))
plt.hist2d(
data.Hour.values,
data.DOW.values,
bins=[24,7],
range=[[-0.5,23.5],[-0.5,6.5]],
cmap=plt.cm.rainbow
)
plt.xticks(np.arange(0,24,6))
plt.xlabel('Time of Day')
plt.yticks(np.arange(0,7),['Mon','Tue','Wed','Thu','Fri','Sat','Sun'])
plt.ylabel('Day of Week')
plt.gca().invert_yaxis()
plt.title('Occurrence by Time and Day - All Categories')
#
# Now look into each category
#
plt.figure(2,figsize=(16,9))
plt.subplots_adjust(hspace=0.5)
for i in np.arange(1,cats.size + 1):
ax = plt.subplot(5,8,i)
ax.set_title(cats[i - 1],fontsize=10)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
plt.hist2d(
data[data.Category==cats[i - 1]].Hour.values,
data[data.Category==cats[i - 1]].DOW.values,
bins=[24,7],
range=[[-0.5,23.5],[-0.5,6.5]],
cmap=plt.cm.rainbow
)
plt.gca().invert_yaxis()
Explanation: Time vs. Day by category
End of explanation
# Separate test and train set out of original train set.
msk = np.random.rand(len(data)) < 0.8
knn_train = data[msk]
knn_test = data[~msk]
n = len(knn_test)
print("Original size: %s" % len(data))
print("Train set: %s" % len(knn_train))
print("Test set: %s" % len(knn_test))
# Prepare data sets
x = knn_train[['X', 'Y']]
y = knn_train['Category'].astype('category')
actual = knn_test['Category'].astype('category')
# Fit
import scipy as sp
def llfun1(act, pred):
epsilon = 1e-15
pred = sp.maximum(epsilon, pred)
pred = sp.minimum(1-epsilon, pred)
ll = sum(act*sp.log(pred) + sp.subtract(1,act)*sp.log(sp.subtract(1,pred)))
ll = ll * -1.0/len(act)
return ll
def llfun(act, pred):
    """Logloss function for 1/0 probability"""
return (-(~(act == pred)).astype(int) * math.log(1e-15)).sum() / len(act)
logloss = []
for i in range(1, 50, 1):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(x, y)
# Predict on test set
outcome = knn.predict(knn_test[['X', 'Y']])
# Logloss
logloss.append(llfun(actual, outcome))
Explanation: scikit-learn 4-step modeling pattern
Step 1: Import the class you plan to use
Step 2: "Instantiate" the "estimator"
* "Estimator" is scikit-learn's term for model
* "Instantiate" means "make an instance of"
* Name of the object does not matter
* Can specify tuning parameters (aka "hyperparameters") during this step
* All parameters not specified are set to their defaults
Step 3: Fit the model with data (aka "model training")
* Model is learning the relationship between X and y
* Occurs in-place
Step 4: Predict the response for a new observation
* New observations are called "out-of-sample" data
* Uses the information it learned during the model training process
Fitting the data
Ok, so what are we actually trying to do? Given location, you must predict the category of crime that occurred.
* Store feature matrix in X - this will be the location inputs
* Store response in y - this will be the category of crime, since that is what we are predicting
End of explanation
plt.plot(logloss)
plt.savefig('n_neighbors_vs_logloss.png')
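# An alternative metric check (sketch only, kept commented out because predict_proba on
# this split is slow): scikit-learn's multiclass log loss works on predicted probabilities.
# from sklearn.metrics import log_loss
# proba = knn.predict_proba(knn_test[['X', 'Y']])
# print(log_loss(actual, proba, labels=knn.classes_))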
Explanation: Logarithmic Loss
https://www.kaggle.com/wiki/LogarithmicLoss
The logarithm of the likelihood function for a Bernoulli random distribution.
In plain English, this error metric is used where contestants have to predict that something is true or false with a probability (likelihood) ranging from definitely true (1) to equally true (0.5) to definitely false(0).
The use of log on the error provides extreme punishments for being both confident and wrong. In the worst possible case, a single prediction that something is definitely true (1) when it is actually false will add infinite to your error score and make every other entry pointless. In Kaggle competitions, predictions are bounded away from the extremes by a small value in order to prevent this.
Let's plot it as a function of k for our nearest neighbor
End of explanation
# Submit for K=40
knn = KNeighborsClassifier(n_neighbors=40)
knn.fit(x, y)
# predict from our test set
test = pd.read_csv('../input/test.csv',parse_dates=['Dates'], dtype={"X": np.float64,"Y": np.float64}, )
x_test = test[['X', 'Y']]
outcomes = knn.predict(x_test)
submit = pd.DataFrame({'Id': test.Id.tolist()})
for category in y.cat.categories:
submit[category] = np.where(outcomes == category, 1, 0)
submit.to_csv('k_nearest_neigbour.csv', index = False)
Explanation: based on the log loss we can see that around 40 is optimal for k. Now let's predict using the test data
End of explanation
# map pd district to int
unique_pd_district = data["PdDistrict"].unique()
pd_district_mapping = {}
i=0
for c in unique_pd_district:
pd_district_mapping[c] = i
i += 1
data['PdDistrictId'] = data.PdDistrict.map(pd_district_mapping)
print(data.describe())
data.tail()
# store feature matrix in "X"
X = data[['Hour','DOW','X','Y','PdDistrictId']]
# store response vector in "y"
y = data['Category'].astype('category')
# Submit for K=40
knn = KNeighborsClassifier(n_neighbors=40)
knn.fit(X, y)
test = pd.read_csv('../input/test.csv',parse_dates=['Dates'], dtype={"X": np.float64,"Y": np.float64}, )
# clean up test set
test['DOW'] = test.DayOfWeek.map(dow)
test['Hour'] = pd.to_datetime(test.Dates).dt.hour
test['PdDistrictId'] = test.PdDistrict.map(pd_district_mapping)
test.tail()
# Predictions for the test set
X_test = test[['Hour','DOW','X','Y','PdDistrictId']]
outcomes = knn.predict(X_test)
submit = pd.DataFrame({'Id': test.Id.tolist()})
for category in y.cat.categories:
submit[category] = np.where(outcomes == category, 1, 0)
submit.to_csv('k_nearest_neigbour_2.csv', index = False)
# lets see how much dow, hour and district correlate to category
plt.figure()
sns.pairplot(data=data[["Category","DOW","Hour","PdDistrictId"]],
hue="Category", dropna=True)
plt.savefig("seaborn_pair_plot.png")
import csv as csv
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsClassifier
import seaborn as sns
# show plots inline
# Add column containing day of week expressed in integer
dow = {
'Monday':0,
'Tuesday':1,
'Wednesday':2,
'Thursday':3,
'Friday':4,
'Saturday':5,
'Sunday':6
}
data = pd.read_csv('../input/train.csv',parse_dates=['Dates'], dtype={"X": np.float64,"Y": np.float64}, )
data['DOW'] = data.DayOfWeek.map(dow)
data['Hour'] = pd.to_datetime(data.Dates).dt.hour
X = data[['Hour','DOW','X','Y']]
y = data['Category'].astype('category')
knn = KNeighborsClassifier(n_neighbors=39)
knn.fit(X, y)
test = pd.read_csv('../input/test.csv',parse_dates=['Dates'], dtype={"X": np.float64,"Y": np.float64}, )
test['DOW'] = test.DayOfWeek.map(dow)
test['Hour'] = pd.to_datetime(test.Dates).dt.hour
X_test = test[['Hour','DOW','X','Y']]
outcomes = knn.predict(X_test)
submit = pd.DataFrame({'Id': test.Id.tolist()})
for category in y.cat.categories:
submit[category] = np.where(outcomes == category, 1, 0)
submit.to_csv('k_nearest_neigbour3.csv', index = False)
Explanation: Can we do better?
Maybe we can try with more features than just lat and lon
But first, our data is not very useful as text. So let's map the strings to ints so that we can use them later
End of explanation |
14,723 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Compiled Sequential Importance Sampling
Compiled sequential importance sampling [1], or inference compilation, is a technique to amortize the computational cost of inference by learning a proposal distribution for importance sampling.
The proposal distribution is learned to minimise the KL divergence between the model and the guide, $\rm{KL}\!\left( p({\bf z} | {\bf x}) \lVert q_{\phi, x}({\bf z}) \right)$. This differs from variational inference, which would minimise $\rm{KL}\!\left( q_{\phi, x}({\bf z}) \lVert p({\bf z} | {\bf x}) \right)$. Using this loss encourages the approximate proposal distribution to be broader than the true posterior (mass covering), whereas variational inference typically learns a narrower approximation (mode seeking). Guides for importance sampling are usually desired to have heavier tails than the model (see this stackexchange question). Therefore, the inference compilation loss is usually more suited to compiling a guide for importance sampling.
Another benefit of CSIS is that, unlike many types of variational inference, it has no requirement that the model is differentiable. This allows it to be used for inference on arbitrarily complex programs (e.g. a Captcha renderer [1]).
This example shows CSIS being used to speed up inference on a simple problem with a known analytic solution.
Step1: Specify the model
Step2: And the guide
Step3: Now create a CSIS instance
Step4: Now we 'compile' the instance to perform inference on this model
Step5: And now perform inference by importance sampling
Step6: We now plot the results and compare with importance sampling | Python Code:
import torch
import torch.nn as nn
import torch.functional as F
import pyro
import pyro.distributions as dist
import pyro.infer
import pyro.optim
import os
smoke_test = ('CI' in os.environ)
n_steps = 2 if smoke_test else 2000
Explanation: Compiled Sequential Importance Sampling
Compiled sequential importance sampling [1], or inference compilation, is a technique to amortize the computational cost of inference by learning a proposal distribution for importance sampling.
The proposal distribution is learned to minimise the KL divergence between the model and the guide, $\rm{KL}\!\left( p({\bf z} | {\bf x}) \lVert q_{\phi, x}({\bf z}) \right)$. This differs from variational inference, which would minimise $\rm{KL}\!\left( q_{\phi, x}({\bf z}) \lVert p({\bf z} | {\bf x}) \right)$. Using this loss encourages the approximate proposal distribution to be broader than the true posterior (mass covering), whereas variational inference typically learns a narrower approximation (mode seeking). Guides for importance sampling are usually desired to have heavier tails than the model (see this stackexchange question). Therefore, the inference compilation loss is usually more suited to compiling a guide for importance sampling.
Another benefit of CSIS is that, unlike many types of variational inference, it has no requirement that the model is differentiable. This allows it to be used for inference on arbitrarily complex programs (e.g. a Captcha renderer [1]).
This example shows CSIS being used to speed up inference on a simple problem with a known analytic solution.
End of explanation
def model(prior_mean, observations={"x1": 0, "x2": 0}):
x = pyro.sample("z", dist.Normal(prior_mean, torch.tensor(5**0.5)))
y1 = pyro.sample("x1", dist.Normal(x, torch.tensor(2**0.5)), obs=observations["x1"])
y2 = pyro.sample("x2", dist.Normal(x, torch.tensor(2**0.5)), obs=observations["x2"])
return x
Explanation: Specify the model:
The model is specified in the same way as any Pyro model, except that a keyword argument, observations, must be used to input a dictionary with each observation as a key. Since inference compilation involves learning to perform inference for any observed values, it is not important what the values in the dictionary are. 0 is used here.
End of explanation
class Guide(nn.Module):
def __init__(self):
super().__init__()
self.neural_net = nn.Sequential(
nn.Linear(2, 10),
nn.ReLU(),
nn.Linear(10, 20),
nn.ReLU(),
nn.Linear(20, 10),
nn.ReLU(),
nn.Linear(10, 5),
nn.ReLU(),
nn.Linear(5, 2))
def forward(self, prior_mean, observations={"x1": 0, "x2": 0}):
pyro.module("guide", self)
x1 = observations["x1"]
x2 = observations["x2"]
v = torch.cat((x1.view(1, 1), x2.view(1, 1)), 1)
v = self.neural_net(v)
mean = v[0, 0]
std = v[0, 1].exp()
pyro.sample("z", dist.Normal(mean, std))
guide = Guide()
Explanation: And the guide:
The guide will be trained (a.k.a. compiled) to use the observed values to make proposal distributions for each unconditioned sample statement. In the paper [1], a neural network architecture is automatically generated for any model. However, for the implementation in Pyro the user must specify a task-specific guide program structure. As with any Pyro guide function, this should have the same call signature as the model. It must also encounter the same unobserved sample statements as the model. So that the guide program can be trained to make good proposal distributions, the distributions at sample statements should depend on the values in observations. In this example, a feed-forward neural network is used to map the observations to a proposal distribution for the latent variable.
pyro.module is called when the guide function is run so that the guide parameters can be found by the optimiser during training.
End of explanation
optimiser = pyro.optim.Adam({'lr': 1e-3})
csis = pyro.infer.CSIS(model, guide, optimiser, num_inference_samples=50)
prior_mean = torch.tensor(1.)
Explanation: Now create a CSIS instance:
The object is initialised with the model; the guide; a PyTorch optimiser for training the guide; and the number of importance-weighted samples to draw when performing inference. The guide will be optimised for a particular value of the model/guide argument, prior_mean, so we use the value set here throughout training and inference.
End of explanation
for step in range(n_steps):
csis.step(prior_mean)
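# Optional aside (not in the original tutorial): csis.step returns the loss estimate,
# so compilation progress could be tracked with something like
#   losses = [csis.step(prior_mean) for _ in range(n_steps)]
# and then plotted to check convergence.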
Explanation: Now we 'compile' the instance to perform inference on this model:
The arguments given to csis.step are passed to the model and guide when they are run to evaluate the loss.
End of explanation
posterior = csis.run(prior_mean,
observations={"x1": torch.tensor(8.),
"x2": torch.tensor(9.)})
marginal = pyro.infer.EmpiricalMarginal(posterior, "z")
Explanation: And now perform inference by importance sampling:
The compiled guide program should now be able to propose a distribution for z that approximates the posterior, $p(z | x_1, x_2)$, for any $x_1, x_2$. The same prior_mean is entered again, as well as the observed values inside observations.
End of explanation
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
with torch.no_grad():
# Draw samples from empirical marginal for plotting
csis_samples = torch.stack([marginal() for _ in range(1000)])
# Calculate empirical marginal with importance sampling
is_posterior = pyro.infer.Importance(model, num_samples=50).run(
prior_mean, observations={"x1": torch.tensor(8.),
"x2": torch.tensor(9.)})
is_marginal = pyro.infer.EmpiricalMarginal(is_posterior, "z")
is_samples = torch.stack([is_marginal() for _ in range(1000)])
# Calculate true prior and posterior over z
true_posterior_z = torch.arange(-10, 10, 0.05)
true_posterior_p = dist.Normal(7.25, (5/6)**0.5).log_prob(true_posterior_z).exp()
prior_z = true_posterior_z
prior_p = dist.Normal(1., 5**0.5).log_prob(true_posterior_z).exp()
plt.rcParams['figure.figsize'] = [30, 15]
plt.rcParams.update({'font.size': 30})
fig, ax = plt.subplots()
plt.plot(prior_z, prior_p, 'k--', label='Prior')
plt.plot(true_posterior_z, true_posterior_p, color='k', label='Analytic Posterior')
plt.hist(csis_samples.numpy(), range=(-10, 10), bins=100, color='r', density=1,
label="Inference Compilation")
plt.hist(is_samples.numpy(), range=(-10, 10), bins=100, color='b', density=1,
label="Importance Sampling")
plt.xlim(-8, 10)
plt.ylim(0, 5)
plt.xlabel("z")
plt.ylabel("Estimated Posterior Probability Density")
plt.legend()
plt.show()
Explanation: We now plot the results and compare with importance sampling:
We observe $x_1 = 8$ and $x_2 = 9$. Inference is performed by taking 50 samples using CSIS, and 50 using importance sampling from the prior. We then plot the resulting approximations to the posterior distributions, along with the analytic posterior.
End of explanation |
14,724 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Python + Astronomy
This course will be an introduction to Astropy, a maturing library for astronomy routines and tools in Python.
Astropy started as a combination of various common Python libraries (Pyfits, PyWCS, asciitables, and others) and is working towards providing a consistent API with capabilities for all astronomers. It is developed with extensive automated testing, long-term stable releases, extensive documentation, and a friendly community for contributions.
Note that this design differs from the IDL Astronomy User's Library, which is essentially a mishmash of routines.
These tutorials make some use of the examples at
Step1: Using FITS files in Python
FITS files are the commonly used data format in astronomy
Step2: The FITS file contains two header data units, a Primary HDU and an ASCII table HDU (see NASA's Primer) for the different types and limitations.
We can use the hdu_list object like a list to obtain information about each HDU
Step3: Since this is an image, we could take a look at it with the matplotlib package
Step4: We can manipulate the HDU's in any way that we want with the astropy.io.fits submodule
Step5: Let's write our new FITS file to our local computer. Running pwd will tell us what directory it is saved to.
Step6: Astropy ASCII file reader
While a number of ASCII file readers exist (including numpy.genfromtxt, numpy.loadtxt, and pandas.read_*), Astropy includes readers for text file formats commonly used in Astronomy.
These are read as an Astropy Table object, which is convertible to numpy arrays or pandas DataFrames. These can contain unit information and there is work on-going to incorporate uncertainties.
Step7: Tables support many of the same indexing and slicing operations as numpy arrays, as well as some of the higher-level operations of pandas. See the Astropy tutorial for more examples.
Units and Quantities
A nice addition to Astropy is the ability to manipulate units used in astronomy. By convention, we import this functionality into the name u
Step8: We can create composite units, such as units of acceleration
Step9: In addition to unit manipulation, Astropy has a concept of Quantities - numbers (or arrays) with units
Step10: Since the computer knows the physical types of each unit, it is able to make conversions between them. Let's use this to simplify my_data. The decompose method will try to use the most basic units, while the .si and .cgs will attempt simple representations with those two bases
Step11: Astropy also includes constants in another submodule, astropy.constants. For example, the average magnitude of the gravitational force of the Earth on the Sun, in SI units, is
Step12: Astropy will even convert units that are not physically compatible, if you are explicit about how to do the conversion. For example, the relationship between wavelength and frequency of light is defined by the choice of the speed of light, allowing the conversion of one to the other
Step13: A very useful trick is that Astropy will even convert units that require extra information to do so. For example, flux density is usually defined as a density with respect to wavelength or frequency, with the two forms convertable via
Step14: Celestial Coordinate Systems
What about units that correspond to locations?
While there do exist u.degree and u.arcsecond, the essential coordinate manipulation is part of the astropy.coordinates submodule. Coordinate conversions, catalog conversions, and more are supported.
Step15: Honorable Mentions in Astropy
These are some things that I'm not very familiar with, but I want to point out with a few quick examples.
votable
VOTables are an alternative format to FITS in use by virtual observatory projects. This one is difficult to prepare ahead of time, since these are typically generated on the fly in response to search/database queries. astropy.io.votable handles these files.
Modeling
The astropy.modeling submodule is concerned with the fitting of models to data. The goal is to make it easy to fit or represent your data using common models, such as broken power laws or other composite models.
For example, here is some synthetic data that is roughly Gaussian-like
Step16: Cosmology
There is also some work for cosmology computations, specifically with different cosmologies.
For this, it is essential to load a Cosmology object. These are, by convention, named cosmo | Python Code:
# First, make sure this works:
import astropy
# If this doesn't work, raise your hand!
Explanation: Python + Astronomy
This course will be an introduction to Astropy, a maturing library for astronomy routines and tools in Python.
Astropy started as a combination of various common Python libraries (Pyfits, PyWCS, asciitables, and others) and is working towards providing a consistent API with capabilities for all astronomers. It is developed with extensive automated testing, long-term stable releases, extensive documentation, and a friendly community for contributions.
Note that this design differs from the IDL Astronomy User's Library, which is essentially a mishmash of routines.
These tutorials make some use of the examples at:
- The official Astropy Tutorials
- A Workshop Given at SciPy 2014
End of explanation
# First we load the fits submodule from astropy:
from astropy.io import fits
# Then we load a fits file (here an image from the Schmidt telescope)
hdu_list = fits.open('http://data.astropy.org/tutorials/FITS-images/HorseHead.fits')
print(hdu_list)
Explanation: Using FITS files in Python
FITS files are the commonly used data format in astronomy: they are essentially collections of "header data units," which can be images, tables, or some other type of data.
End of explanation
print(hdu_list[0].data)
print(type(hdu_list[0].data))
print(hdu_list[0].header['FILTER'])
print(hdu_list[0].shape)
# We can also display the full header to get a better idea of what we are looking at
hdu_list[0].header
Explanation: The FITS file contains two header data units, a Primary HDU and an ASCII table HDU (see NASA's Primer) for the different types and limitations.
We can use the hdu_list object like a list to obtain information about each HDU:
End of explanation
# If using ipython notebook:
%matplotlib inline
# Load matplotlib
import matplotlib.pyplot as plt
# Load colormaps (the default is somewhat ugly)
from matplotlib import cm
# If *not* using ipython notebook:
# plt.ion()
plt.imshow(hdu_list[0].data, cmap=cm.gist_heat)
plt.colorbar()
Explanation: Since this is an image, we could take a look at it with the matplotlib package:
End of explanation
hdu_list[0].data /= 2
hdu_list[0].header['FAKE'] = 'New Header'
hdu_list[0].header['FILTER'] = 'Changed'
print(hdu_list[0].data)
hdu_list[0].header
Explanation: We can manipulate the HDUs in any way that we want with the astropy.io.fits submodule:
End of explanation
%pwd
hdu_list.writeto('new-horsehead.fits', clobber=True)
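# Aside for newer Astropy releases (not part of the original tutorial): the `clobber`
# keyword was later deprecated in favour of `overwrite`, i.e.
# hdu_list.writeto('new-horsehead.fits', overwrite=True)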
Explanation: Let's write our new FITS file to our local computer. Running pwd will tell us what directory it is saved to.
End of explanation
# First we load the ascii submodule:
from astropy.io import ascii
example_csv = ascii.read('http://samplecsvs.s3.amazonaws.com/Sacramentorealestatetransactions.csv')
print(example_csv)
# We can also read Astronomy-specific formats.
# For example, IPAC formatted files
example_ipac = ascii.read('http://exoplanetarchive.ipac.caltech.edu/docs/tblexamples/IPAC_ASCII_one_header.tbl')
print(example_ipac)
Explanation: Astropy ASCII file reader
While a number of ASCII file readers exist (including numpy.genfromtxt, numpy.loadtxt, and pandas.read_*), Astropy includes readers for text file formats commonly used in Astronomy.
These are read as an Astropy Table object, which is convertible to numpy arrays or pandas DataFrames. These can contain unit information and there is work on-going to incorporate uncertainties.
End of explanation
from astropy import units as u
# SI, cgs, and other units are defined in Astropy:
u.m, u.angstrom, u.erg, u.Jy, u.solMass
# Units all have documentation and attributes
print(u.solMass.names)
print(u.solMass.physical_type)
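# A small illustrative aside (not in the original): units attached to values convert directly,
# e.g. one solar mass expressed in kilograms:
print((1 * u.solMass).to(u.kg))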
Explanation: Tables support many of the same indexing and slicing operations as numpy arrays, as well as some of the higher-level operations of pandas. See the Astropy tutorial for more examples.
Units and Quantities
A nice addition to Astropy is the ability to manipulate units used in astronomy. By convention, we import this functionality into the name u:
End of explanation
u.m / u.second / u.second
u.pc / u.attosecond / u.fortnight
Explanation: We can create composite units, such as units of acceleration:
End of explanation
print(5*u.erg/u.second)
5*u.erg/u.second
import numpy as np
my_data = np.array([1,2,3,4,5,6]) * u.Hertz
print(my_data)
# Quantities (and their units) can be combined through algebraic manipulation:
new_data = (6.626e-34 * u.m**2 * u.kg / u.second) * my_data
print(new_data)
Explanation: In addition to unit manipulation, Astropy has a concept of Quantities - numbers (or arrays) with units:
End of explanation
print(new_data.cgs)
print(new_data.si)
print(new_data.decompose())
# We can use the to() method to convert to anything with the same physical_type
print(new_data.unit.physical_type)
print(new_data.to(u.joule))
print(new_data.to(u.eV))
# With the to() method, unit changes are relatively straightforward:
(420*u.parsec).to(u.AU)
Explanation: Since the computer knows the physical types of each unit, it is able to make conversions between them. Let's use this to simplify my_data. The decompose method will try to use the most basic units, while the .si and .cgs will attempt simple representations with those two bases:
End of explanation
from astropy.constants import M_earth, G, M_sun
(G * M_earth * M_sun / u.AU**2).to(u.N)
Explanation: Astropy also includes constants in another submodule, astropy.constants. For example, the average magnitude of the gravitational force of the Earth on the Sun, in SI units, is:
End of explanation
(450. * u.nm).to(u.GHz, equivalencies=u.spectral())
Explanation: Astropy will even convert units that are not physically compatible, if you are explicit about how to do the conversion. For example, the relationship between wavelength and frequency of light is defined by the choice of the speed of light, allowing the conversion of one to the other:
End of explanation
f_lambda = (1e-18 * u.erg / u.cm**2 / u.s / u.angstrom)
print(f_lambda.to(u.Jy, equivalencies=u.equivalencies.spectral_density(1*u.micron)))
print(f_lambda.to(u.Jy, equivalencies=u.equivalencies.spectral_density(299.79*u.THz)))
Explanation: A very useful trick is that Astropy will even convert units that require extra information to do so. For example, flux density is usually defined as a density with respect to wavelength or frequency, with the two forms convertable via:
$$ \nu f_\nu = \lambda f_\lambda$$
To convert between the different definitions of flux density, we merely need to supply the wavelength or frequency used:
End of explanation
# Let's import the main class used, SkyCoord, and create a couple SkyCoord objects:
from astropy.coordinates import SkyCoord
print(SkyCoord(-2*u.deg, 56*u.deg))
print(SkyCoord(1*u.hourangle, 5*u.degree))
print(SkyCoord('2h2m1s 9d9m9s'))
print(SkyCoord('-2.32d', '52.3d', frame='fk4'))
print(SkyCoord.from_name("M101"))
sc = SkyCoord('25d 35d')
# We can retrieve the coordinates we used to create these objects:
print(sc.ra)
print(sc.dec)
# We can transform coordinates to different representations (i.e., coordinate systems)
print(sc.transform_to('fk4'))
print(sc.transform_to('galactic'))
# Separations and position angles can be calculated from SkyCoord objects:
sc2 = SkyCoord('35d 25d', frame='galactic')
print(sc.separation(sc2))
# When we have a world coordinate system (e.g., from a FITS file header), we can convert to and from pixel coordinates:
from astropy.wcs import WCS
w = WCS(hdu_list[0].header)
print(sc.to_pixel(w))
print(SkyCoord.from_pixel(5, 5, w))
Explanation: Celestial Coordinate Systems
What about units that correspond to locations?
While u.degree and u.arcsecond do exist, the essential coordinate manipulation is part of the astropy.coordinates submodule. Coordinate conversions, catalog conversions, and more are supported.
End of explanation
import numpy as np
from astropy.modeling import models, fitting
np.random.seed(0)
x = np.linspace(-5., 5., 200)
y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
y += np.random.normal(0., 0.2, x.shape)
plt.plot(x, y, 'ko')
plt.xlabel('Position')
plt.ylabel('Flux')
# Fit the data using a Gaussian
model_object = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
fitter = fitting.LevMarLSQFitter()
g = fitter(model_object, x, y)
plt.plot(x, y, 'ko')
plt.plot(x, g(x), 'r-', lw=2)
plt.xlabel('Position')
plt.ylabel('Flux')
# Get information about the fitted model:
print(g)
Explanation: Honorable Mentions in Astropy
These are some things that I'm not very familiar with, but I want to point out with a few quick examples.
votable
VOTables are an alternative format to FITS in use by virtual observatory projects. This one is difficult to prepare ahead of time, since these are typically generated on the fly in response to search/database queries. astropy.io.votable handles these files.
Modeling
The astropy.modeling submodule is concerned with the fitting of models to data. The goal is to make it easy to fit or represent your data using common models, such as broken power laws or other composite models.
For example, here is some synthetic data that is roughly Gaussian-like:
End of explanation
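# Hedged sketch of a composite model, as mentioned above: astropy.modeling lets you
# combine models with arithmetic operators. The parameter values below are arbitrary
# illustrative choices and reuse the synthetic x, y defined in the cell above.
compound = models.Gaussian1D(amplitude=3., mean=1.3, stddev=0.8) + models.Const1D(amplitude=0.1)
fitter2 = fitting.LevMarLSQFitter()
compound_fit = fitter2(compound, x, y)
print(compound_fit)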
# Load the 9-year WMAP Cosmology and get H_0
from astropy.cosmology import WMAP9 as cosmo
print(cosmo.H(0))
# find the age of the universe at a given redshift:
print(cosmo.age(1))
# Other cosmologies are avaliable
from astropy.cosmology import Planck13 as cosmo
print(cosmo.age(1))
# Build your own cosmology
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05)
print(cosmo.age(1))
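# Hedged example of a few other quantities a cosmology object exposes
# (these are part of astropy.cosmology's public API); `cosmo` is the
# custom FlatLambdaCDM built just above.
print(cosmo.H0)                      # Hubble constant
print(cosmo.comoving_distance(1))    # comoving distance to z = 1
print(cosmo.luminosity_distance(1))  # luminosity distance to z = 1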
Explanation: Cosmology
There is also some work for cosmology computations, specifically with different cosmologies.
For this, it is essential to load a Cosmology object. These are, by convention, named cosmo:
End of explanation |
14,725 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Encoder-Decoder Analysis
Model Architecture
Step1: Perplexity on Each Dataset
Step2: Loss vs. Epoch
Step3: Perplexity vs. Epoch
Step4: Generations
Step5: BLEU Analysis
Step6: N-pairs BLEU Analysis
This analysis randomly samples 1000 pairs of generations/ground truths and treats them as translations, giving their BLEU score. We expect very low scores for the ground truth, while high scores can expose hyper-common generations
Step7: Alignment Analysis
This analysis computes the average Smith-Waterman alignment score for the generations, with the same intuition as N-pairs BLEU: we expect low scores for the ground truth, while hyper-common generations raise the scores | Python Code:
report_file = '/Users/bking/IdeaProjects/LanguageModelRNN/experiment_results/encdec_noing23_200_512_04drb/encdec_noing23_200_512_04drb.json'
log_file = '/Users/bking/IdeaProjects/LanguageModelRNN/experiment_results/encdec_noing23_200_512_04drb/encdec_noing23_200_512_04drb_logs.json'
import json
import matplotlib.pyplot as plt
with open(report_file) as f:
report = json.loads(f.read())
with open(log_file) as f:
logs = json.loads(f.read())
print('Encoder: \n\n', report['architecture']['encoder'])
print('Decoder: \n\n', report['architecture']['decoder'])
Explanation: Encoder-Decoder Analysis
Model Architecture
End of explanation
print('Train Perplexity: ', report['train_perplexity'])
print('Valid Perplexity: ', report['valid_perplexity'])
print('Test Perplexity: ', report['test_perplexity'])
Explanation: Perplexity on Each Dataset
End of explanation
%matplotlib inline
for k in logs.keys():
plt.plot(logs[k][0], logs[k][1], label=str(k) + ' (train)')
plt.plot(logs[k][0], logs[k][2], label=str(k) + ' (valid)')
plt.title('Loss v. Epoch')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
Explanation: Loss vs. Epoch
End of explanation
%matplotlib inline
for k in logs.keys():
plt.plot(logs[k][0], logs[k][3], label=str(k) + ' (train)')
plt.plot(logs[k][0], logs[k][4], label=str(k) + ' (valid)')
plt.title('Perplexity v. Epoch')
plt.xlabel('Epoch')
plt.ylabel('Perplexity')
plt.legend()
plt.show()
Explanation: Perplexity vs. Epoch
End of explanation
def print_sample(sample, best_bleu=None):
enc_input = ' '.join([w for w in sample['encoder_input'].split(' ') if w != '<pad>'])
gold = ' '.join([w for w in sample['gold'].split(' ') if w != '<mask>'])
print('Input: '+ enc_input + '\n')
print('Gend: ' + sample['generated'] + '\n')
print('True: ' + gold + '\n')
if best_bleu is not None:
cbm = ' '.join([w for w in best_bleu['best_match'].split(' ') if w != '<mask>'])
print('Closest BLEU Match: ' + cbm + '\n')
print('Closest BLEU Score: ' + str(best_bleu['best_score']) + '\n')
print('\n')
for i, sample in enumerate(report['train_samples']):
print_sample(sample, report['best_bleu_matches_train'][i] if 'best_bleu_matches_train' in report else None)
for i, sample in enumerate(report['valid_samples']):
print_sample(sample, report['best_bleu_matches_valid'][i] if 'best_bleu_matches_valid' in report else None)
for i, sample in enumerate(report['test_samples']):
print_sample(sample, report['best_bleu_matches_test'][i] if 'best_bleu_matches_test' in report else None)
Explanation: Generations
End of explanation
def print_bleu(blue_struct):
print('Overall Score: ', blue_struct['score'], '\n')
print('1-gram Score: ', blue_struct['components']['1'])
print('2-gram Score: ', blue_struct['components']['2'])
print('3-gram Score: ', blue_struct['components']['3'])
print('4-gram Score: ', blue_struct['components']['4'])
# Training Set BLEU Scores
print_bleu(report['train_bleu'])
# Validation Set BLEU Scores
print_bleu(report['valid_bleu'])
# Test Set BLEU Scores
print_bleu(report['test_bleu'])
# All Data BLEU Scores
print_bleu(report['combined_bleu'])
Explanation: BLEU Analysis
End of explanation
# Training Set BLEU n-pairs Scores
print_bleu(report['n_pairs_bleu_train'])
# Validation Set n-pairs BLEU Scores
print_bleu(report['n_pairs_bleu_valid'])
# Test Set n-pairs BLEU Scores
print_bleu(report['n_pairs_bleu_test'])
# Combined n-pairs BLEU Scores
print_bleu(report['n_pairs_bleu_all'])
# Ground Truth n-pairs BLEU Scores
print_bleu(report['n_pairs_bleu_gold'])
Explanation: N-pairs BLEU Analysis
This analysis randomly samples 1000 pairs of generations/ground truths and treats them as translations, giving their BLEU score. We expect very low scores for the ground truth, while high scores can expose hyper-common generations.
End of explanation
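# Hedged sketch of how an "N-pairs BLEU" number could be produced: sample random pairs
# of generations and score one against the other with NLTK's corpus_bleu. This is
# illustrative only -- the report above stores precomputed values, and the exact
# procedure used to create them may differ. NLTK is assumed to be available.
import random
from nltk.translate.bleu_score import corpus_bleu, SmoothingFunction

def n_pairs_bleu(texts, n_pairs=1000, seed=0):
    rng = random.Random(seed)
    tokenized = [t.split() for t in texts]
    hyps, refs = [], []
    for _ in range(n_pairs):
        a, b = rng.sample(tokenized, 2)
        hyps.append(a)
        refs.append([b])
    return corpus_bleu(refs, hyps, smoothing_function=SmoothingFunction().method1)

generations = [s['generated'] for s in report['train_samples']]
# n_pairs_bleu(generations)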
print('Average (Train) Generated Score: ', report['average_alignment_train'])
print('Average (Valid) Generated Score: ', report['average_alignment_valid'])
print('Average (Test) Generated Score: ', report['average_alignment_test'])
print('Average (All) Generated Score: ', report['average_alignment_all'])
print('Average Gold Score: ', report['average_alignment_gold'])
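# Hedged sketch of a token-level Smith-Waterman local-alignment score, with the same
# intuition as the averages printed above. The real scores in `report` were computed
# elsewhere; the match/mismatch/gap values below are arbitrary illustrative choices.
def smith_waterman(a_tokens, b_tokens, match=2, mismatch=-1, gap=-1):
    rows, cols = len(a_tokens) + 1, len(b_tokens) + 1
    H = [[0] * cols for _ in range(rows)]
    best = 0
    for i in range(1, rows):
        for j in range(1, cols):
            diag = H[i - 1][j - 1] + (match if a_tokens[i - 1] == b_tokens[j - 1] else mismatch)
            H[i][j] = max(0, diag, H[i - 1][j] + gap, H[i][j - 1] + gap)
            best = max(best, H[i][j])
    return best

# smith_waterman('the cat sat'.split(), 'the cat ran'.split())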
Explanation: Alignment Analysis
This analysis computes the average Smith-Waterman alignment score for the generations, with the same intuition as N-pairs BLEU: we expect low scores for the ground truth, while hyper-common generations raise the scores.
End of explanation |
14,726 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
类元编程是指在运行时创建或定制类的技艺,在 Python 中,类是一等对象,因此任何时候都可以使用函数新建类,无需使用 class 关键字。类装饰器也是函数,不公审查,修改甚至可以把被装饰类替换成其它类。最后,元类是类元编程最高级的工具,使用元类可以创建具有某种特质的全新类种,例如我们见过的抽象基类
类工厂函数
标准库的一个类工厂函数 -- collections.namedtuple。我们把一个类名和几个属性名传给这个函数,它会创建一个 tuple 的子类,其中元素通过名称获取,还为调试提供了友好的字符串表示(__repr__)
Step1: 这段代码各个字段名都出现了三次,让人厌烦,字符串表现形式也不友好,我们编写一个 record_factory 类工厂函数解决这个问题
Step2: 通常,我们将 type 视为函数,因为我们像函数那样使用它,type(my_object) 获取对象所属的类 -- 作用与 my_object.__class__ 相同。然而,type 是一个类,当成类使用的时候传入三个参数可以新建一个类(是的,type 可以根据传入的不同参数有不同的用法
type(类名, 父类的元组(针对继承的情况,可以为空),包含属性的字典(名称和值))
比如下面的代码是等价的
```
class MyShinyClass
Step3: 如果你继承 Foo 类,可以写成
FooChild = type('FooChild', (Foo,),{})
我们看到 type 函数可以创建一个类,因为 type 是元类,Python 中所有对象都是由 type 创建而来,注意,Python 中所有的东西都是对象,包括 整数,字符串、函数以及类,都由 type 创建而来
Step4: 总之,前面的 record_factory 函数最后一行会构建一个类,类的名称是 cls_name 参数的值,唯一直接超类是 object,有 __slots__, __init__, __iter__, __repr__ 四个类属性,其中后 3 个是实例方法。
我们本来可以将 __slots__ 类属性改成其它值,不过那样就要实现 __setattr__ 方法,为属性赋值时验证属性的名称,而且顺序相同,然而第 9 章说过,__slots__ 属性最主要特点就是节省内存,能处理数百万个实例,不过也有一些缺点。
把 3 个参数传给 type 是动态创建类的常用方式,如果查看 collections.namedtuple 源码会发现另一种方式,先声明一个 _class_template 变量,其值是字符串形式源码模板,然后在 namedtuple 函数中调用 _class_template.format(...) 方法,填充模板里的空白,最后,使用内置的 exec函数计算得到源码字符串
在 Python 元编程时,最好不要使用 exec 和 eval 函数,如果接受字符串来自不可信的源,这两个函数会有严重的安全风险,Python 提供了足够的内省工具,大多数时候不需要这两个函数。
record_factory 函数创建的类不能够序列化,即不能使用 pikle 模块里的 dump/load 函数处理,
定制描述符的类装饰器
上一章的 LineItem 例子还有个问题,就是储存的属性不具有描述性,即属性 _Quantity#0 不便于调试,如果能存储成 _Quantity#weight 之类的就好多了,上一章说过,我们不能使用描述性的存储属性名称,因为实例化描述符时无法得知托管属性,如前面的 weight 的名称,可是如果组建好整个类,而且把描述符绑定到类属性后,我们就可以审查类,并为描述符设置合理的存储属性名称。LineItem 的 __new__ 方法可以做到这一点,因此,在 __init__ 方法中使用描述符时,存储属性已经设置了正确的名称。为了解决这个问题使用 __new__ 方法属于白费力气,每次新建 LineItem 实例时都会运行 __new__ 方法中的逻辑,可是一旦 LineItem 类构建好了,描述符与托管属性之间的绑定就不会变了。因此,我们要在创建类时设置存储属性的名称。使用类装饰器或元类可以做到这一点,我们先使用简单的方式。
类装饰器和函数装饰器非常类似,是参数为类对象的函数,返回原来的类或修改后的类
Step5: 类装饰器能以较简单的方式做到以前需要元类去做的事情 -- 创建类的时候定制类
类装饰器有个重大的缺点:只对直接依附的类有效,这意味着,被装饰的类的子类可能继承也可能不继承装饰类所做的改动,具体情况视改动方式而定
导入时和运行时比较
定义两个文件, evaltime.py
Step6: evalsupport.py
Step7: In [1]
Step8: 向上追溯,ABCMeta 最终所属的类也是 type,所有类都直接或间接的是 type 的实例,不过只有元类同事也是 type 的子类。若理解元类,一定要知道这种关系:元类(如 ABCMeta)从 type 类继承了构建类的能力。
我们要抓住的重点是,所有类都是 type 的实例,但元类还是 type 的子类,因此可以作为制造类的工厂,具体来说,元类可以通过实现 __init__ 方法来定制。元类的 __init__ 方法可以做到类装饰器能做的任何事情,但是作用更大
理解元类计算时间的练习
我们让 evalsupport.py 与原来相同,新建一个 evaltime_meta.py 作为主脚本:
Step10: 引入操作:
In [1]
Step12: 写成这种语法,用户完全不用知道描述符或元类,直接继承库中提供的类就能满足要求
元类的特殊用法 __prepare__
在某些应用中,可能要知道类属性的定义顺序,例如读写 csv 文件的库,用户定义的类可能想要把类中按顺序声明的字段与 csv 文件中的各列对应起来
前面说过,type 构造方法以及元类的 __new__ 和 __init__ 都接收类的定义体,形式是一个名称到属性的字典,也就是说,当元类或装饰器获得映射时,属性的顺序已经丢失了。
在 Python 3 中可以使用 __prepare__, 这个特殊方法只能在元类中使用,而且要声明为类方法(即,要使用 classmethod 类装饰器定义)。解释器调用元类 __new__ 方法之前会调用 __prepare__ 方法,使用类定义提中的属性创建映射。__prepare 第一个参数是元类,随后两个参数是类的名称以及组成的元祖,返回值是映射。元类构建新类时,__prepare__ 方法返回的映射会传给 __new__ 方法的最后一个参数,然后再传给 __init__ 方法 | Python Code:
class Dog:
def __init__(self, name, weight, owner):
self.name = name
self.weight = weight
self.owner = owner
rex = Dog('Rex', 30, 'Bob')
rex
Explanation: Class metaprogramming is the art of creating or customizing classes at runtime. In Python, classes are first-class objects, so a class can be created at any time with a function, without using the class keyword. Class decorators are also functions: they can inspect and modify the decorated class, or even replace it with a different class. Finally, metaclasses are the most advanced tool of class metaprogramming; they let us create whole new categories of classes with special traits, such as the abstract base classes we have already seen.
Class factory functions
One class factory function in the standard library is collections.namedtuple. We pass a class name and several attribute names to it, and it builds a subclass of tuple whose elements can be retrieved by name and which provides a friendly string representation (__repr__) for debugging.
End of explanation
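# Hedged illustration of the collections.namedtuple factory mentioned above, for
# comparison with the record_factory we build next. NTDog is an illustrative name.
from collections import namedtuple
NTDog = namedtuple('NTDog', 'name weight owner')
nt_rex = NTDog('Rex', 30, 'Bob')
nt_rex        # NTDog(name='Rex', weight=30, owner='Bob') -- friendly __repr__
nt_rex.name   # fields are accessible by name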
def record_factory(cls_name, field_names):
try:
# 这里体现了鸭子类型,尝试在都好或空格处拆分 field_names,如果失败,则假定 field_names 本身就是可迭代对象
field_names = field_names.replace(',', ' ').split()
except AttributeError: #不能调用 .replace 或 .split 方法
pass # 假定 field_names 本就是标识符组成的序列
field_names = tuple(field_names) #使用属性名构建元组,这将成为新建类的 __slots__属性
# __slots__变量,来限制该class能添加的属性
# 将变成新建类的 __init__ 方法
def __init__(self, *args, **kwargs):
attrs = dict(zip(self.__slots__, args))
attrs.update(kwargs)
for name, value in attrs.items():
setattr(self, name, value)
# 把类的实例变成可迭代对象,按照 __slots__ 设定的顺序产出字段值
def __iter__(self):
for name in self.__slots__:
yield getattr(self, name)
def __repr__(self):
values = ', '.join('{}={!r}'.format(*i) for i
in zip(self.__slots__, self))
return '{}({})'.format(self.__class__.__name__, values)
# 组建类属性字典
cls_attrs = dict(__slots__ = field_names,
__init__ = __init__, # 相当于 '__init__': __init__
__iter__ = __iter__,
__repr__ = __repr__)
# 用 type 方法构造,构建新类,然后返回
return type(cls_name, (object,), cls_attrs)
Dog = record_factory('Dog', 'name weight owner')
rex = Dog('Rex', 30, 'Bob')
rex
name, weight, _ = rex # 实例是可迭代对象,所以可以方便的拆包
name, weight
"{2}'s dog weight {1}kg".format(*rex) # 实例是可迭代对象,所以可以方便的拆包
rex.weight = 32  # record instances are mutable
rex
Dog.__mro__ # 新建的类继承 Object 类,和我们的工厂函数没有关系
Explanation: In that code each field name appears three times, which is tedious, and the string representation is not friendly either. Let's write a record_factory class factory function to solve this problem.
End of explanation
Foo = type('Foo', (), {'bar':True})
Foo
Foo.bar
f = Foo()
f
Explanation: We usually treat type as a function, because we use it like one: type(my_object) returns the object's class -- the same as my_object.__class__. However, type is a class, and when called with three arguments it creates a brand-new class (yes, type behaves differently depending on the arguments you pass):
type(类名, 父类的元组(针对继承的情况,可以为空),包含属性的字典(名称和值))
比如下面的代码是等价的
```
class MyShinyClass:
pass
type('MyShinyClass', (), {})
```
因此我们要新建如下类:
class Foo:
bar = True
可以写成:
End of explanation
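# Hedged extra example: the attribute dict passed to type() can also hold methods,
# so the class built below is equivalent to one written with the class statement.
# Dog2 and bark are illustrative names, not part of the book's example.
def bark(self):
    return '{} says woof'.format(self.kind)

Dog2 = type('Dog2', (), {'kind': 'dog', 'bark': bark})
Dog2().bark()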
age = 35
print(age.__class__)
age.__class__.__class__
name = 'bob'
print(name.__class__)
name.__class__.__class__
def foo(): pass
foo.__class__
foo.__class__.__class__
class Bar(object): pass
b = Bar()
b.__class__
b.__class__.__class__
Explanation: If you want to inherit from the Foo class, you can write:
FooChild = type('FooChild', (Foo,),{})
We have seen that the type function can create a class, because type is a metaclass: every object in Python is created by type. Note that everything in Python is an object -- integers, strings, functions and classes alike -- and all of them are created by type.
End of explanation
import abc
class AutoStorage:
__counter = 0
def __init__(self):
cls = self.__class__
prefix = cls.__name__
index = cls.__counter
self.storage_name = '_{}#{}'.format(prefix, index)
cls.__counter += 1
def __get__(self, instance, owner):
if instance is None:
return self
else:
return getattr(instance, self.storage_name)
def __set__(self, instance, value):
setattr(instance, self.storage_name, value) # 不进行验证
class Validated(abc.ABC, AutoStorage): # 抽象类,也继承自 AutoStorage
def __set__(self, instance, value):
# __set__ 方法把验证委托给 validate 方法
value = self.validate(instance, value)
#返回的 value 值返回给超类的 __set__ 方法,存储值
super().__set__(instance, value)
@abc.abstractmethod
def validate(self, instance, value): # 抽象方法
'''return validated value or raise ValueError'''
class Quantity(Validated):
'''a number greater than zero'''
# 只需要根据不同的验证规则实现 validate 方法即可
def validate(self, instance, value):
if value <= 0:
raise ValueError('value must be > 0')
return value
class NonBlank(Validated):
'''a string with at least one not-space character'''
def validate(self, instance, value):
value = value.strip()
if len(value) == 0:
raise ValueError('value cannot be empty or blank')
return value
# class LineItem: # 托管类
# weight = Quantity()
# price = Quantity()
# description = NonBlank()
# def __init__(self, description, weight, price):
# self.description = description
# self.weight = weight
# self.price = price
# def subtotal(self):
# return self.weight * self.price
## --------------------
## 上面的和 上一章代码相同, LineItem 类只加了 1 行,在下面实现
## --------------------
def entity(cls):
for key, attr in cls.__dict__.items():
if isinstance(attr, Validated):
type_name = type(attr).__name__
attr.storage_name = '_{}#{}'.format(type_name, key)
return cls #返回修改后的类
@entity # 类装饰器,定义类的时候就会调用
class LineItem:
weight = Quantity()
price = Quantity()
description = NonBlank()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
raisins = LineItem('Golden raisins', 10, 6.95)
dir(raisins)[:3]
LineItem.description.storage_name
raisins.description
getattr(raisins, '_NonBlank#description')
Explanation: To sum up, the last line of the record_factory function above builds a class whose name is the value of the cls_name argument, whose only direct base class is object, and which has four class attributes -- __slots__, __init__, __iter__, __repr__ -- the last three being instance methods.
We could have set the __slots__ class attribute to something else, but then we would have to implement __setattr__ to validate attribute names on assignment, keeping the same ordering. As Chapter 9 explained, the main feature of __slots__ is saving memory when handling millions of instances, and it also comes with some drawbacks.
Passing three arguments to type is the usual way of creating a class dynamically. If you look at the source of collections.namedtuple you will find a different approach: a _class_template variable holds the source code as a string template, the namedtuple function fills in the blanks by calling _class_template.format(...), and the resulting source string is then evaluated with the built-in exec function.
When doing metaprogramming in Python it is best to avoid exec and eval: if the strings come from an untrusted source these functions pose serious security risks, and Python offers enough introspection tools that they are rarely needed.
Classes created by record_factory cannot be serialized -- that is, they cannot be handled by the dump/load functions of the pickle module.
A class decorator that customizes descriptors
The LineItem example from the previous chapter still has a problem: the storage attribute names are not descriptive -- an attribute named _Quantity#0 is not helpful for debugging; something like _Quantity#weight would be much better. As mentioned before, we cannot use descriptive storage names because when the descriptor is instantiated it has no way of knowing the name of the managed attribute (e.g. weight). But once the whole class is assembled and the descriptors are bound to class attributes, we can inspect the class and set sensible storage names on the descriptors. LineItem's __new__ method could do this, so that by the time the descriptors are used in __init__ the storage attributes already have the right names. Using __new__ for this, however, is wasted effort: its logic would run on every instantiation, while the binding between descriptors and managed attributes never changes once the class is built. So we need to set the storage names when the class itself is created. A class decorator or a metaclass can do that; we start with the simpler approach.
A class decorator is very similar to a function decorator: it is a function that takes a class object as argument and returns the same class or a modified one.
End of explanation
#!/usr/bin/env python
# encoding: utf-8
from evalsupport import deco_alpha
print('<[0]> evaltime module start')
def test():
class Test:
print('<[1]> evaltime test Test')
class ClassOne():
print('<[2]> ClassOne body')
def __init__(self):
print('<[3]> ClassOne.__init__')
def __del__(self):
print('<[4]> ClassOne.__del__')
def method_x(self):
print('<[5]> ClassOne.method_x')
class ClassTwo(object):
print('<[6]> ClassTwo body')
@deco_alpha
class ClassThree():
print('<[7]> ClassThree body')
def method_y(self):
print('<[8]> ClassThree.method_y')
class ClassFour(ClassThree):
print('<[9]> ClassFour body')
def method_y(self):
print('<[10]> ClassFour.method_y')
if __name__ == '__main__':
print('<[11]> ClassOne tests', 30 * '.')
one = ClassOne()
one.method_x()
print('<[12]> ClassThree tests', 30 * '.')
three = ClassThree()
three.method_y()
print('<[13]> ClassFour tests', 30 * '.')
four = ClassFour()
four.method_y()
print('<[14]> evaltime module end')
Explanation: A class decorator can do, in a simpler way, things that previously required a metaclass: customizing a class the moment it is created.
Class decorators have a major drawback: they act only on the class to which they are directly applied. This means subclasses of the decorated class may or may not inherit the changes made by the decorator, depending on how those changes were made.
Import time versus runtime
We define two files. evaltime.py:
End of explanation
#!/usr/bin/env python
# encoding: utf-8
print('<[100]> evalsupport module start')
def deco_alpha(cls):
print('<[200]> deco_alpha')
def inner_1(self):
print('<[300]> deco_alpha:inner_1')
cls.method_y = inner_1
return cls
class MetaAleph(type):
print('<[400]> MetaAleph body')
def __init__(cls, name, bases, dic):
print('<[500]> MetaAleph.__init__')
def inner_2(self):
print('<[600]> MetaAleph.__init__:inner_2')
cls.method_z = inner_2
print('<[700]> evalsupport module end')
Explanation: evalsupport.py
End of explanation
import collections
collections.Iterable.__class__
import abc
abc.ABCMeta.__class__
abc.ABCMeta.__mro__
Explanation: In [1]: import evaltime
<[100]> evalsupport module start #evalsupport 模块中所有顶层代码在导入模块时执行,解释器会编译 deco_alpha 函数,但不会执行定义体
<[400]> MetaAleph body # 类定义体运行了
<[700]> evalsupport module end
<[0]> evaltime module start
<[2]> ClassOne body # 每个类的定义体都执行了
<[6]> ClassTwo body #包括嵌套的类
<[7]> ClassThree body
<[200]> deco_alpha # 先计算被装饰的 ClassThree 类定义体,然后运行装饰器函数
<[9]> ClassFour body
<[14]> evaltime module end #这里,evaltime 是被导入的,不会运行 if __name == '__main__'
(py35) kaka@kaka-deep:~/kaka$ python3 evaltime.py
<[100]> evalsupport module start
<[400]> MetaAleph body
<[700]> evalsupport module end
<[0]> evaltime module start
<[2]> ClassOne body
<[6]> ClassTwo body
<[7]> ClassThree body
<[200]> deco_alpha
<[9]> ClassFour body
<[11]> ClassOne tests ..............................
<[3]> ClassOne.__init__
<[5]> ClassOne.method_x
<[12]> ClassThree tests ..............................
<[300]> deco_alpha:inner_1 # 类装饰器改变了 ClassThree.method_y 方法
<[13]> ClassFour tests ..............................
<[10]> ClassFour.method_y
<[14]> evaltime module end
<[4]> ClassOne.__del__ # 程序结束后,绑定在全局变量 one 上的 ClassOne 实例才会被垃圾回收
Metaclass basics
A metaclass is a class factory -- except that instead of being a function, a metaclass is itself a class.
By the Python object model, classes are objects, so every class must be an instance of some other class. By default, Python classes are instances of type: in other words, type is the metaclass of most built-in and user-defined classes. To avoid infinite regress, type is an instance of itself.
The relationship between object and type is unique: object is an instance of type, and type is a subclass of object. This relationship cannot be expressed in Python code, because either one would have to exist before the other could be defined. The fact that type is an instance of itself is also remarkable.
Besides type, the standard library provides a few other metaclasses, such as ABCMeta and Enum. As shown below:
End of explanation
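# Hedged minimal metaclass example, before the larger MetaAleph experiment below: the
# metaclass receives the class being built and can customize it in __init__.
# VerboseMeta and Widget are illustrative names, not from the book.
class VerboseMeta(type):
    def __init__(cls, name, bases, dic):
        super().__init__(name, bases, dic)
        print('building class', name)

class Widget(metaclass=VerboseMeta):  # prints 'building class Widget' at class-creation time
    pass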
#!/usr/bin/env python
# encoding: utf-8
from evalsupport import deco_alpha
from evalsupport import MetaAleph
print('<[1]> evaltime module start')
@deco_alpha
class ClassThree():
print('<[2]> ClassThree body')
def method_y(self):
print('<[3]> ClassThree.method_y')
class ClassFour(ClassThree):
print('<[4]> ClassFour body')
def method_y(self):
print('<[5]> ClassFour.method_y')
class ClassFive(metaclass=MetaAleph):
print('<[6]> ClassFive body')
def __init__(self):
print('<[7]> ClassFive body')
def method_z(self):
print('<[8]> ClassFive.method_z')
class ClassSix(ClassFive):
print('<[9]> ClassSix body')
def method_z(self):
print('<[10]> ClassSix.method_z')
if __name__ == '__main__':
print('<[11]> ClassThree tests', 30 * '.')
three = ClassThree()
three.method_y()
print('<[12]> ClassFour tests', 30 * '.')
four = ClassFour()
four.method_y()
print('<[13]> ClassFive tests', 30 * '.')
five = ClassFive()
five.method_z()
print('<[14]> ClassSix tests', 30 * '.')
six = ClassSix()
six.method_z()
print('<[15]> evaltime module end')
Explanation: Tracing further up, the class of ABCMeta is ultimately type as well: every class is an instance of type, directly or indirectly, but only metaclasses are at the same time subclasses of type. To understand metaclasses you must grasp this relationship: a metaclass such as ABCMeta inherits from type the power to construct classes.
The key point to take away is that all classes are instances of type, but metaclasses are also subclasses of type, so they can act as class factories. Concretely, a metaclass can be customized by implementing __init__. A metaclass __init__ method can do everything a class decorator can do, but its effects are more far-reaching.
An exercise in metaclass evaluation time
We keep evalsupport.py unchanged and create a new main script, evaltime_meta.py:
End of explanation
class EntityMeta(type):
元类,用于创建带有验证字段的业务实体
def __init__(cls, name, bases, attr_dict):
super().__init__(name, bases, attr_dict) # 在超类(这里是 type)上调用 __init__
for key, attr in attr_dict.items():
if isinstance(attr, Validated):
type_name = type(attr).__name__
attr.storage_name = '_{}#{}'.format(type_name, key)
class Entity(metaclass=EntityMeta): # 这个类只是为了用起来便利,这个模块的用户直接继承它即可,不用关心元类
'''带有验证字段的业务实体'''
class LineItem(Entity):
weight = Quantity()
price = Quantity()
description = NonBlank()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
Explanation: Importing the module:
In [1]: import evaltime_meta
<[100]> evalsupport module start
<[400]> MetaAleph body
<[700]> evalsupport module end
<[1]> evaltime module start
<[2]> ClassThree body
<[200]> deco_alpha
<[4]> ClassFour body
<[6]> ClassFive body
<[500]> MetaAleph.__init__ #与前面关键区别是,创建 ClassFive时调用了 MetaAleph.__init__ 方法
<[9]> ClassSix body
<[500]> MetaAleph.__init__ # 同上
<[15]> evaltime module end
When the Python interpreter evaluates the body of the ClassFive class it does not call type to build the actual class; it calls the MetaAleph class instead. MetaAleph's __init__ method takes 4 arguments.
self: 要初始化的对象,例如 ClassFive
name, bases, dic: 与构建类时传给 type 的参数一样
重新看一下这个类:
```
class MetaAleph(type):
print('<[400]> MetaAleph body')
def __init__(cls, name, bases, dic):
print('<[500]> MetaAleph.__init__')
def inner_2(self):
print('<[600]> MetaAleph.__init__:inner_2')
cls.method_z = inner_2
```
When writing a metaclass, the self parameter is conventionally renamed cls. The body of __init__ defines an inner_2 function and then binds it to cls.method_z. The cls in the signature of MetaAleph.__init__ refers to the class being created (e.g. ClassFive), while the self in the signature of inner_2 will eventually refer to an instance of the class we are creating (e.g. an instance of ClassFive).
运行脚本:
(pytorch) kaka@kaka-dell:~/kaka/python$ python3 evaltime_meta.py
<[100]> evalsupport module start
<[400]> MetaAleph body
<[700]> evalsupport module end
<[1]> evaltime module start
<[2]> ClassThree body
<[200]> deco_alpha
<[4]> ClassFour body
<[6]> ClassFive body
<[500]> MetaAleph.__init__
<[9]> ClassSix body
<[500]> MetaAleph.__init__
<[11]> ClassThree tests ..............................
<[300]> deco_alpha:inner_1
<[12]> ClassFour tests ..............................
<[5]> ClassFour.method_y
<[13]> ClassFive tests ..............................
<[7]> ClassFive body
<[600]> MetaAleph.__init__:inner_2 # MetaAleph 类的 __init__ 方法把ClassFive.method_z 方法替换成 inner_2 函数。
<[14]> ClassSix tests ..............................
<[7]> ClassFive body
<[600]> MetaAleph.__init__:inner_2 # ClassFive 的子类 ClassSix 也是一样
<[15]> evaltime module end
注意,ClassSix 类没有直接引用 MetaAleph 类,但是却收到了影响,因为它是 ClassFive 的子类,进而也是 MetaAleph 类的实例,所以由 MetaAleph.__init__ 实例化
定制描述符的元类
End of explanation
import collections
class EntityMeta(type):
元类,用于创建带有验证字段的业务实体
@classmethod
def __prepare__(cls, name, bases):
return collections.OrderedDict() # 返回空的 OrderedDict 实例,存储类属性
def __init__(cls, name, bases, attr_dict):
super().__init__(name, bases, attr_dict) # 在超类(这里是 type)上调用 __init__
cls._field_names = []
for key, attr in attr_dict.items():
if isinstance(attr, Validated):
type_name = type(attr).__name__
attr.storage_name = '_{}#{}'.format(type_name, key)
cls._field_names.append(key) # 按顺序存储类属性
class Entity(metaclass=EntityMeta): # 这个类只是为了用起来便利,这个模块的用户直接继承它即可,不用关心元类
'''带有验证字段的业务实体'''
@classmethod
def field_names(cls):
for name in cls._field_names:
yield name # 按照添加字段的顺序产出字段名称
class LineItem(Entity):
weight = Quantity()
price = Quantity()
description = NonBlank()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
for name in LineItem.field_names():
print(name)
Explanation: With this syntax the user does not need to know anything about descriptors or metaclasses: inheriting from the class provided by the library is enough.
A special metaclass method: __prepare__
In some applications it is useful to know the order in which class attributes are defined. For example, a library that reads and writes CSV files may want to map the fields declared, in order, in a user-defined class to the columns of the CSV file.
As mentioned earlier, the type constructor and the __new__ and __init__ methods of a metaclass receive the class body as a mapping of names to attributes. In other words, by the time the metaclass or decorator gets that mapping, the order of the attributes is lost.
In Python 3 we can use __prepare__. This special method is only meaningful in a metaclass and must be declared as a class method (i.e. with the classmethod decorator). The interpreter calls __prepare__ before calling __new__ on the metaclass, to create the mapping that will hold the attributes from the class body. __prepare__ takes the metaclass as its first argument, followed by the class name and its tuple of base classes, and it must return a mapping. When the metaclass builds the new class, the mapping returned by __prepare__ is passed as the last argument to __new__ and then on to __init__.
End of explanation |
14,727 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Loschmidt Plots
Plots for the recirq.otoc.loschmidt.tilted_sqare_lattice algorithmic benchmark. See the analysis-walkthrough.ipynb notebook for more detail into the functions used to create these plots.
Step1: Load Results
Modify the list of run_ids passed to iterload_dataframes to load datasets.
Step2: Fit vs. Macrocycle Depth
For each topology, the success probability decays exponentially with respect to random circuit macrocycle depth. The fit parameter f is the layer fidelity corresponding to a random single qubit gates and entangling gates between all qubits.
Step3: Fit vs. Quantum Area
We define a quantity called quantum area (q_area) which is the circuit width (i.e. number of qubits) multiplied by its depth. This is the number of operations in the circuit (also including any idle operations). The fit parameter f is the per-operation, per-qubit fidelity. | Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
# Set up reasonable defaults for figure fonts
import matplotlib
matplotlib.rcParams.update(**{
'axes.titlesize': 14,
'axes.labelsize': 14,
'xtick.labelsize': 12,
'ytick.labelsize': 12,
'legend.fontsize': 12,
'legend.title_fontsize': 12,
'figure.figsize': (7, 5),
})
Explanation: Loschmidt Plots
Plots for the recirq.otoc.loschmidt.tilted_square_lattice algorithmic benchmark. See the analysis-walkthrough.ipynb notebook for more detail on the functions used to create these plots.
End of explanation
import pandas as pd
import numpy as np
import cirq_google as cg
import recirq.otoc.loschmidt.tilted_square_lattice.analysis as analysis
def iterload_dataframes(run_ids):
for run_id in run_ids:
raw_results = cg.ExecutableGroupResultFilesystemRecord.from_json(run_id=run_id).load()
yield analysis.loschmidt_results_to_dataframe(raw_results)
df = pd.concat(list(iterload_dataframes([
'simulated-1',
# ...
])))
print(len(df), 'rows')
df.head()
Explanation: Load Results
Modify the list of run_ids passed to iterload_dataframes to load datasets.
End of explanation
vs_depth_df, vs_depth_gb_cols = analysis.agg_vs_macrocycle_depth(df)
fit_df, exp_ansatz = analysis.fit_vs_macrocycle_depth(df)
total_df = pd.merge(vs_depth_df, fit_df, on=vs_depth_gb_cols)
colors = plt.get_cmap('tab10')
for i, row in total_df.iterrows():
plt.errorbar(
x=row['macrocycle_depth'],
y=row['success_probability_mean'],
yerr=row['success_probability_std'],
marker='o', capsize=5, ls='',
color=colors(i),
label=f'{row["width"]}x{row["height"]} ({row["n_qubits"]}q) {row["processor_str"]}; f={row["f"]:.3f}'
)
xx = np.linspace(np.min(row['macrocycle_depth']), np.max(row['macrocycle_depth']))
yy = exp_ansatz(xx, a=row['a'], f=row['f'])
plt.plot(xx, yy, ls='--', color=colors(i))
plt.legend(loc='best')
plt.yscale('log')
plt.xlabel('Macrocycle Depth')
plt.ylabel('Success Probability')
plt.tight_layout()
Explanation: Fit vs. Macrocycle Depth
For each topology, the success probability decays exponentially with respect to the random-circuit macrocycle depth. The fit parameter f is the layer fidelity corresponding to a layer of random single-qubit gates plus entangling gates between all qubits.
End of explanation
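# Hedged sketch of the exponential-decay fit described above. The analysis module's
# fit_vs_macrocycle_depth already performs this fit; the code below only spells out an
# assumed ansatz, success_probability ~ a * f**macrocycle_depth, for one topology.
# scipy is assumed to be available; the actual ansatz in the library may differ.
from scipy.optimize import curve_fit

def decay(depth, a, f):
    return a * f ** depth

row0 = total_df.iloc[0]
popt, _ = curve_fit(decay, row0['macrocycle_depth'], row0['success_probability_mean'],
                    p0=(1.0, 0.99))
# popt[1] is an estimate of the per-macrocycle fidelity f for that topology.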
vs_q_area_df, vs_q_area_gb_cols = analysis.agg_vs_q_area(df)
fit_df2, exp_ansatz_vs_q_area = analysis.fit_vs_q_area(df)
total_df2 = pd.merge(vs_q_area_df, fit_df2, on=vs_q_area_gb_cols)
colors = plt.get_cmap('tab10')
for i, row in total_df2.iterrows():
plt.errorbar(x=row['q_area'],
y=row['success_probability_mean'],
yerr=row['success_probability_std'],
color=colors(i), capsize=5, marker='o', ls='')
xx = np.linspace(np.min(row['q_area']), np.max(row['q_area']))
yy = exp_ansatz_vs_q_area(xx, a=row['a'], f=row['f'])
plt.plot(xx, yy, ls='--', color=colors(i),
label=f'{row["run_id"]}; f={row["f"]:.3f}'
)
plt.legend(loc='best')
plt.xlabel('Quantum Area')
plt.ylabel('Macrocycle Fidelity')
plt.yscale('log')
plt.tight_layout()
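# Hedged illustration of the "quantum area" quantity described in the explanation that
# follows: q_area = circuit width (number of qubits) x depth. This assumes the dataframe
# exposes n_qubits and macrocycle_depth columns, which the plots above suggest.
example_q_area = df['n_qubits'] * df['macrocycle_depth']
example_q_area.head()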
Explanation: Fit vs. Quantum Area
We define a quantity called quantum area (q_area) which is the circuit width (i.e. number of qubits) multiplied by its depth. This is the number of operations in the circuit (also including any idle operations). The fit parameter f is the per-operation, per-qubit fidelity.
End of explanation |
14,728 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Compute source power using DICS beamformer
Compute a Dynamic Imaging of Coherent Sources (DICS)
Step1: Reading the raw data and creating epochs
Step2: We are interested in the beta band. Define a range of frequencies, using a
log scale, from 12 to 30 Hz.
Step3: Computing the cross-spectral density matrix for the beta frequency band, for
different time intervals. We use a decim value of 20 to speed up the
computation in this example at the loss of accuracy.
Step4: To compute the source power for a frequency band, rather than each frequency
separately, we average the CSD objects across frequencies.
Step5: Computing DICS spatial filters using the CSD that was computed on the entire
timecourse.
Step6: Applying DICS spatial filters separately to the CSD computed using the
baseline and the CSD computed during the ERS activity.
Step7: Visualizing source power during ERS activity relative to the baseline power. | Python Code:
# Author: Marijn van Vliet <[email protected]>
# Roman Goj <[email protected]>
# Denis Engemann <[email protected]>
# Stefan Appelhoff <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import mne
from mne.datasets import somato
from mne.time_frequency import csd_morlet
from mne.beamformer import make_dics, apply_dics_csd
print(__doc__)
Explanation: Compute source power using DICS beamformer
Compute a Dynamic Imaging of Coherent Sources (DICS) :footcite:GrossEtAl2001 filter from single-trial activity to estimate source power across a frequency band.
This example demonstrates how to source-localize the event-related synchronization (ERS) of beta-band activity in the somato dataset <somato-dataset>.
End of explanation
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# Use a shorter segment of raw just for speed here
raw = mne.io.read_raw_fif(raw_fname)
raw.crop(0, 120) # one minute for speed (looks similar to using all ~800 sec)
# Read epochs
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-1.5, tmax=2, preload=True)
del raw
# Paths to forward operator and FreeSurfer subject directory
fname_fwd = op.join(data_path, 'derivatives', 'sub-{}'.format(subject),
'sub-{}_task-{}-fwd.fif'.format(subject, task))
subjects_dir = op.join(data_path, 'derivatives', 'freesurfer', 'subjects')
Explanation: Reading the raw data and creating epochs:
End of explanation
freqs = np.logspace(np.log10(12), np.log10(30), 9)
Explanation: We are interested in the beta band. Define a range of frequencies, using a
log scale, from 12 to 30 Hz.
End of explanation
csd = csd_morlet(epochs, freqs, tmin=-1, tmax=1.5, decim=20)
csd_baseline = csd_morlet(epochs, freqs, tmin=-1, tmax=0, decim=20)
# ERS activity starts at 0.5 seconds after stimulus onset
csd_ers = csd_morlet(epochs, freqs, tmin=0.5, tmax=1.5, decim=20)
info = epochs.info
del epochs
Explanation: Computing the cross-spectral density matrix for the beta frequency band, for different time intervals.
We use a decim value of 20 to speed up the computation in this example, at the cost of some accuracy.
End of explanation
csd = csd.mean()
csd_baseline = csd_baseline.mean()
csd_ers = csd_ers.mean()
Explanation: To compute the source power for a frequency band, rather than each frequency
separately, we average the CSD objects across frequencies.
End of explanation
fwd = mne.read_forward_solution(fname_fwd)
filters = make_dics(info, fwd, csd, noise_csd=csd_baseline,
pick_ori='max-power', reduce_rank=True, real_filter=True)
del fwd
Explanation: Computing DICS spatial filters using the CSD that was computed on the entire
timecourse.
End of explanation
baseline_source_power, freqs = apply_dics_csd(csd_baseline, filters)
beta_source_power, freqs = apply_dics_csd(csd_ers, filters)
Explanation: Applying DICS spatial filters separately to the CSD computed using the
baseline and the CSD computed during the ERS activity.
End of explanation
stc = beta_source_power / baseline_source_power
message = 'DICS source power in the 12-30 Hz frequency band'
brain = stc.plot(hemi='both', views='axial', subjects_dir=subjects_dir,
subject=subject, time_label=message)
Explanation: Visualizing source power during ERS activity relative to the baseline power.
End of explanation |
14,729 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Summary tutorial
Step1: To check that everything is operating as expected we can check that the imports succeed.
Step2: Basic Example
Let's say that we have the following function and we wish to look at the to_look_at value.
Step3: With the summary module we can first annotate this value with summary.summary
Step4: Then we can transform the loss function with the function transformation
Step5: As currently returned, the dictionary contains extra information as well as potentially duplicate values. We can collapse these metrics into a single value with the following
Step6: The keys of this dictionary first show how the metric was aggregated. In this case there is only a single metric so the aggregation is ignored. This is followed by || and then the summary name.
One benefit of this function transformation is it can be nested with other jax function transformations. For example we can jit the transformed function like so
Step7: At this point aggregate_metric_list cannot be jit. In practice this is fine as it performs very little computation.
Aggregation of the same name summaries.
Consider the following fake function which calls a layer function twice.
This layer function creates a summary and thus two summary are created.
When running the transformed function we see not one, but two values returned.
Step8: These values can be combined with aggregate_metric_list as before, but this time the aggregation takes the mean. This mean is specified by the aggregation keyword argument in summary.summary which defaults to mean.
Step9: Another useful aggregation mode is "sample". As jax's random numbers are stateless, an additional RNG key must be passed in for this to work.
Step10: Finally, there is "collect" which concatenates all the values together into one long tensor after first raveling all the inputs. This is useful for extracting distributions of quantities.
Step11: Summary Scope
Sometimes it is useful to be able to group all summaries from a function inside some code block with some common name. This can be done with the summary_scope context.
Step12: Usage with function transforms.
Thanks to oryx, all of this functionality works well across a variety of function transformations.
Here is an example with a scan, vmap, and jit.
The aggregation modes will aggregate across all timesteps, and all batched dimensions.
Step13: Optionally compute metrics
Step14: Limitations and Gotchas
Requires value traced to be a function of input
At the moment summary.summary MUST be called with a descendant of whatever input is passed into the function wrapped by with_summary_output_reduced. In practice this is almost always the case as we seek to monitor changing values rather than constants.
To demonstrate this, note how the constant value is NOT logged out, but if add it to a*0 it does become logged out.
Step15: The rational for why this is a bit of a rabbit hole, but it is related to how tracing in jax work and is beyond the scope of this notebook.
No support for jax.lax.cond
At this point one cannot extract summaries out of jax conditionals. Sorry. If this is a limitation to you let us know as we have some ideas to make this work.
No dynamic names
At the moment, the tag, or the name of the summary, must be a string known at compile time. There is no support for dynamic summary names.
Alternatives for extracting information
Using this module is not the only way to extract information from a model. We discuss a couple other approaches.
"Thread" metrics through
One way to extract data from a function is to simply return the things we want to look at. As functions become more complex and nested this can become quite a pain as each one of these functions must pass out metric values. This process of spreading data throughout a bunch of functions is called "threading".
Threading also requires all pieces of code to be involved -- as one must thread these metrics everywhere.
Step16: jax.experimental.host_callback
Jax has some support to send data back from an accelerator back to the host while a ja program is running. This is exposed in jax.experimental.host_callback.
One can use this to print which is a quick way to get data out of a network. | Python Code:
!pip install --upgrade git+https://github.com/google/learned_optimization.git oryx tensorflow==2.8.0rc0 numpy
Explanation: Summary tutorial: Getting metrics out of your models
The goal of the learned_optimization.summary module is to seamlessly allow researchers to annotate and extract data from within a jax computation / machine learning model.
This could be anything from mean and standard deviation of an activation, to looking at the distribution of outputs.
Doing this in Jax can be challenging at times as code written in Jax can make use of a large number of function transformations making it difficult to reach in and look at a value.
This notebook discusses the learned_optimization.summary module which provides one solution to this problem and is discussed in this notebook.
Deps
In addition to learned_optimization, the summary module requires oryx. This can be a bit finicky to install at the moment as it relies upon particular versions of tensorflow (even though we never use these pieces).
All of learned_optimization will run without oryx, but to get summaries this module must be installed.
In a colab this is even more annoying as we must first upgrade the versions of some installed modules, restart the colab kernel, and then proceed to run the remainder of the cells.
End of explanation
import oryx
from learned_optimization import summary
assert summary.ORYX_LOGGING
Explanation: To check that everything is operating as expected we can check that the imports succeed.
End of explanation
import jax
import jax.numpy as jnp
def forward(params):
to_look_at = jnp.mean(params) * 2.
return params
def loss(parameters):
loss = jnp.mean(forward(parameters)**2)
return loss
value_grad_fn = jax.jit(jax.value_and_grad(loss))
value_grad_fn(1.0)
Explanation: Basic Example
Let's say that we have the following function and we wish to look at the to_look_at value.
End of explanation
def forward(params):
to_look_at = jnp.mean(params) * 2.
summary.summary("to_look_at", to_look_at)
return params
@jax.jit
def loss(parameters):
loss = jnp.mean(forward(parameters)**2)
return loss
Explanation: With the summary module we can first annotate this value with summary.summary
End of explanation
result, metrics = summary.with_summary_output_reduced(loss)(1.)
result, metrics
Explanation: Then we can transform the loss function with the function transformation: summary.with_summary_output_reduced.
This transformation goes through the computation and extracts all the tagged values and returns them to us by name in a dictionary.
In implementation, all the hard work here is done by the wonderful oryx library (in particular harvest).
When we wrap a function this, we return a tuple containing the original result, and a dictionary with the desired metrics.
End of explanation
summary.aggregate_metric_list([metrics])
Explanation: As currently returned, the dictionary contains extra information as well as potentially duplicate values. We can collapse these metrics into a single value with the following:
End of explanation
result, metrics = jax.jit(summary.with_summary_output_reduced(loss))(1.)
summary.aggregate_metric_list([metrics])
Explanation: The keys of this dictionary first show how the metric was aggregated. In this case there is only a single metric so the aggregation is ignored. This is followed by || and then the summary name.
One benefit of this function transformation is it can be nested with other jax function transformations. For example we can jit the transformed function like so:
End of explanation
def layer(params):
to_look_at = jnp.mean(params) * 2.
summary.summary("to_look_at", to_look_at)
return params * 2
@jax.jit
def loss(parameters):
loss = jnp.mean(layer(layer(parameters))**2)
return loss
result, metrics = summary.with_summary_output_reduced(loss)(1.)
result, metrics
Explanation: At this point aggregate_metric_list cannot be jit-compiled. In practice this is fine, as it performs very little computation.
Aggregation of summaries with the same name
Consider the following fake function, which calls a layer function twice.
The layer function creates a summary, and thus two summaries are created.
When running the transformed function we see not one, but two values returned.
End of explanation
summary.aggregate_metric_list([metrics])
Explanation: These values can be combined with aggregate_metric_list as before, but this time the aggregation takes the mean. This mean is specified by the aggregation keyword argument in summary.summary which defaults to mean.
End of explanation
def layer(params):
to_look_at = jnp.mean(params) * 2.
summary.summary("to_look_at", to_look_at, aggregation="sample")
return params * 2
@jax.jit
def loss(parameters):
loss = jnp.mean(layer(layer(parameters))**2)
return loss
key = jax.random.PRNGKey(0)
result, metrics = summary.with_summary_output_reduced(loss)(
1., sample_rng_key=key)
summary.aggregate_metric_list([metrics])
Explanation: Another useful aggregation mode is "sample". As jax's random numbers are stateless, an additional RNG key must be passed in for this to work.
End of explanation
def layer(params):
to_look_at = jnp.mean(params) * 2.
summary.summary(
"to_look_at", jnp.arange(10) * to_look_at, aggregation="collect")
return params * 2
@jax.jit
def loss(parameters):
loss = jnp.mean(layer(layer(parameters))**2)
return loss
key = jax.random.PRNGKey(0)
result, metrics = summary.with_summary_output_reduced(loss)(
1., sample_rng_key=key)
summary.aggregate_metric_list([metrics])
Explanation: Finally, there is "collect" which concatenates all the values together into one long tensor after first raveling all the inputs. This is useful for extracting distributions of quantities.
End of explanation
@jax.jit
def loss(parameters):
with summary.summary_scope("scope1"):
summary.summary("to_look_at", parameters)
with summary.summary_scope("nested"):
summary.summary("summary2", parameters)
with summary.summary_scope("scope2"):
summary.summary("to_look_at", parameters)
return parameters
key = jax.random.PRNGKey(0)
result, metrics = summary.with_summary_output_reduced(loss)(
1., sample_rng_key=key)
summary.aggregate_metric_list([metrics])
Explanation: Summary Scope
Sometimes it is useful to be able to group all summaries from a function inside some code block with some common name. This can be done with the summary_scope context.
End of explanation
@jax.jit
def fn(a):
summary.summary("other_val", a[2])
def update(state, _):
s = state + 1
summary.summary("mean_loop", s[0])
summary.summary("collect_loop", s[0], aggregation="collect")
return s, s
a, _ = jax.lax.scan(update, a, jnp.arange(20))
return a * 2
vmap_fn = jax.vmap(fn)
result, metrics = jax.jit(summary.with_summary_output_reduced(vmap_fn))(
jnp.tile(jnp.arange(4), (2, 2)))
summary.aggregate_metric_list([metrics])
Explanation: Usage with function transforms.
Thanks to oryx, all of this functionality works well across a variety of function transformations.
Here is an example with a scan, vmap, and jit.
The aggregation modes will aggregate across all timesteps, and all batched dimensions.
End of explanation
from learned_optimization import summary
import functools
def layer(params):
to_look_at = jnp.mean(params) * 2.
summary.summary("to_look_at", jnp.arange(10) * to_look_at)
return params * 2
@functools.partial(jax.jit, static_argnames="with_summary")
@summary.add_with_summary
def loss(parameters):
loss = jnp.mean(layer(layer(parameters))**2)
return loss
res, metrics = loss(1., with_summary=False)
print("No metrics", summary.aggregate_metric_list([metrics]))
res, metrics = loss(1., with_summary=True)
print("With metrics", summary.aggregate_metric_list([metrics]))
Explanation: Optionally compute metrics: @add_with_metrics
Oftentimes it is useful to define two "versions" of a function -- one with metrics, and one without -- as sometimes the computation of the metrics adds unneeded overhead that does not need to be run every iteration.
To create these two versions one can simply wrap the function with the add_with_summary decorator.
This adds both a keyword argument, and an extra return to the wrapped function which switches between computing metrics, or not.
End of explanation
def monitor(a):
summary.summary("with_input", a)
summary.summary("constant", 2.0)
summary.summary("constant_with_inp", 2.0 + (a * 0))
return a
result, metrics = summary.with_summary_output_reduced(monitor)(1.)
summary.aggregate_metric_list([metrics])
Explanation: Limitations and Gotchas
Requires value traced to be a function of input
At the moment summary.summary MUST be called with a descendant of whatever input is passed into the function wrapped by with_summary_output_reduced. In practice this is almost always the case as we seek to monitor changing values rather than constants.
To demonstrate this, note how the constant value is NOT logged out, but if add it to a*0 it does become logged out.
End of explanation
def lossb(p):
to_look_at = jnp.mean(123.)
return p * 2, to_look_at
def loss(parameters):
l = jnp.mean(parameters**2)
l, to_look_at = lossb(l)
return l, to_look_at
value_grad_fn = jax.jit(jax.value_and_grad(loss, has_aux=True))
(loss, to_look_at), g = value_grad_fn(1.0)
print(to_look_at)
Explanation: The rational for why this is a bit of a rabbit hole, but it is related to how tracing in jax work and is beyond the scope of this notebook.
No support for jax.lax.cond
At this point one cannot extract summaries out of jax conditionals. Sorry. If this is a limitation to you let us know as we have some ideas to make this work.
No dynamic names
At the moment, the tag, or the name of the summary, must be a string known at compile time. There is no support for dynamic summary names.
Alternatives for extracting information
Using this module is not the only way to extract information from a model. We discuss a couple other approaches.
"Thread" metrics through
One way to extract data from a function is to simply return the things we want to look at. As functions become more complex and nested this can become quite a pain as each one of these functions must pass out metric values. This process of spreading data throughout a bunch of functions is called "threading".
Threading also requires all pieces of code to be involved -- as one must thread these metrics everywhere.
End of explanation
from jax.experimental import host_callback as hcb
def loss(parameters):
loss = jnp.mean(parameters**2)
to_look_at = jnp.mean(123.)
hcb.id_print(to_look_at, name="to_look_at")
return loss
value_grad_fn = jax.jit(jax.value_and_grad(loss))
_ = value_grad_fn(1.0)
Explanation: jax.experimental.host_callback
Jax has some support for sending data from an accelerator back to the host while a jax program is running. This is exposed in jax.experimental.host_callback.
One can use this to print values, which is a quick way to get data out of a network.
End of explanation |
14,730 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Reusable Embeddings
Learning Objectives
1. Learn how to use a pre-trained TF Hub text modules to generate sentence vectors
1. Learn how to incorporate a pre-trained TF-Hub module into a Keras model
1. Learn how to deploy and use a text model on CAIP
Introduction
In this notebook, we will implement text models to recognize the probable source (GitHub, TechCrunch, or The New York Times) of the titles we have in the title dataset.
First, we will load and pre-process the texts and labels so that they are suitable to be fed to sequential Keras models with first layer being TF-hub pre-trained modules. Thanks to this first layer, we won't need to tokenize and integerize the text before passing it to our models. The pre-trained layer will take care of that for us, and consume directly raw text. However, we will still have to one-hot-encode each of the 3 classes into a 3 dimensional basis vector.
Then we will build, train and compare simple DNN models starting with different pre-trained TF-Hub layers.
Step1: Replace the variable values in the cell below
Step2: Create a Dataset from BigQuery
Hacker news headlines are available as a BigQuery public dataset. The dataset contains all headlines from the sites inception in October 2006 until October 2015.
Here is a sample of the dataset
Step3: Let's do some regular expression parsing in BigQuery to get the source of the newspaper article from the URL. For example, if the url is http
Step6: Now that we have good parsing of the URL to get the source, let's put together a dataset of source and titles. This will be our labeled dataset for machine learning.
Step7: For ML training, we usually need to split our dataset into training and evaluation datasets (and perhaps an independent test dataset if we are going to do model or feature selection based on the evaluation dataset). AutoML however figures out on its own how to create these splits, so we won't need to do that here.
Step8: AutoML for text classification requires that
* the dataset be in csv form with
* the first column being the texts to classify or a GCS path to the text
* the last column to be the text labels
The dataset we pulled from BiqQuery satisfies these requirements.
Step9: Let's make sure we have roughly the same number of labels for each of our three labels
Step10: Finally we will save our data, which is currently in-memory, to disk.
We will create a csv file containing the full dataset and another containing only 1000 articles for development.
Note
Step11: Now let's sample 1000 articles from the full dataset and make sure we have enough examples for each label in our sample dataset (see here for further details on how to prepare data for AutoML).
Step12: Let's write the sample datatset to disk.
Step13: Let's start by specifying where the information about the trained models will be saved as well as where our dataset is located
Step14: Loading the dataset
As in the previous labs, our dataset consists of titles of articles along with the label indicating from which source these articles have been taken from (GitHub, TechCrunch, or The New York Times)
Step15: Let's look again at the number of examples per label to make sure we have a well-balanced dataset
Step16: Preparing the labels
In this lab, we will use pre-trained TF-Hub embeddings modules for english for the first layer of our models. One immediate
advantage of doing so is that the TF-Hub embedding module will take care for us of processing the raw text.
This also means that our model will be able to consume text directly instead of sequences of integers representing the words.
However, as before, we still need to preprocess the labels into one-hot-encoded vectors
Step17: Preparing the train/test splits
Let's split our data into train and test splits
Step18: To be on the safe side, we verify that the train and test splits
have roughly the same number of examples per class.
Since it is the case, accuracy will be a good metric to use to measure
the performance of our models.
Step19: Now let's create the features and labels we will feed our models with
Step20: NNLM Model
We will first try a word embedding pre-trained using a Neural Probabilistic Language Model. TF-Hub has a 50-dimensional one called
nnlm-en-dim50-with-normalization, which also
normalizes the vectors produced.
Lab Task 1a
Step21: Note that this TF-Hub embedding produces a single 50-dimensional vector when passed a sentence
Step22: Swivel Model
Then we will try a word embedding obtained using Swivel, an algorithm that essentially factorizes word co-occurrence matrices to create the words embeddings.
TF-Hub hosts the pretrained gnews-swivel-20dim-with-oov 20-dimensional Swivel module.
Lab Task 1c
Step23: Similarly as the previous pre-trained embedding, it outputs a single vector when passed a sentence
Step24: Building the models
Let's write a function that
takes as input an instance of a KerasLayer (i.e. the swivel_module or the nnlm_module we constructed above) as well as the name of the model (say swivel or nnlm)
returns a compiled Keras sequential model starting with this pre-trained TF-hub layer, adding one or more dense relu layers to it, and ending with a softmax layer giving the probability of each of the classes
Step25: Let's also wrap the training code into a train_and_evaluate function that
* takes as input the training and validation data, as well as the compiled model itself, and the batch_size
* trains the compiled model for 100 epochs at most, and does early-stopping when the validation loss is no longer decreasing
* returns a history object, which will help us to plot the learning curves
Step26: Training NNLM
Step27: Training Swivel
Step28: Comparing the models
Swivel trains faster but achieves a lower validation accuracy, and requires more epochs to train.
At last, let's compare all the models we have trained at once using TensorBoard in order
to choose the one that overfits the least for the same performance level.
Run the output of the following command in your Cloud Shell to launch TensorBoard, and use the Web Preview on port 6006 to view it.
Step29: Deploying the model
The first step is to serialize one of our trained Keras models as a SavedModel
Step30: Then we can deploy the model using the gcloud CLI as before
Step31: Note the ENDPOINT_RESOURCENAME above as you'll need it below for the prediction.
Before we try our deployed model, let's inspect its signature to know what to send to the deployed API
Step32: Let's go ahead and hit our model
Step33: Insert below the ENDPOINT_RESOURCENAME from the deployment code above. | Python Code:
import os
import pandas as pd
from google.cloud import bigquery
Explanation: Reusable Embeddings
Learning Objectives
1. Learn how to use a pre-trained TF Hub text modules to generate sentence vectors
1. Learn how to incorporate a pre-trained TF-Hub module into a Keras model
1. Learn how to deploy and use a text model on CAIP
Introduction
In this notebook, we will implement text models to recognize the probable source (GitHub, TechCrunch, or The New York Times) of the titles we have in the title dataset.
First, we will load and pre-process the texts and labels so that they are suitable to be fed to sequential Keras models whose first layer is a pre-trained TF-Hub module. Thanks to this first layer, we won't need to tokenize and integerize the text before passing it to our models. The pre-trained layer will take care of that for us, and consume raw text directly. However, we will still have to one-hot-encode each of the 3 classes into a 3-dimensional basis vector.
Then we will build, train and compare simple DNN models starting with different pre-trained TF-Hub layers.
End of explanation
PROJECT = !(gcloud config get-value core/project)
PROJECT = PROJECT[0]
BUCKET = PROJECT # defaults to PROJECT
REGION = "us-central1" # Replace with your REGION
os.environ["PROJECT"] = PROJECT
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
%%bash
gcloud config set project $PROJECT
gcloud config set ai/region $REGION
Explanation: Replace the variable values in the cell below:
End of explanation
%%bigquery --project $PROJECT
SELECT
url, title, score
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
LENGTH(title) > 10
AND score > 10
AND LENGTH(url) > 0
LIMIT 10
Explanation: Create a Dataset from BigQuery
Hacker news headlines are available as a BigQuery public dataset. The dataset contains all headlines from the site's inception in October 2006 until October 2015.
Here is a sample of the dataset:
End of explanation
%%bigquery --project $PROJECT
SELECT
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.'))[OFFSET(1)] AS source,
COUNT(title) AS num_articles
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.com$')
AND LENGTH(title) > 10
GROUP BY
source
ORDER BY num_articles DESC
LIMIT 100
Explanation: Let's do some regular expression parsing in BigQuery to get the source of the newspaper article from the URL. For example, if the url is http://mobile.nytimes.com/...., I want to be left with <i>nytimes</i>
End of explanation
regex = ".*://(.[^/]+)/"
sub_query =
SELECT
title,
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '{0}'), '.'))[OFFSET(1)] AS source
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '{0}'), '.com$')
AND LENGTH(title) > 10
.format(
regex
)
query =
SELECT
LOWER(REGEXP_REPLACE(title, '[^a-zA-Z0-9 $.-]', ' ')) AS title,
source
FROM
({sub_query})
WHERE (source = 'github' OR source = 'nytimes' OR source = 'techcrunch')
.format(
sub_query=sub_query
)
print(query)
Explanation: Now that we have good parsing of the URL to get the source, let's put together a dataset of source and titles. This will be our labeled dataset for machine learning.
End of explanation
bq = bigquery.Client(project=PROJECT)
title_dataset = bq.query(query).to_dataframe()
title_dataset.head()
Explanation: For ML training, we usually need to split our dataset into training and evaluation datasets (and perhaps an independent test dataset if we are going to do model or feature selection based on the evaluation dataset). AutoML however figures out on its own how to create these splits, so we won't need to do that here.
End of explanation
print(f"The full dataset contains {len(title_dataset)} titles")
Explanation: AutoML for text classification requires that
* the dataset be in csv form with
* the first column being the texts to classify or a GCS path to the text
* the last column to be the text labels
The dataset we pulled from BigQuery satisfies these requirements.
End of explanation
title_dataset.source.value_counts()
Explanation: Let's make sure we have roughly the same number of labels for each of our three labels:
End of explanation
DATADIR = "./data/"
if not os.path.exists(DATADIR):
os.makedirs(DATADIR)
FULL_DATASET_NAME = "titles_full.csv"
FULL_DATASET_PATH = os.path.join(DATADIR, FULL_DATASET_NAME)
# Let's shuffle the data before writing it to disk.
title_dataset = title_dataset.sample(n=len(title_dataset))
title_dataset.to_csv(
FULL_DATASET_PATH, header=False, index=False, encoding="utf-8"
)
Explanation: Finally we will save our data, which is currently in-memory, to disk.
We will create a csv file containing the full dataset and another containing only 1000 articles for development.
Note: It may take a long time to train AutoML on the full dataset, so we recommend to use the sample dataset for the purpose of learning the tool.
End of explanation
sample_title_dataset = title_dataset.sample(n=1000)
sample_title_dataset.source.value_counts()
Explanation: Now let's sample 1000 articles from the full dataset and make sure we have enough examples for each label in our sample dataset (see here for further details on how to prepare data for AutoML).
End of explanation
SAMPLE_DATASET_NAME = "titles_sample.csv"
SAMPLE_DATASET_PATH = os.path.join(DATADIR, SAMPLE_DATASET_NAME)
sample_title_dataset.to_csv(
SAMPLE_DATASET_PATH, header=False, index=False, encoding="utf-8"
)
sample_title_dataset.head()
import datetime
import os
import shutil
import pandas as pd
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
from tensorflow_hub import KerasLayer
print(tf.__version__)
%matplotlib inline
Explanation: Let's write the sample dataset to disk.
End of explanation
MODEL_DIR = f"gs://{BUCKET}/text_models"
Explanation: Let's start by specifying where the information about the trained models will be saved as well as where our dataset is located:
End of explanation
ls $DATADIR
DATASET_NAME = "titles_full.csv"
TITLE_SAMPLE_PATH = os.path.join(DATADIR, DATASET_NAME)
COLUMNS = ["title", "source"]
titles_df = pd.read_csv(TITLE_SAMPLE_PATH, header=None, names=COLUMNS)
titles_df.head()
Explanation: Loading the dataset
As in the previous labs, our dataset consists of titles of articles along with the label indicating which source these articles have been taken from (GitHub, TechCrunch, or The New York Times):
End of explanation
titles_df.source.value_counts()
Explanation: Let's look again at the number of examples per label to make sure we have a well-balanced dataset:
End of explanation
CLASSES = {"github": 0, "nytimes": 1, "techcrunch": 2}
N_CLASSES = len(CLASSES)
def encode_labels(sources):
classes = [CLASSES[source] for source in sources]
one_hots = to_categorical(classes, num_classes=N_CLASSES)
return one_hots
encode_labels(titles_df.source[:4])
Explanation: Preparing the labels
In this lab, we will use pre-trained TF-Hub embedding modules for English for the first layer of our models. One immediate
advantage of doing so is that the TF-Hub embedding module will take care of processing the raw text for us.
This also means that our model will be able to consume text directly instead of sequences of integers representing the words.
However, as before, we still need to preprocess the labels into one-hot-encoded vectors:
End of explanation
N_TRAIN = int(len(titles_df) * 0.95)
titles_train, sources_train = (
titles_df.title[:N_TRAIN],
titles_df.source[:N_TRAIN],
)
titles_valid, sources_valid = (
titles_df.title[N_TRAIN:],
titles_df.source[N_TRAIN:],
)
Explanation: Preparing the train/test splits
Let's split our data into train and test splits:
End of explanation
sources_train.value_counts()
sources_valid.value_counts()
Explanation: To be on the safe side, we verify that the train and test splits
have roughly the same number of examples per class.
Since it is the case, accuracy will be a good metric to use to measure
the performance of our models.
End of explanation
X_train, Y_train = titles_train.values, encode_labels(sources_train)
X_valid, Y_valid = titles_valid.values, encode_labels(sources_valid)
X_train[:3]
Y_train[:3]
Explanation: Now let's create the features and labels we will feed our models with:
End of explanation
NNLM = "https://tfhub.dev/google/nnlm-en-dim50/2"
nnlm_module = KerasLayer(
# TODO
)
Explanation: NNLM Model
We will first try a word embedding pre-trained using a Neural Probabilistic Language Model. TF-Hub has a 50-dimensional one called
nnlm-en-dim50-with-normalization, which also
normalizes the vectors produced.
Lab Task 1a: Import NNLM TF Hub module into KerasLayer
Once loaded from its url, the TF-hub module can be used as a normal Keras layer in a sequential or functional model. Since we have enough data to fine-tune the parameters of the pre-trained embedding itself, we will set trainable=True in the KerasLayer that loads the pre-trained embedding:
End of explanation
nnlm_module(
tf.constant(
[
# TODO
]
)
)
Explanation: Note that this TF-Hub embedding produces a single 50-dimensional vector when passed a sentence:
Lab Task 1b: Use module to encode a sentence string
End of explanation
SWIVEL = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim-with-oov/1"
swivel_module = KerasLayer(
# TODO
)
Explanation: Swivel Model
Then we will try a word embedding obtained using Swivel, an algorithm that essentially factorizes word co-occurrence matrices to create the word embeddings.
TF-Hub hosts the pretrained gnews-swivel-20dim-with-oov 20-dimensional Swivel module.
Lab Task 1c: Import Swivel TF Hub module into KerasLayer
End of explanation
swivel_module(
tf.constant(
[
# TODO
]
)
)
Explanation: As with the previous pre-trained embedding, it outputs a single vector when passed a sentence:
Lab Task 1d: Use module to encode a sentence string
End of explanation
def build_model(hub_module, name):
model = Sequential(
[
# TODO
Dense(16, activation="relu"),
Dense(N_CLASSES, activation="softmax"),
],
name=name,
)
model.compile(
optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
)
return model
Explanation: Building the models
Let's write a function that
takes as input an instance of a KerasLayer (i.e. the swivel_module or the nnlm_module we constructed above) as well as the name of the model (say swivel or nnlm)
returns a compiled Keras sequential model starting with this pre-trained TF-hub layer, adding one or more dense relu layers to it, and ending with a softmax layer giving the probability of each of the classes:
Lab Task 2: Incorporate a pre-trained TF Hub module as first layer of Keras Sequential Model
End of explanation
def train_and_evaluate(train_data, val_data, model, batch_size=5000):
X_train, Y_train = train_data
tf.random.set_seed(33)
model_dir = os.path.join(MODEL_DIR, model.name)
if tf.io.gfile.exists(model_dir):
tf.io.gfile.rmtree(model_dir)
history = model.fit(
X_train,
Y_train,
epochs=100,
batch_size=batch_size,
validation_data=val_data,
callbacks=[EarlyStopping(patience=1), TensorBoard(model_dir)],
)
return history
Explanation: Let's also wrap the training code into a train_and_evaluate function that
* takes as input the training and validation data, as well as the compiled model itself, and the batch_size
* trains the compiled model for 100 epochs at most, and does early-stopping when the validation loss is no longer decreasing
* returns a history object, which will help us to plot the learning curves
End of explanation
data = (X_train, Y_train)
val_data = (X_valid, Y_valid)
nnlm_model = build_model(nnlm_module, "nnlm")
nnlm_history = train_and_evaluate(data, val_data, nnlm_model)
history = nnlm_history
pd.DataFrame(history.history)[["loss", "val_loss"]].plot()
pd.DataFrame(history.history)[["accuracy", "val_accuracy"]].plot()
Explanation: Training NNLM
End of explanation
swivel_model = build_model(swivel_module, name="swivel")
swivel_history = train_and_evaluate(data, val_data, swivel_model)
history = swivel_history
pd.DataFrame(history.history)[["loss", "val_loss"]].plot()
pd.DataFrame(history.history)[["accuracy", "val_accuracy"]].plot()
Explanation: Training Swivel
End of explanation
!echo tensorboard --logdir $MODEL_DIR --port 6006
Explanation: Comparing the models
Swivel trains faster but achieves a lower validation accuracy, and requires more epochs to train.
At last, let's compare all the models we have trained at once using TensorBoard in order
to choose the one that overfits the least for the same performance level.
Run the output of the following command in your Cloud Shell to launch TensorBoard, and use the Web Preview on port 6006 to view it.
End of explanation
OUTPUT_DIR = "./savedmodels_vertex"
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
EXPORT_PATH = os.path.join(OUTPUT_DIR, "swivel")
os.environ["EXPORT_PATH"] = EXPORT_PATH
shutil.rmtree(EXPORT_PATH, ignore_errors=True)
tf.keras.models.save_model(swivel_model, EXPORT_PATH)
Explanation: Deploying the model
The first step is to serialize one of our trained Keras models as a SavedModel:
End of explanation
%%bash
# TODO 5
TIMESTAMP=$(date -u +%Y%m%d_%H%M%S)
MODEL_DISPLAYNAME=title_model_$TIMESTAMP
ENDPOINT_DISPLAYNAME=swivel_$TIMESTAMP
IMAGE_URI=# TODO
ARTIFACT_DIRECTORY=gs://${BUCKET}/${MODEL_DISPLAYNAME}/
echo $ARTIFACT_DIRECTORY
gsutil cp -r ${EXPORT_PATH}/* ${ARTIFACT_DIRECTORY}
# Model
MODEL_RESOURCENAME=$(gcloud ai models upload \
--region=$REGION \
--display-name=$MODEL_DISPLAYNAME \
--container-image-uri=$IMAGE_URI \
--artifact-uri=# TODO
--format="value(model)")
echo "MODEL_DISPLAYNAME=${MODEL_DISPLAYNAME}"
echo "MODEL_RESOURCENAME=${MODEL_RESOURCENAME}"
# Endpoint
ENDPOINT_RESOURCENAME=$(gcloud ai endpoints create \
--region=$REGION \
--display-name=$ENDPOINT_DISPLAYNAME \
--format="value(name)")
echo "ENDPOINT_DISPLAYNAME=${ENDPOINT_DISPLAYNAME}"
echo "ENDPOINT_RESOURCENAME=${ENDPOINT_RESOURCENAME}"
# Deployment
DEPLOYED_MODEL_DISPLAYNAME=${MODEL_DISPLAYNAME}_deployment
MACHINE_TYPE=n1-standard-2
MIN_REPLICA_COUNT=1
MAX_REPLICA_COUNT=3
gcloud ai endpoints deploy-model $ENDPOINT_RESOURCENAME \
--region=$REGION \
--model=$MODEL_RESOURCENAME \
--display-name=$DEPLOYED_MODEL_DISPLAYNAME \
--machine-type=$MACHINE_TYPE \
--min-replica-count=$MIN_REPLICA_COUNT \
--max-replica-count=$MAX_REPLICA_COUNT \
--traffic-split=0=100
Explanation: Then we can deploy the model using the gcloud CLI as before:
Lab Task 3a: Complete the following script to deploy the swivel model
End of explanation
!saved_model_cli show \
--tag_set serve \
--signature_def serving_default \
--dir {EXPORT_PATH}
!find {EXPORT_PATH}
Explanation: Note the ENDPOINT_RESOURCENAME above as you'll need it below for the prediction.
Before we try our deployed model, let's inspect its signature to know what to send to the deployed API:
End of explanation
%%writefile input.json
{
# TODO
}
Explanation: Let's go ahead and hit our model:
Lab Task 3b: Create the JSON object to send a title to the API you just deployed
(Hint: Look at the 'saved_model_cli show' command output above, as well as how to wrap your JSON instance into an array of instances at https://cloud.google.com/vertex-ai/docs/predictions/online-predictions-custom-models#formatting-instances-as-json)
End of explanation
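One hedged sketch of what input.json could contain follows; the {"instances": [...]} wrapper matches the Vertex AI online-prediction format, while the inner key name here is an assumption and should be read off the saved_model_cli signature printed above:
# {
#   "instances": [
#     {"keras_layer_input": "a new blockbuster streaming deal announced"}
#   ]
# }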
%%bash
ENDPOINT_RESOURCENAME= #TODO: insert the ENDPOINT_RESOURCENAME here from above
gcloud ai endpoints predict $ENDPOINT_RESOURCENAME \
--region $REGION \
--json-request input.json
Explanation: Insert below the ENDPOINT_RESOURCENAME from the deployment code above.
End of explanation |
14,731 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Absorbing Random Walk Centrality
A short introduction by example
The absorbing-centrality module contains an implementation for a greedy algorithm to compute the k-central nodes in a graph according to the Absorbing Random-Walk (ARW) centrality measure. The measure and the greedy algorithm are discussed in our paper that appeared in ICDM 2015. Here we provide a short description of the measure and the associated problem, taken from its abstract.
"Given a graph $G = (V,E)$ and a set of query nodes $Q\subseteq V$, we aim
to identify the $k$ most central nodes in $G$ with respect to $Q$.
Specifically, we consider central nodes to be absorbing for
random walks that start at the query nodes $Q$. The goal is to
find the set of $k$ central nodes that minimizes the expected
length of a random walk until absorption."
This Python 3 notebook demonstrates usage of the greedy algorithm with a simple example.
Note
Step1: Optimizing Absorbing Random-Walk (ARW) Centrality
Now let's find the most central nodes according to ARW centrality. The problem takes the following parameters as input
Step2: We employ a greedy algorithm, implemented as absorbing_centrality.algorithms.greedy_team, to select the k = 3 most central nodes. The algorithm performs k steps, and at each step selects one additional node that improves ARW centrality the most.
Step3: The algorithm returns a tuple with the following two elements
Step4: The i-th element of centrality_scores is the centrality score obtained for the i-th set of central nodes built by greedy. Moreover, the i-th node-set is always a subset of the (i+1)-th node-set.
Step5: Notice that ARW centrality decreases as more nodes are selected by greedy. That behavior is expected because, with more nodes acting as absorbing, random walks are more likely to be absorbed earlier, i.e. have decreased absorption time -- thus leading to decreased ARW centrality, by definition.
The $k$ nodes selected by greedy are returned as the last node-set and are associated with the last returned centrality score.
Step6: The plot below shows query nodes Q in red color and central nodes returned by greedy in blue.
Step7: Is greedy optimal?
The greedy algorithm discussed above is easy to implement but generally does not return optimal solutions to the ARW problem. This is most easily demonstrated for the case where
* all query nodes are also candidate nodes, i.e., $Q \subseteq D$, and
* we seek the k most central nodes, with $k = |Q| > 1$.
In that case, the set of query nodes is an optimal solution with centrality equal to zero (0); how close does greedy come to that?
Let us consider the same setting as in the previous example, but this time ask greedy for the most central k = 5 nodes.
Step8: All other parameters of the problem remain the same and we invoke greedy as before.
Step9: Let us plot greedy's solution. The plot below shows in blue those nodes returned as central by greedy and in red the query nodes that were not selected as central.
Step10: We notice that the $k = 5$ nodes selected by greedy are not the same as the set of $k = 5$ query nodes. It is easy to see that the set of $k = 5$ query nodes would be the optimal solution in this case, with centrality zero (0), as all random walks starting from them would be absorbed immediately.
Step11: How good is greedy?
Greedy misses the optimal solution, but it does not perform arbitrarily badly.
Firstly, it is easy to see that greedy does find the optimal solution for $k = 1$.
Moreover, we have the following guarantee for $k > 1$
Step12: To demonstrate visually what the aforementioned guarantee means, let us consider the plot below. The plot shows with blue dots the ARW centrality of the node-sets built by greedy along its $k$ steps. In addition, the plot shows with grey horizontal lines the levels of $m$, $c_{greedy}$, and $c_{opt}$.
Step13: The length of the red segment in the plot is equal to $(m - c_{greedy})$. Intuitively, it is the improvement in ARW centrality achieved by greedy as it builds node-sets of size $1$ to $k$.
Similarly, the length of the green segment is equal to $(m - c_{opt})$. Intuitively, it is the improvement in the optimal ARW centrality between node-sets of size $1$ to $k$.
The aforementioned guarantee means that the length of the red segment (improvement by greedy) will be at least $(1 - \frac{1}{e}) \approx 0.63$ times the length of the green segment (optimal improvement). We can confirm numerically that it holds for this example.
Step17: Setup
Run these cells before other code cells. | Python Code:
graph = nx.karate_club_graph() # load the graph
node_positions = nx.spring_layout(graph) # fix the node positions
make_graph_plot(graph, node_positions, node_size = 0,
node_color = "white", with_labels = True)
Explanation: Absorbing Random Walk Centrality
A short introduction by example
The absorbing-centrality module contains an implementation for a greedy algorithm to compute the k-central nodes in a graph according to the Absorbing Random-Walk (ARW) centrality measure. The measure and the greedy algorithm are discussed in our paper that appeared in ICDM 2015. Here we provide a short description of the measure and the associated problem, taken from its abstract.
"Given a graph $G = (V,E)$ and a set of query nodes $Q\subseteq V$, we aim
to identify the $k$ most central nodes in $G$ with respect to $Q$.
Specifically, we consider central nodes to be absorbing for
random walks that start at the query nodes $Q$. The goal is to
find the set of $k$ central nodes that minimizes the expected
length of a random walk until absorption."
This Python 3 notebook demonstrates usage of the greedy algorithm with a simple example.
Note: Run the Setup cells at the bottom of the notebook before other code cells.
Dataset
For this example, we'll be using Zachary's Karate Club as our dataset.
Let's load and draw its graph.
End of explanation
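To make the quantity behind the measure concrete, the expected number of steps before absorption can be computed from the fundamental matrix of the chain. The sketch below is textbook material and not part of the absorbing_centrality API:
import numpy as np

def expected_absorption_times(P_tt):
    # P_tt: transition probabilities restricted to the transient (non-absorbing) nodes
    n = P_tt.shape[0]
    fundamental = np.linalg.inv(np.eye(n) - P_tt)   # N = (I - P_tt)^-1
    return fundamental @ np.ones(n)                 # expected steps to absorption from each transient node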
Q = {5, 12, 14, 15, 26} # a small number of query nodes
D = graph.nodes() # all nodes are candidate nodes
alpha = 0.85 # the random walk restarts with
# probability 1 - α = 0.15 in each step
k = 3 # number of central nodes
Explanation: Optimizing Absorbing Random-Walk (ARW) Centrality
Now let's find the most central nodes according to ARW centrality. The problem takes the following parameters as input:
Query nodes Q: random walks in our model start from nodes in Q. In our examples here, we also assume that a random walk might start from each node in Q with equal probability.
Candidate nodes D: the set of nodes from which we select central nodes.
Restart parameter α: at each step, a random walk 'restarts' - i.e. moves to one of the query nodes in the next step - with probability (1-α), otherwise continues to an adjacent node.
The number of central nodes k: the number of candidate nodes that we wish to identify as central.
For our example, we have the following configuration:
End of explanation
result = arw.algorithms.greedy_team(graph, k, query = Q, candidates = D,
with_restarts = True, alpha = alpha)
Explanation: We employ a greedy algorithm, implemented as absorbing_centrality.algorithms.greedy_team, to select the k = 3 most central nodes. The algorithm performs k steps, and at each step selects one additional node that improves ARW centrality the most.
End of explanation
centrality_scores, node_sets = result
Explanation: The algorithm returns a tuple with the following two elements:
* A list of centrality scores, one for each step of the algorithm.
* A list of node-sets, one for each step of the algorithm.
End of explanation
print("Greedy returned {0} centrality scores and {1} node-sets for k = {2}."\
.format(len(centrality_scores), len(node_sets), k))
for i in range(k):
print("Centrality score {0:.2f} for nodes {1}."
.format(centrality_scores[i][0], node_sets[i]))
Explanation: The i-th element of centrality_scores is the centrality score obtained for the i-th set of central nodes built by greedy. Moreover, the i-th node-set is always a subset of the (i+1)-th node-set.
End of explanation
central_nodes = node_sets[k-1]
solution_centrality = centrality_scores[k-1][0]
print("Greedy selected nodes {0} as central, with centrality score {1:.2f}."\
.format(central_nodes, solution_centrality))
Explanation: Notice that ARW centrality decreases as more nodes are selected by greedy. That behavior is expected because, with more nodes acting as absorbing, random walks are more likely to be absorbed earlier, i.e. have decreased absorption time -- thus leading to decreased ARW centrality, by definition.
The $k$ nodes selected by greedy are returned as the last node-set and are associated with the last returned centrality score.
End of explanation
query_node_params = NodeParams(Q, "red", 150)
central_node_params = NodeParams(central_nodes, "blue", 200)
node_color, node_size = set_node_color_and_size(graph,
query_node_params, central_node_params)
make_graph_plot(graph, node_positions, node_size, node_color, with_labels = False)
Explanation: The plot below shows query nodes Q in red color and central nodes returned by greedy in blue.
End of explanation
k = 5
Explanation: Is greedy optimal?
The greedy algorithm discussed above is easy to implement but generally does not return optimal solutions to the ARW problem. This is most easily demonstrated for the case where
* all query nodes are also candidate nodes, i.e., $Q \subseteq D$, and
* we seek the k most central nodes, with $k = |Q| > 1$.
In that case, the set of query nodes is an optimal solution with centrality equal to zero (0); how close does greedy come to that?
Let us consider the same setting as in the previous example, but this time ask greedy for the most central k = 5 nodes.
End of explanation
greedy_result = arw.algorithms.greedy_team(graph, k, query = Q, candidates = D,
with_restarts = True, alpha = alpha)
greedy_centrality_scores, greedy_node_sets = greedy_result
greedy_central_nodes = greedy_node_sets[k-1] # the k central nodes found by greedy...
greedy_centrality = greedy_centrality_scores[k-1][0] # ... and their centrality
print("Greedy returned nodes {0} as central, with centrality score {1:.2f}."\
.format(greedy_central_nodes, greedy_centrality))
Explanation: All other parameters of the problem remain the same and we invoke greedy as before.
End of explanation
query_node_params = NodeParams(Q, "red", 150)
central_node_params = NodeParams(greedy_central_nodes, "blue", 200)
node_color, node_size = set_node_color_and_size(graph,
query_node_params, central_node_params)
make_graph_plot(graph, node_positions, node_size, node_color, with_labels = False)
Explanation: Let us plot greedy's solution. The plot below shows in blue those nodes returned as central by greedy and in red the query nodes that were not selected as central.
End of explanation
optimal_centrality = arw.absorbing_centrality(graph, Q, query=Q,
with_restarts=True, alpha=0.85)
print("Optimal centrality {0:.2f} achieved for query nodes {1}."\
.format(optimal_centrality, Q))
print("Greedy centrality {0:.2f} achieved for nodes {1}."\
.format(greedy_centrality, set(greedy_central_nodes)))
Explanation: We notice that the $k = 5$ nodes selected by greedy are not the same as the set of $k = 5$ query nodes. It is easy to see that the set of $k = 5$ query nodes would be the optimal solution in this case, with centrality zero (0), as all random walks starting from them would be absorbed immediately.
End of explanation
m = greedy_centrality_scores[0][0]
c_greedy = greedy_centrality_scores[k - 1][0] # for k = 5
c_opt = optimal_centrality # for k = 5
Explanation: How good is greedy?
Greedy misses the optimal solution, but it does not perform arbitrarily badly.
Firstly, it is easy to see that greedy does find the optimal solution for $k = 1$.
Moreover, we have the following guarantee for $k > 1$:
Let $m$ be the optimal ARW centrality for $k = 1$.
Moreover, let $c_{greedy}$ be the
centrality of the solution returned by greedy for a
given $k > 1$ and $c_{opt}$ the optimal centrality for
the same $k > 1$. Then, we have
$$(m - c_{greedy}) \geq (1 - \frac{1}{e}) (m - c_{opt}).$$
End of explanation
make_approximation_plot(m, c_greedy, c_opt, greedy_centrality_scores)
Explanation: To demonstrate visually what the aforementioned guarantee means, let us consider the plot below. The plot shows with blue dots the ARW centrality of the node-sets built by greedy along its $k$ steps. In addition, the plot shows with grey horizontal lines the levels of $m$, $c_{greedy}$, and $c_{opt}$.
End of explanation
question = lambda x, y: "Yes" if x >= (1 - 1/np.e) * y else "No"
answer = question(m - c_greedy, m - c_opt)
print("Does the inequality hold? -{0}".format(answer))
Explanation: The length of the red segment in the plot is equal to $(m - c_{greedy})$. Intuitively, it is the improvement in ARW centrality achieved by greedy as it builds node-sets of size $1$ to $k$.
Similarly, the length of the green segment is equal to $(m - c_{opt})$. Intuitively, it is the improvement in the optimal ARW centrality between node-sets of size $1$ to $k$.
The aforementioned guarantee means that the length of the red segment (improvement by greedy) will be at least $(1 - \frac{1}{e}) \approx 0.63$ times the length of the green segment (optimal improvement). We can confirm numerically that it holds for this example.
End of explanation
import sys
import networkx as nx
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import collections
import absorbing_centrality as arw
%matplotlib inline
NodeParams = collections.namedtuple("NodeParams", ['nodes', 'color', 'size'])
def set_node_color_and_size(graph, query_node_params,
candidate_node_params,
default_color = "grey",
default_node_size = 30):
Return a list of colors for the graph nodes.
inverse_node_index = dict([(node, pos) for pos, node in enumerate(graph.nodes())])
# initialize all nodes with default color
node_color = len(graph) * [default_color]
node_size = len(graph) * [default_node_size]
for node_set_params in [query_node_params, candidate_node_params]:
if node_set_params:
for node in node_set_params.nodes:
pos = inverse_node_index[node]
node_color[pos] = node_set_params.color
node_size[pos] = node_set_params.size
return node_color, node_size
def make_graph_plot(graph, node_positions, node_size,
node_color, with_labels):
Plot a networkx graph with custom node positions,
sizes, color, and the option to have labels or not.
fig, ax = plt.subplots(1, 1, figsize = (12, 12))
ax.axis('off')
nx.draw(graph, ax = ax, with_labels = with_labels, font_size = 20,
node_size = node_size, node_color = node_color,
edge_color = "grey", pos = node_positions)
def make_approximation_plot(m, c_greedy, c_opt, greedy_centrality_scores):
Make a plot to demonstrate the approximation guarantee
for the greedy algorithm.
fig, ax = plt.subplots(1, 1, figsize = (10, 6))
x = range(1, 1+k)
y = [s[0] for s in greedy_centrality_scores]
ax.set(xlim = (0.5, k+0.5), ylim = (-0.5, max(y) * 1.2))
ax.set_xlabel("k", fontsize = 18)
ax.set_ylabel("ARW centrality", fontsize = 18)
ax.set(yticks = [m, c_greedy, c_opt],
yticklabels = ["m = {0:.2f}".format(m),
"c_greedy = {0:.2f}".format(c_greedy),
"c_opt = {0:.2f}".format(c_opt)])
_tmp = ax.plot((0, k+1), (y[0], y[0]), color = "grey", lw = 3)
_tmp = ax.plot((0, k+1), (y[-1], y[-1]), color = "grey", lw = 3)
_tmp = ax.plot((0, k+1), (0, 0), color = "grey", lw = 3)
_tmp = ax.plot((1.25, 1.25), (y[0], y[-1]), lw = 5, color = "red",
alpha = 0.7, label = "m - c_greedy")
_tmp = ax.plot((1.75, 1.75), (y[0], 0), lw = 5, color = "green", label = "m - c_opt")
_tmp = ax.scatter(x, y, s = 150, lw =2, label = "greedy centrality")
_tmp = ax.legend(loc = 1)
Explanation: Setup
Run these cells before other code cells.
End of explanation |
14,732 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Receptive Field Estimation and Prediction
This example reproduces figures from Lalor et al.'s mTRF toolbox in
MATLAB
Step1: Load the data from the publication
First we will load the data collected in
Step2: Create and fit a receptive field model
We will construct an encoding model to find the linear relationship between
a time-delayed version of the speech envelope and the EEG signal. This allows
us to make predictions about the response to new stimuli.
Step3: Investigate model coefficients
Finally, we will look at how the linear coefficients (sometimes
referred to as beta values) are distributed across time delays as well as
across the scalp. We will recreate figure 1 and figure 2 from
Step4: Create and fit a stimulus reconstruction model
We will now demonstrate another use case for the
Step5: Visualize stimulus reconstruction
To get a sense of our model performance, we can plot the actual and predicted
stimulus envelopes side by side.
Step6: Investigate model coefficients
Finally, we will look at how the decoding model coefficients are distributed
across the scalp. We will attempt to recreate figure 5_ from | Python Code:
# Authors: Chris Holdgraf <[email protected]>
# Eric Larson <[email protected]>
# Nicolas Barascud <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from os.path import join
import mne
from mne.decoding import ReceptiveField
from sklearn.model_selection import KFold
from sklearn.preprocessing import scale
Explanation: Receptive Field Estimation and Prediction
This example reproduces figures from Lalor et al.'s mTRF toolbox in
MATLAB :footcite:CrosseEtAl2016. We will show how the
:class:mne.decoding.ReceptiveField class
can perform a similar function along with scikit-learn. We will first fit a
linear encoding model using the continuously-varying speech envelope to predict
activity of a 128 channel EEG system. Then, we will take the reverse approach
and try to predict the speech envelope from the EEG (known in the literature
as a decoding model, or simply stimulus reconstruction).
End of explanation
path = mne.datasets.mtrf.data_path()
decim = 2
data = loadmat(join(path, 'speech_data.mat'))
raw = data['EEG'].T
speech = data['envelope'].T
sfreq = float(data['Fs'])
sfreq /= decim
speech = mne.filter.resample(speech, down=decim, npad='auto')
raw = mne.filter.resample(raw, down=decim, npad='auto')
# Read in channel positions and create our MNE objects from the raw data
montage = mne.channels.make_standard_montage('biosemi128')
info = mne.create_info(montage.ch_names, sfreq, 'eeg').set_montage(montage)
raw = mne.io.RawArray(raw, info)
n_channels = len(raw.ch_names)
# Plot a sample of brain and stimulus activity
fig, ax = plt.subplots()
lns = ax.plot(scale(raw[:, :800][0].T), color='k', alpha=.1)
ln1 = ax.plot(scale(speech[0, :800]), color='r', lw=2)
ax.legend([lns[0], ln1[0]], ['EEG', 'Speech Envelope'], frameon=False)
ax.set(title="Sample activity", xlabel="Time (s)")
mne.viz.tight_layout()
Explanation: Load the data from the publication
First we will load the data collected in :footcite:CrosseEtAl2016.
In this experiment subjects
listened to natural speech. Raw EEG and the speech stimulus are provided.
We will load these below, downsampling the data in order to speed up
computation since we know that our features are primarily low-frequency in
nature. Then we'll visualize both the EEG and speech envelope.
End of explanation
# Define the delays that we will use in the receptive field
tmin, tmax = -.2, .4
# Initialize the model
rf = ReceptiveField(tmin, tmax, sfreq, feature_names=['envelope'],
estimator=1., scoring='corrcoef')
# We'll have (tmax - tmin) * sfreq delays
# and an extra 2 delays since we are inclusive on the beginning / end index
n_delays = int((tmax - tmin) * sfreq) + 2
n_splits = 3
cv = KFold(n_splits)
# Prepare model data (make time the first dimension)
speech = speech.T
Y, _ = raw[:] # Outputs for the model
Y = Y.T
# Iterate through splits, fit the model, and predict/test on held-out data
coefs = np.zeros((n_splits, n_channels, n_delays))
scores = np.zeros((n_splits, n_channels))
for ii, (train, test) in enumerate(cv.split(speech)):
print('split %s / %s' % (ii + 1, n_splits))
rf.fit(speech[train], Y[train])
scores[ii] = rf.score(speech[test], Y[test])
# coef_ is shape (n_outputs, n_features, n_delays). we only have 1 feature
coefs[ii] = rf.coef_[:, 0, :]
times = rf.delays_ / float(rf.sfreq)
# Average scores and coefficients across CV splits
mean_coefs = coefs.mean(axis=0)
mean_scores = scores.mean(axis=0)
# Plot mean prediction scores across all channels
fig, ax = plt.subplots()
ix_chs = np.arange(n_channels)
ax.plot(ix_chs, mean_scores)
ax.axhline(0, ls='--', color='r')
ax.set(title="Mean prediction score", xlabel="Channel", ylabel="Score ($r$)")
mne.viz.tight_layout()
Explanation: Create and fit a receptive field model
We will construct an encoding model to find the linear relationship between
a time-delayed version of the speech envelope and the EEG signal. This allows
us to make predictions about the response to new stimuli.
End of explanation
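Conceptually, the ReceptiveField estimator amounts to ridge regression on time-lagged copies of the stimulus. A rough standalone sketch is shown below as an illustration only, ignoring the edge handling and scaling MNE does internally:
# Conceptual sketch only -- not the MNE implementation.
import numpy as np
from sklearn.linear_model import Ridge

def lagged_design(stimulus, n_delays):
    # stack delayed copies of the 1-D stimulus as columns
    return np.column_stack([np.roll(stimulus, d) for d in range(n_delays)])

# X_lagged = lagged_design(speech[:, 0], n_delays)
# Ridge(alpha=1.0).fit(X_lagged, Y[:, 0])   # one EEG channel at a time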
# Print mean coefficients across all time delays / channels (see Fig 1)
time_plot = 0.180 # For highlighting a specific time.
fig, ax = plt.subplots(figsize=(4, 8))
max_coef = mean_coefs.max()
ax.pcolormesh(times, ix_chs, mean_coefs, cmap='RdBu_r',
vmin=-max_coef, vmax=max_coef, shading='gouraud')
ax.axvline(time_plot, ls='--', color='k', lw=2)
ax.set(xlabel='Delay (s)', ylabel='Channel', title="Mean Model\nCoefficients",
xlim=times[[0, -1]], ylim=[len(ix_chs) - 1, 0],
xticks=np.arange(tmin, tmax + .2, .2))
plt.setp(ax.get_xticklabels(), rotation=45)
mne.viz.tight_layout()
# Make a topographic map of coefficients for a given delay (see Fig 2C)
ix_plot = np.argmin(np.abs(time_plot - times))
fig, ax = plt.subplots()
mne.viz.plot_topomap(mean_coefs[:, ix_plot], pos=info, axes=ax, show=False,
vmin=-max_coef, vmax=max_coef)
ax.set(title="Topomap of model coefficients\nfor delay %s" % time_plot)
mne.viz.tight_layout()
Explanation: Investigate model coefficients
Finally, we will look at how the linear coefficients (sometimes
referred to as beta values) are distributed across time delays as well as
across the scalp. We will recreate figure 1 and figure 2 from
:footcite:CrosseEtAl2016.
End of explanation
# We use the same lags as in :footcite:`CrosseEtAl2016`. Negative lags now
# index the relationship
# between the neural response and the speech envelope earlier in time, whereas
# positive lags would index how a unit change in the amplitude of the EEG would
# affect later stimulus activity (obviously this should have an amplitude of
# zero).
tmin, tmax = -.2, 0.
# Initialize the model. Here the features are the EEG data. We also specify
# ``patterns=True`` to compute inverse-transformed coefficients during model
# fitting (cf. next section and :footcite:`HaufeEtAl2014`).
# We'll use a ridge regression estimator with an alpha value similar to
# Crosse et al.
sr = ReceptiveField(tmin, tmax, sfreq, feature_names=raw.ch_names,
estimator=1e4, scoring='corrcoef', patterns=True)
# We'll have (tmax - tmin) * sfreq delays
# and an extra 2 delays since we are inclusive on the beginning / end index
n_delays = int((tmax - tmin) * sfreq) + 2
n_splits = 3
cv = KFold(n_splits)
# Iterate through splits, fit the model, and predict/test on held-out data
coefs = np.zeros((n_splits, n_channels, n_delays))
patterns = coefs.copy()
scores = np.zeros((n_splits,))
for ii, (train, test) in enumerate(cv.split(speech)):
print('split %s / %s' % (ii + 1, n_splits))
sr.fit(Y[train], speech[train])
scores[ii] = sr.score(Y[test], speech[test])[0]
# coef_ is shape (n_outputs, n_features, n_delays). We have 128 features
coefs[ii] = sr.coef_[0, :, :]
patterns[ii] = sr.patterns_[0, :, :]
times = sr.delays_ / float(sr.sfreq)
# Average scores and coefficients across CV splits
mean_coefs = coefs.mean(axis=0)
mean_patterns = patterns.mean(axis=0)
mean_scores = scores.mean(axis=0)
max_coef = np.abs(mean_coefs).max()
max_patterns = np.abs(mean_patterns).max()
Explanation: Create and fit a stimulus reconstruction model
We will now demonstrate another use case for the
:class:mne.decoding.ReceptiveField class as we try to predict the stimulus
activity from the EEG data. This is known in the literature as a decoding, or
stimulus reconstruction model :footcite:CrosseEtAl2016.
A decoding model aims to find the
relationship between the speech signal and a time-delayed version of the EEG.
This can be useful as we exploit all of the available neural data in a
multivariate context, compared to the encoding case which treats each M/EEG
channel as an independent feature. Therefore, decoding models might provide a
better quality of fit (at the expense of not controlling for stimulus
covariance), especially for low SNR stimuli such as speech.
End of explanation
y_pred = sr.predict(Y[test])
time = np.linspace(0, 2., 5 * int(sfreq))
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(time, speech[test][sr.valid_samples_][:int(5 * sfreq)],
color='grey', lw=2, ls='--')
ax.plot(time, y_pred[sr.valid_samples_][:int(5 * sfreq)], color='r', lw=2)
ax.legend([lns[0], ln1[0]], ['Envelope', 'Reconstruction'], frameon=False)
ax.set(title="Stimulus reconstruction")
ax.set_xlabel('Time (s)')
mne.viz.tight_layout()
Explanation: Visualize stimulus reconstruction
To get a sense of our model performance, we can plot the actual and predicted
stimulus envelopes side by side.
End of explanation
time_plot = (-.140, -.125) # To average between two timepoints.
ix_plot = np.arange(np.argmin(np.abs(time_plot[0] - times)),
np.argmin(np.abs(time_plot[1] - times)))
fig, ax = plt.subplots(1, 2)
mne.viz.plot_topomap(np.mean(mean_coefs[:, ix_plot], axis=1),
pos=info, axes=ax[0], show=False,
vmin=-max_coef, vmax=max_coef)
ax[0].set(title="Model coefficients\nbetween delays %s and %s"
% (time_plot[0], time_plot[1]))
mne.viz.plot_topomap(np.mean(mean_patterns[:, ix_plot], axis=1),
pos=info, axes=ax[1],
show=False, vmin=-max_patterns, vmax=max_patterns)
ax[1].set(title="Inverse-transformed coefficients\nbetween delays %s and %s"
% (time_plot[0], time_plot[1]))
mne.viz.tight_layout()
Explanation: Investigate model coefficients
Finally, we will look at how the decoding model coefficients are distributed
across the scalp. We will attempt to recreate figure 5 from
:footcite:CrosseEtAl2016. The
decoding model weights reflect the channels that contribute most toward
reconstructing the stimulus signal, but are not directly interpretable in a
neurophysiological sense. Here we also look at the coefficients obtained
via an inversion procedure :footcite:HaufeEtAl2014, which have a more
straightforward
interpretation as their value (and sign) directly relates to the stimulus
signal's strength (and effect direction).
End of explanation |
14,733 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Datasets
Machine learning datasets / Example 3
Step1: (2) Introduction to the dataset
iris = datasets.load_iris() stores a dict-like object in iris; we can use the code below to inspect the data it contains
Step2: | Key | Description |
| -- | -- |
| ('target_names', (3L,)) | three iris species: setosa, versicolor, virginica |
| ('data', (150L, 4L)) | 150 samples with four features each |
| ('target', (150L,)) | which of the three iris species each of the 150 samples belongs to |
| DESCR | a textual description of the dataset |
| feature_names | the meaning of the four features: sepal length and width, and petal length and width
To visualize this dataset, the code below first uses the PCA algorithm to reduce the data to 3 dimensions
Step3: 接下來將三個維度的資料立用mpl_toolkits.mplot3d.Axes3D 建立三維繪圖空間,並利用 scatter以三個特徵資料數值當成座標繪入空間,並以三種iris之數值 Y,來指定資料點的顏色。我們可以看出三種iris中,有一種明顯的可以與其他兩種區別,而另外兩種則無法明顯區別。 | Python Code:
# This line is specific to the IPython/Jupyter notebook interface; it can be removed in other environments
%matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
Explanation: Datasets
Machine learning datasets / Example 3: The iris dataset
http://scikit-learn.org/stable/auto_examples/datasets/plot_iris_dataset.html
The purpose of this example is to introduce the iris dataset from the machine learning example datasets.
(1) Import the libraries and the built-in dataset
End of explanation
for key,value in iris.items() :
try:
print (key,value.shape)
except:
print (key)
print(iris['feature_names'])
Explanation: (二)資料集介紹
iris = datasets.load_iris() stores a dict-like object in iris; we can use the code below to inspect the data it contains
End of explanation
X_reduced = PCA(n_components=3).fit_transform(iris.data)
Explanation: | Key | Description |
| -- | -- |
| ('target_names', (3L,)) | three iris species: setosa, versicolor, virginica |
| ('data', (150L, 4L)) | 150 samples with four features each |
| ('target', (150L,)) | which of the three iris species each of the 150 samples belongs to |
| DESCR | a textual description of the dataset |
| feature_names | the meaning of the four features: sepal length and width, and petal length and width
To visualize this dataset, the code below first uses the PCA algorithm to reduce the data to 3 dimensions
End of explanation
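As an illustrative aside (not in the original example), we can check how much variance the three retained components keep; for iris the first component alone explains roughly 92% of the variance:
pca = PCA(n_components=3).fit(iris.data)
print(pca.explained_variance_ratio_)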
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
# Next, we try to display the description (DESCR) of this machine learning dataset
print(iris['DESCR'])
Explanation: Next, we use mpl_toolkits.mplot3d.Axes3D to create a 3-D plotting space for the three-dimensional data, and use scatter to plot the three feature values as coordinates, coloring each point by its iris class value Y. We can see that one of the three iris species is clearly separable from the other two, while the remaining two cannot be clearly distinguished.
End of explanation |
14,734 |
Given the following text problem statement, write Python code to implement the functionality described below in problem statement
Problem:
evaluate predictions
| Python Code::
mean_absolute_error(y_test, predictions)
|
14,735 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Initialization
Welcome to the first assignment of "Improving Deep Neural Networks".
Training your neural network requires specifying an initial value of the weights. A well chosen initialization method will help learning.
If you completed the previous course of this specialization, you probably followed our instructions for weight initialization, and it has worked out so far. But how do you choose the initialization for a new neural network? In this notebook, you will see how different initializations lead to different results.
A well chosen initialization can
Step2: You would like a classifier to separate the blue dots from the red dots.
1 - Neural Network model
You will use a 3-layer neural network (already implemented for you). Here are the initialization methods you will experiment with
Step4: 2 - Zero initialization
There are two types of parameters to initialize in a neural network
Step5: Expected Output
Step6: The performance is really bad, and the cost does not really decrease, and the algorithm performs no better than random guessing. Why? Let's look at the details of the predictions and the decision boundary
Step8: The model is predicting 0 for every example.
In general, initializing all the weights to zero results in the network failing to break symmetry. This means that every neuron in each layer will learn the same thing, and you might as well be training a neural network with $n^{[l]}=1$ for every layer, and the network is no more powerful than a linear classifier such as logistic regression.
<font color='blue'>
What you should remember
Step9: Expected Output
Step10: If you see "inf" as the cost after the iteration 0, this is because of numerical roundoff; a more numerically sophisticated implementation would fix this. But this isn't worth worrying about for our purposes.
Anyway, it looks like you have broken symmetry, and this gives better results than before. The model is no longer outputting all 0s.
Step12: Observations
Step13: Expected Output | Python Code:
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load image dataset: blue/red dots in circles
train_X, train_Y, test_X, test_Y = load_dataset()
Explanation: Initialization
Welcome to the first assignment of "Improving Deep Neural Networks".
Training your neural network requires specifying an initial value of the weights. A well chosen initialization method will help learning.
If you completed the previous course of this specialization, you probably followed our instructions for weight initialization, and it has worked out so far. But how do you choose the initialization for a new neural network? In this notebook, you will see how different initializations lead to different results.
A well chosen initialization can:
- Speed up the convergence of gradient descent
- Increase the odds of gradient descent converging to a lower training (and generalization) error
To get started, run the following cell to load the packages and the planar dataset you will try to classify.
End of explanation
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
learning_rate -- learning rate for gradient descent
num_iterations -- number of iterations to run gradient descent
print_cost -- if True, print the cost every 1000 iterations
initialization -- flag to choose which initialization to use ("zeros","random" or "he")
Returns:
parameters -- parameters learnt by the model
grads = {}
costs = [] # to keep track of the loss
m = X.shape[1] # number of examples
layers_dims = [X.shape[0], 10, 5, 1]
# Initialize parameters dictionary.
if initialization == "zeros":
parameters = initialize_parameters_zeros(layers_dims)
elif initialization == "random":
parameters = initialize_parameters_random(layers_dims)
elif initialization == "he":
parameters = initialize_parameters_he(layers_dims)
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
a3, cache = forward_propagation(X, parameters)
# Loss
cost = compute_loss(a3, Y)
# Backward propagation.
grads = backward_propagation(X, Y, cache)
# Update parameters.
parameters = update_parameters(parameters, grads, learning_rate)
# Print the loss every 1000 iterations
if print_cost and i % 1000 == 0:
print("Cost after iteration {}: {}".format(i, cost))
costs.append(cost)
# plot the loss
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
Explanation: You would like a classifier to separate the blue dots from the red dots.
1 - Neural Network model
You will use a 3-layer neural network (already implemented for you). Here are the initialization methods you will experiment with:
- Zeros initialization -- setting initialization = "zeros" in the input argument.
- Random initialization -- setting initialization = "random" in the input argument. This initializes the weights to large random values.
- He initialization -- setting initialization = "he" in the input argument. This initializes the weights to random values scaled according to a paper by He et al., 2015.
Instructions: Please quickly read over the code below, and run it. In the next part you will implement the three initialization methods that this model() calls.
End of explanation
# GRADED FUNCTION: initialize_parameters_zeros
def initialize_parameters_zeros(layers_dims):
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
parameters = {}
L = len(layers_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.zeros((layers_dims[l], layers_dims[l-1]))
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
### END CODE HERE ###
return parameters
parameters = initialize_parameters_zeros([3,2,1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
Explanation: 2 - Zero initialization
There are two types of parameters to initialize in a neural network:
- the weight matrices $(W^{[1]}, W^{[2]}, W^{[3]}, ..., W^{[L-1]}, W^{[L]})$
- the bias vectors $(b^{[1]}, b^{[2]}, b^{[3]}, ..., b^{[L-1]}, b^{[L]})$
Exercise: Implement the following function to initialize all parameters to zeros. You'll see later that this does not work well since it fails to "break symmetry", but lets try it anyway and see what happens. Use np.zeros((..,..)) with the correct shapes.
End of explanation
parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
Explanation: Expected Output:
<table>
<tr>
<td>
**W1**
</td>
<td>
[[ 0. 0. 0.]
[ 0. 0. 0.]]
</td>
</tr>
<tr>
<td>
**b1**
</td>
<td>
[[ 0.]
[ 0.]]
</td>
</tr>
<tr>
<td>
**W2**
</td>
<td>
[[ 0. 0.]]
</td>
</tr>
<tr>
<td>
**b2**
</td>
<td>
[[ 0.]]
</td>
</tr>
</table>
Run the following code to train your model on 15,000 iterations using zeros initialization.
End of explanation
print ("predictions_train = " + str(predictions_train))
print ("predictions_test = " + str(predictions_test))
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
Explanation: The performance is really bad, and the cost does not really decrease, and the algorithm performs no better than random guessing. Why? Let's look at the details of the predictions and the decision boundary:
End of explanation
# GRADED FUNCTION: initialize_parameters_random
def initialize_parameters_random(layers_dims):
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
np.random.seed(3)               # This seed makes sure your "random" numbers will be the same as ours
parameters = {}
L = len(layers_dims) # integer representing the number of layers
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layers_dims[l],layers_dims[l-1])*10
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
### END CODE HERE ###
return parameters
parameters = initialize_parameters_random([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
Explanation: The model is predicting 0 for every example.
In general, initializing all the weights to zero results in the network failing to break symmetry. This means that every neuron in each layer will learn the same thing, and you might as well be training a neural network with $n^{[l]}=1$ for every layer, and the network is no more powerful than a linear classifier such as logistic regression.
<font color='blue'>
What you should remember:
- The weights $W^{[l]}$ should be initialized randomly to break symmetry.
- It is however okay to initialize the biases $b^{[l]}$ to zeros. Symmetry is still broken so long as $W^{[l]}$ is initialized randomly.
3 - Random initialization
To break symmetry, let's initialize the weights randomly. Following random initialization, each neuron can then proceed to learn a different function of its inputs. In this exercise, you will see what happens if the weights are initialized randomly, but to very large values.
Exercise: Implement the following function to initialize your weights to large random values (scaled by *10) and your biases to zeros. Use np.random.randn(..,..) * 10 for weights and np.zeros((.., ..)) for biases. We are using a fixed np.random.seed(..) to make sure your "random" weights match ours, so don't worry if running your code several times always gives you the same initial values for the parameters.
End of explanation
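As an aside (not part of the graded notebook), here is a minimal, self-contained numpy sketch of the symmetry problem: with all-zero weights every hidden unit computes the same activation and receives an identical, zero gradient, so gradient descent can never make the units differ. The toy two-layer network below is an assumed setup, not the course's model().
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(5, 3)                                # 5 toy examples, 3 features
y = (rng.rand(5, 1) > 0.5).astype(float)           # toy binary labels
W1, b1 = np.zeros((3, 4)), np.zeros((1, 4))        # layer 1, all zeros
W2, b2 = np.zeros((4, 1)), np.zeros((1, 1))        # layer 2, all zeros
A1 = np.maximum(0, X.dot(W1) + b1)                 # ReLU hidden layer -> all zeros
A2 = 1.0 / (1.0 + np.exp(-(A1.dot(W2) + b2)))      # sigmoid output -> all 0.5
dZ2 = A2 - y                                       # output-layer error
dW2 = A1.T.dot(dZ2)                                # all zeros because A1 is zero
dA1 = dZ2.dot(W2.T)                                # all zeros because W2 is zero
print("hidden units identical:", np.allclose(A1, A1[:, :1]))
print("max |dW2|:", np.abs(dW2).max(), " max |dA1|:", np.abs(dA1).max())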
parameters = model(train_X, train_Y, initialization = "random")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
Explanation: Expected Output:
<table>
<tr>
<td>
**W1**
</td>
<td>
[[ 17.88628473 4.36509851 0.96497468]
[-18.63492703 -2.77388203 -3.54758979]]
</td>
</tr>
<tr>
<td>
**b1**
</td>
<td>
[[ 0.]
[ 0.]]
</td>
</tr>
<tr>
<td>
**W2**
</td>
<td>
[[-0.82741481 -6.27000677]]
</td>
</tr>
<tr>
<td>
**b2**
</td>
<td>
[[ 0.]]
</td>
</tr>
</table>
Run the following code to train your model on 15,000 iterations using random initialization.
End of explanation
print (predictions_train)
print (predictions_test)
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
Explanation: If you see "inf" as the cost after iteration 0, this is because of numerical roundoff; a more numerically sophisticated implementation would fix this. But this isn't worth worrying about for our purposes.
Anyway, it looks like you have broken symmetry, and this gives better results than before. The model is no longer outputting all 0s.
End of explanation
# GRADED FUNCTION: initialize_parameters_he
def initialize_parameters_he(layers_dims):
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
np.random.seed(3)
parameters = {}
L = len(layers_dims) - 1 # integer representing the number of layers
for l in range(1, L + 1):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layers_dims[l],layers_dims[l-1])*np.sqrt(2/layers_dims[l-1])
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
### END CODE HERE ###
return parameters
parameters = initialize_parameters_he([2, 4, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
Explanation: Observations:
- The cost starts very high. This is because with large random-valued weights, the last activation (sigmoid) outputs results that are very close to 0 or 1 for some examples, and when it gets that example wrong it incurs a very high loss for that example. Indeed, when $\log(a^{[3]}) = \log(0)$, the loss goes to infinity.
- Poor initialization can lead to vanishing/exploding gradients, which also slows down the optimization algorithm.
- If you train this network longer you will see better results, but initializing with overly large random numbers slows down the optimization.
<font color='blue'>
In summary:
- Initializing weights to very large random values does not work well.
- Hopefully initializing with small random values does better. The important question is: how small should these random values be? Let's find out in the next part!
4 - He initialization
Finally, try "He Initialization"; this is named for the first author of He et al., 2015. (If you have heard of "Xavier initialization", this is similar except Xavier initialization uses a scaling factor for the weights $W^{[l]}$ of sqrt(1./layers_dims[l-1]) where He initialization would use sqrt(2./layers_dims[l-1]).)
Exercise: Implement the following function to initialize your parameters with He initialization.
Hint: This function is similar to the previous initialize_parameters_random(...). The only difference is that instead of multiplying np.random.randn(..,..) by 10, you will multiply it by $\sqrt{\frac{2}{\text{dimension of the previous layer}}}$, which is what He initialization recommends for layers with a ReLU activation.
End of explanation
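Before running the He-initialized model, a rough standalone illustration (an assumed toy setup, separate from the graded functions) of why the sqrt(2/fan_in) scaling matters: with He scaling the spread of ReLU activations stays roughly constant across layers, while weights scaled by 10 blow the activations up layer after layer.
import numpy as np

rng = np.random.RandomState(1)
fan_in = 256
a_he = a_big = rng.randn(512, fan_in)              # same starting activations
for _ in range(5):                                 # five ReLU layers
    W_he = rng.randn(fan_in, fan_in) * np.sqrt(2.0 / fan_in)
    W_big = rng.randn(fan_in, fan_in) * 10
    a_he = np.maximum(0, a_he.dot(W_he))
    a_big = np.maximum(0, a_big.dot(W_big))
print("activation std after 5 layers, He init :", a_he.std())
print("activation std after 5 layers, *10 init:", a_big.std())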
parameters = model(train_X, train_Y, initialization = "he")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
Explanation: Expected Output:
<table>
<tr>
<td>
**W1**
</td>
<td>
[[ 1.78862847 0.43650985]
[ 0.09649747 -1.8634927 ]
[-0.2773882 -0.35475898]
[-0.08274148 -0.62700068]]
</td>
</tr>
<tr>
<td>
**b1**
</td>
<td>
[[ 0.]
[ 0.]
[ 0.]
[ 0.]]
</td>
</tr>
<tr>
<td>
**W2**
</td>
<td>
[[-0.03098412 -0.33744411 -0.92904268 0.62552248]]
</td>
</tr>
<tr>
<td>
**b2**
</td>
<td>
[[ 0.]]
</td>
</tr>
</table>
Run the following code to train your model on 15,000 iterations using He initialization.
End of explanation |
14,736 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Tuples and Lists
Tuples
A Python Tuple is an immutable
sequence of fixed size. They are created using round brackets () with commas to separate the elements.
Step1: The elements of a tuple need not have the same type.
Step2: Extracting Elements from Tuples
Given a tuple it is possible to extract the elements from it in various ways. Note that Python uses 0-based indexing, meaning that the first element of a tuple is at position 0, the second element at position 1, and so on.
Step3: Note that this last example results in an error from attempting to extract an element that doesn't exist. It is also possible to extract the elements of a tuple as follows.
Step4: The immutable aspect of tuples will be explained in a bit.
Lists
A Python List is a mutable sequence. Unlike tuples they don't have a fixed size. They are created using square brackets [] with commas to separate the elements.
Step5: Lists can be added together to create larger lists.
Step6: Extracting Elements from Lists
Given a list it is possible to extract its elements in much the same way you would a tuple.
Step7: List Slicing
It's also possible to slice out chunks of a list.
Step8: Mutability
Lists are mutable, so let's mutate (ie. change) them.
Step9: Tuples and Mutability
Compare this behaviour to that of tuples.
Step10: Trying to add or change an element in a tuple results in an error. Tuples cannot be changed after they are constructed, hence they are immutable unlike lists.
Useful List Functions
range
The range function can be used to generate lists of equidistantly spaced integers in various forms.
Step11: zip
The zip function takes two or more lists and zips them together. This is easier to understand with an example.
Step12: Notice how the first elements of x and y are "zipped" together into a tuple in the new list, as are the second elements, and the third elements.
Step13: enumerate
The enumerate function generates a list of pairs (two-element tuples) in which the first element is the index/position of the element and the second element is the element from the original list.
Step14: Mutability Gotchas
Step15: As expected lis_copy now has 9 at the end of it.
Step16: However now lis also has 9 at the end of it. The line
lis_copy = lis
makes lis_copy point to the same underlying list as lis. What's needed here is a copy of the list. There are many ways of copying a list in Python, one of which follows. | Python Code:
('x', 'y', 'z')
Explanation: Tuples and Lists
Tuples
A Python Tuple is an immutable
sequence of fixed size. They are created using round brackets () with commas to separate the elements.
End of explanation
(1, 'b', 2.5)
Explanation: The elements of a tuple need not have the same type.
End of explanation
# Assigning a tuple to the variable tup
tup = ('first', 'second', 'third')
tup[0] # Extract the first element.
tup[1] # Extract the second element.
tup[2] # Extract the third element.
tup[3] # Extracting a non-existent element.
Explanation: Extracting Elements from Tuples
Given a tuple it is possible to extract the elements from it in various ways. Note that Python uses 0-based indexing, meaning that the first element of a tuple is at position 0, the second element at position 1, and so on.
End of explanation
a, b, c = tup
print(a)
print(b)
print(c)
Explanation: Note that this last example results in an error from attempting to extract an element that doesn't exist. It is also possible to extract the elements of a tuple as follows.
End of explanation
["a", "b", "c"]
Explanation: The immutable aspect of tuples will be explained in a bit.
Lists
A Python List is a mutable sequence. Unlike tuples they don't have a fixed size. They are created using square brackets [] with commas to separate the elements.
End of explanation
[1, 2] + [3, 4] + [5, 6]
Explanation: Lists can be added together to create larger lists.
End of explanation
# Creating a list and assigning it to the variable x.
lis = [1, 2, 3, 4, 5]
lis
lis[0] # Extract the first element.
lis[1] # Extract the second element.
lis[-1] # Extract the last element.
lis[-2] # Extract the second to last element.
Explanation: Extracting Elements from Lists
Given a list it is possible to extract its elements in much the same way you would a tuple.
End of explanation
lis[:3] # Extract the first three elements or equivalently
# extract elements up to (but not including) the fourth element.
lis[3:] # Drop the first three elements and return the rest or equivalently
# extract elements from the fourth element onwards.
lis[1:4] # Extract elements from the second element up to
# (but not including the fifth).
Explanation: List Slicing
It's also possible to slice out chunks of a list.
End of explanation
lis
# Adding an element to the end of a list.
lis.append(6)
lis
# Adding a list to the end of a list.
lis.extend([7,8,9])
lis
# Removing an element from the end of a list.
element = lis.pop()
(element, lis)
# Changing an element in a list.
lis[3] = 42
lis
Explanation: Mutability
Lists are mutable, so let's mutate (ie. change) them.
End of explanation
tup[0] = 0
tup.append("fourth")
Explanation: Tuples and Mutability
Compare this behaviour to that of tuples.
End of explanation
range(10)
range(5, 15)
range(4, 24, 2)
Explanation: Trying to add or change an element in a tuple results in an error. Tuples cannot be changed after they are constructed, hence they are immutable unlike lists.
Useful List Functions
range
The range function can be used to generate lists of equidistantly spaced integers in various forms.
End of explanation
x = ["a", "b", "c"]
y = [1 , 2, 3]
zip(x, y)
Explanation: zip
The zip function takes two or more lists and zips them together. This is easier to understand with an example.
End of explanation
zip(x, y, ["Do", "Re", "Mi"])
Explanation: Notice how the first elements of x and y are "zipped" together into a tuple in the new list, as are the second elements, and the third elements.
End of explanation
x
list(enumerate(x))
Explanation: enumerate
The enumerate function generates a list of pairs (two-element tuples) in which the first element is the index/position of the element and the second element is the element from the original list.
End of explanation
lis
lis_copy = lis
lis_copy.append(9)
lis_copy
Explanation: Mutability Gotchas
End of explanation
lis
Explanation: As expected lis_copy now has 9 at the end of it.
End of explanation
lis_copy = lis[:]
lis_copy.pop()
print(lis)
print(lis_copy)
Explanation: However now lis also has 9 at the end of it. The line
lis_copy = lis
makes lis_copy point to the same underlying list as lis. What's needed here is a copy of the list. There are many ways of copying a list in Python, one of which follows.
End of explanation |
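As a small addendum to the point above, a few of the other common ways to make a shallow copy; each one leaves the original list untouched.
import copy

original = [1, 2, 3]
for c in (original[:], list(original), copy.copy(original)):
    c.append(99)                 # mutate the copy only
    print(c)
print(original)                  # still [1, 2, 3]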
14,737 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
# Overall Summary
Step1: Monthly stats
Step2: Daily stats -- weekdays
Step3: There is a weekly pattern in booking volume: high from Monday to Thursday and lower on Friday and over the weekend.
Monthly stats (Checkin and Checkout)
Step4: Daily stats -- weekdays (Checkin and Checkout) | Python Code:
daily_stats[['count_click', 'count_booking_train', 'count_booking_test']].sum()/1000
print 'booking ratio for train set: ', daily_stats.count_booking_train.sum() * 1.0 \
/ (daily_stats.count_click.sum() + daily_stats.count_booking_train.sum())
print 'daily booking in train set: ', daily_stats.count_booking_train.sum() * 1.0 \
/ len(daily_stats[daily_stats.count_booking_train != 0])
print 'daily click in train set: ', daily_stats.count_click.sum() * 1.0 \
/ len(daily_stats[daily_stats.count_click != 0])
print 'daily booking in test set: ', daily_stats.count_booking_test.sum() * 1.0 \
/ len(daily_stats[daily_stats.count_booking_test != 0])
Explanation: # Overall Summary
End of explanation
monthly_number_stats_booking_train = (daily_stats.groupby(("year", "month"))["count_booking_train"].sum()/1000)
monthly_number_stats_click_train = (daily_stats.groupby(("year", "month"))["count_click"].sum()/1000)
monthly_number_stats_booking_test = (daily_stats.groupby(("year", "month"))["count_booking_test"].sum()/1000)
fig = monthly_number_stats_booking_train.plot(kind='bar', alpha=0.5, figsize=(14, 8))
monthly_number_stats_click_train.plot(kind='bar', alpha=0.3, color = 'r', figsize=(14, 8))
monthly_number_stats_booking_test.plot(kind='bar', alpha=0.5, color = 'y', figsize=(14, 8))
fig.legend()
fig.set_title("Total Booking per Month")
fig.set_ylabel("Thousands of Bookings/Clicks")
fig.set_xlabel("(Year , Month)" )
Explanation: Monthly stats
End of explanation
import locale, calendar
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.tight_layout()
fig.set_size_inches(18.5,5.5)
dow = map(lambda x: calendar.day_abbr[x].capitalize(), daily_stats.index.dayofweek)
dow_order = map(lambda x: calendar.day_abbr[x].capitalize(), np.arange(0,7))
sns.boxplot(daily_stats.count_booking/1000, groupby=dow, order=dow_order, ax=axes[0])
axes[0].set_title("Total number of bookings by Week day")
axes[0].set_ylabel("Nubmer of bookings (Thousands)")
dow_clicks = map(lambda x: calendar.day_abbr[x].capitalize(), daily_stats[daily_stats.count_click!=0].index.dayofweek)
dow_clicks_order = map(lambda x: calendar.day_abbr[x].capitalize(), np.arange(0,7))
sns.boxplot(daily_stats[daily_stats.count_click!=0].count_click/1000., groupby=dow_clicks, order=dow_clicks_order, ax=axes[1])
axes[1].set_title("Total number of clicks by Week day")
axes[1].set_ylabel("Nubmer of clicks (Thousands)")
Explanation: Daily stats -- weekdays
End of explanation
table = 'public.srch_ci_daily_stats'
daily_stats_ci = get_dataframe(
'''select * from %s where year between 2013 and 2016''' % table
)
daily_stats_ci.index = pd.to_datetime(daily_stats_ci.year*10000 + daily_stats_ci.month*100 + daily_stats_ci.day, format='%Y%m%d')
table = 'public.srch_co_daily_stats'
daily_stats_co = get_dataframe(
'''select * from %s where year between 2013 and 2016''' % table
)
daily_stats_co.index = pd.to_datetime(daily_stats_co.year*10000 + daily_stats_co.month*100 + daily_stats_co.day, format='%Y%m%d')
monthly_number_stats_ci_booking_train = (daily_stats_ci.groupby(("year", "month"))["count_booking_train"].sum()/1000)
monthly_number_stats_ci_click_train = (daily_stats_ci.groupby(("year", "month"))["count_click"].sum()/1000)
monthly_number_stats_ci_booking_test = (daily_stats_ci.groupby(("year", "month"))["count_booking_test"].sum()/1000)
monthly_number_stats_co_booking_train = (daily_stats_co.groupby(("year", "month"))["count_booking_train"].sum()/1000)
monthly_number_stats_co_click_train = (daily_stats_co.groupby(("year", "month"))["count_click"].sum()/1000)
monthly_number_stats_co_booking_test = (daily_stats_co.groupby(("year", "month"))["count_booking_test"].sum()/1000)
fig = monthly_number_stats_ci_booking_train.plot(kind='bar', alpha=0.5, figsize=(14, 8))
monthly_number_stats_ci_click_train.plot(kind='bar', alpha=0.3, color = 'r', figsize=(14, 8))
monthly_number_stats_ci_booking_test.plot(kind='bar', alpha=0.5, color = 'y', figsize=(14, 8))
fig.legend()
fig.set_title("Total Booking per Month (Checkin)")
fig.set_ylabel("Thousands of Bookings/Clicks")
fig.set_xlabel("(Year , Month)" )
fig = monthly_number_stats_co_booking_train.plot(kind='bar', alpha=0.5, figsize=(14, 8))
monthly_number_stats_co_click_train.plot(kind='bar', alpha=0.3, color = 'r', figsize=(14, 8))
monthly_number_stats_co_booking_test.plot(kind='bar', alpha=0.5, color = 'y', figsize=(14, 8))
fig.legend()
fig.set_title("Total Booking per Month (Checkout)")
fig.set_ylabel("Thousands of Bookings/Clicks")
fig.set_xlabel("(Year , Month)" )
Explanation: There is a weekly pattern in booking volume: high from Monday to Thursday and lower on Friday and over the weekend.
Monthly stats (Checkin and Checkout)
End of explanation
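Before moving on to the check-in/check-out dates, a quick numeric check of the weekday pattern described above (this assumes daily_stats is the date-indexed frame already loaded for the box plots).
weekday_means = daily_stats.groupby(daily_stats.index.dayofweek)['count_booking'].mean()
weekday_means.index = [calendar.day_abbr[d] for d in weekday_means.index]
print(weekday_means / 1000)      # mean daily bookings (thousands) by weekday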
import locale, calendar
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.tight_layout()
fig.set_size_inches(18.5,5.5)
dow = map(lambda x: calendar.day_abbr[x].capitalize(), daily_stats_ci.index.dayofweek)
dow_order = map(lambda x: calendar.day_abbr[x].capitalize(), np.arange(0,7))
sns.boxplot(daily_stats_ci.count_booking/1000, groupby=dow, order=dow_order, ax=axes[0])
axes[0].set_title("Total number of bookings by Week day (Checkin)")
axes[0].set_ylabel("Nubmer of bookings (Thousands)")
dow_clicks = map(lambda x: calendar.day_abbr[x].capitalize(), daily_stats_ci[daily_stats_ci.count_click!=0].index.dayofweek)
dow_clicks_order = map(lambda x: calendar.day_abbr[x].capitalize(), np.arange(0,7))
sns.boxplot(daily_stats_ci[daily_stats_ci.count_click!=0].count_click/1000., groupby=dow_clicks, order=dow_clicks_order, ax=axes[1])
axes[1].set_title("Total number of clicks by Week day(Checkin)")
axes[1].set_ylabel("Nubmer of clicks (Thousands)")
import locale, calendar
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.tight_layout()
fig.set_size_inches(18.5,5.5)
dow = map(lambda x: calendar.day_abbr[x].capitalize(), daily_stats_co.index.dayofweek)
dow_order = map(lambda x: calendar.day_abbr[x].capitalize(), np.arange(0,7))
sns.boxplot(daily_stats_co.count_booking/1000, groupby=dow, order=dow_order, ax=axes[0])
axes[0].set_title("Total number of bookings by Week day (Checkout)")
axes[0].set_ylabel("Nubmer of bookings (Thousands)")
dow_clicks = map(lambda x: calendar.day_abbr[x].capitalize(), daily_stats_co[daily_stats_co.count_click!=0].index.dayofweek)
dow_clicks_order = map(lambda x: calendar.day_abbr[x].capitalize(), np.arange(0,7))
sns.boxplot(daily_stats_co[daily_stats_co.count_click!=0].count_click/1000., groupby=dow_clicks, order=dow_clicks_order, ax=axes[1])
axes[1].set_title("Total number of clicks by Week day(Checkout)")
axes[1].set_ylabel("Nubmer of clicks (Thousands)")
Explanation: Daily stats -- weekdays (Checkin and Checkout)
End of explanation |
14,738 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Exercise Answer Key
Step1: Helper Functions
Step2: Exercise 1
Step3: b. $1 Bets
By running 1000 simulations, find the mean and standard deviation of the payout if instead you bet $1 at a time and play 100 rounds.
Step4: Exercise 2
Step5: b. Equally Weighted Portfolio
Create an equally weighted portfolio of the following 20 stocks, find the standard deviation of the portfolio's returns, and then plot the returns for the second half of 2015 along with the AMZN returns from above. Holding a portfolio of 20 securities instead of AMZN alone should diversify away the idiosyncratic risk and lower the price variability.
Hint
Step6: c. Market Weighted Portfolio
Create a new portfolio of the same assets, this time weighted by market capitalization, find the standard deviation of the portfolio returns, and then plot the portfolio returns along with both results from above. Weighting using market capitalization brings us closer to the theoretical efficient portfolio, a portfolio of investments containing every single asset on the market, each weighted proportionately to its presence in the market.
The market cap is found using a pipeline factor, the steps for which are below.
Step7: d. Markowitz Portfolio
Create a new portfolio of the same assets, this time using the get_markowitz_weights helper function to create the Markowitz mean-variance portfolio. Use the pricing data from the first half of 2015 to calibrate the weights, and then plot the portfolio returns for the second half of 2015.
Important Note
If the weights from the lookback window (6 prior months) are correlated with the weights of the forward window (6 following months), then this optimization should be helpful in reducing our portfolio volatility going forward. However, this is often not the case in real life. Real markets are complicated, and historical volatility may not be a good predictor of future volatility. Volatility forecasting models are an entire area of research in finance, so don't think that just because the historic volatility of your portfolio was low, it will be equally low in the future. This is just one technique that attempts to control portfolio risk; there is a more complete discussion of this in this lecture
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import math
import cvxpy
Explanation: Exercise Answer Key: Position Concentration Risk
Lecture Link
This exercise notebook refers to this lecture. Please use the lecture for explanations and sample code.
https://www.quantopian.com/lectures#Position-Concentration-Risk
Part of the Quantopian Lecture Series:
www.quantopian.com/lectures
github.com/quantopian/research_public
End of explanation
def get_markowitz_weights(mu, Sigma, gamma=1, max_position=1.0, max_leverage=1.0, short=False):
w = cvxpy.Variable(len(Sigma))
g = cvxpy.Parameter(sign='positive')
L = cvxpy.Parameter()
g.value = gamma
L.value = max_leverage
try:
ret = mu.T*w
except ValueError:
ret = mu*w
risk = cvxpy.quad_form(w, Sigma)
objective = cvxpy.Maximize(ret - g*risk)
constraints = [
cvxpy.abs(w) < max_position,
cvxpy.norm(w, 1) <= L, # Make it so we don't have to invest everything
]
if not short:
constraints.append(w >= 0) # Force all positive weights
prob = cvxpy.Problem(
objective,
constraints
)
result = prob.solve()
return w.value
Explanation: Helper Functions
End of explanation
universes = 1000
evens = 19
total = 38
payout = 100
rounds = 1
results = np.zeros(universes)
#Your code goes here
p = float(19)/total
for i in range(universes):
results[i] = payout * np.random.binomial(n = rounds, p = p)
print "Payout mean:", np.mean(results)
print "Payout std:", np.std(results)
Explanation: Exercise 1: Roulette Simulation
A roulette table has 38 pockets: 1 through 36, 0, and 00. A bet on an even number pays out at a ratio of 1:1. Landing on 0 and 00 count as losing.
You have $100 and are betting on an even number.
a. All In
By running 1000 simulations, find the mean and standard deviation of the payout if you bet your entire $100 on one round.
End of explanation
universes = 1000
evens = 19
total = 38
payout = 1
rounds = 100
results = np.zeros(universes)
#Your code goes here
p = float(19)/total
for i in range(universes):
results[i] = payout * np.random.binomial(n = rounds, p = p)
print "Payout mean:", np.mean(results)
print "Payout std:", np.std(results)
Explanation: b. $1 Bets
By running 1000 simulations, find the mean and standard deviation of the payout if instead you bet $1 at a time and play 100 rounds.
End of explanation
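A quick analytic cross-check (not required by the exercise): both payouts are binomial, so the all-in bet has standard deviation 100*sqrt(p*(1-p)) while one hundred $1 bets have standard deviation sqrt(100*p*(1-p)), ten times smaller, even though the expected payout is identical.
p = 19.0 / 38
print("expected payout, either strategy: %.2f" % (100 * p))
print("all-in std  : %.2f" % (100 * np.sqrt(p * (1 - p))))
print("$1 x 100 std: %.2f" % (np.sqrt(100 * p * (1 - p))))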
time_start = '2015-01-01'
time_halfway = '2015-07-01'
time_end = '2016-01-01'
AMZN_r = get_pricing('AMZN', fields='price', start_date=time_start, end_date=time_end).pct_change()[1:]
X = np.linspace(0, len(AMZN_r), len(AMZN_r))
#Your code goes here
print "AMZN returns std:", np.std(AMZN_r.loc[time_halfway:])
AMZN_r.plot(alpha = 0.5);
plt.legend();
Explanation: Exercise 2: Portfolio Diversification
a. Single Asset
Use the pricing data below to find the standard deviation of the returns of AMZN in the second half of the year 2015 and plot the price against time.
End of explanation
symbol_list = ['BEN', 'SYMC', 'IP', 'SWKS', 'IVZ', 'MJN', 'WMB', 'LB', 'TWX', 'NFX', 'PFE', 'LLY', 'HP', 'JPM', 'CXO', 'TJX', 'CAG', 'BBT', 'ATVI', 'NFLX']
prices_df = get_pricing(symbol_list, fields=['price']
, start_date=time_start, end_date=time_end)['price']
prices_df.columns = map(lambda x: x.symbol, prices_df.columns)
eweights_df = len(symbol_list) * [float(1)/len(symbol_list)]
returns_df = prices_df.pct_change(1)[1:]
#Your code goes here
returns_df['EWP'] = returns_df[symbol_list].dot(eweights_df)
print "AMZN returns std:", np.std(AMZN_r.loc[time_halfway:])
print "Portfolio returns std:", np.std(returns_df['EWP'].loc[time_halfway:])
AMZN_r.plot(alpha = 0.5);
returns_df['EWP'].loc[time_halfway:].plot();
plt.legend();
Explanation: b. Equally Weighted Portfolio
Create an equally weighted portfolio of the following 20 stocks, find the standard deviation of the portfolio's returns, and then plot the returns for the second half of 2015 along with the AMZN returns from above. Holding a portfolio of 20 securities instead of AMZN alone should diversify away the idiosyncratic risk and lower the price variability.
Hint: To calculate weighted returns dot the weight matrix eweights_df with the splice of the returns matrix containing the symbol_list pricing data (returns_df[symbol_list]).
End of explanation
#Pipeline Setup
from quantopian.research import run_pipeline
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import morningstar
from quantopian.pipeline.factors import CustomFactor
from quantopian.pipeline.classifiers.morningstar import Sector
from quantopian.pipeline.filters import QTradableStocksUS
from time import time
universe = QTradableStocksUS()
pipe = Pipeline(columns = {'Market Cap' : morningstar.valuation.market_cap.latest},
screen=universe
)
start_timer = time()
results = run_pipeline(pipe, time_start, time_end)
end_timer = time()
results.fillna(value=0);
print "Time to run pipeline %.2f secs" % (end_timer - start_timer)
# This is important as sometimes the first data returned won't be on the specified start date
first_trading_day = results.index.levels[0][1]
market_cap = results.loc[first_trading_day]['Market Cap']
market_cap.index = [x.symbol for x in market_cap.index]#pd.MultiIndex.from_tuples([(x[0], x[1].symbol) for x in market_cap.index])
mcs = market_cap # pd.DataFrame(market_cap.loc[(first_trading_day,)].loc[symbol_list]).transpose()
mweights = (mcs[symbol_list]/sum(mcs[symbol_list])).transpose()
#Your code goes here
returns_df['MWP'] = returns_df[symbol_list].dot(mweights)
print "AMZN returns std:", np.std(AMZN_r.loc[time_halfway:])
print "EWP returns std:", np.std(returns_df['EWP'].loc[time_halfway:])
print "MWP returns std:", np.std(returns_df['MWP'].loc[time_halfway:])
AMZN_r[time_halfway:].plot(alpha = 0.5);
returns_df['EWP'].loc[time_halfway:].plot(alpha = 0.5);
returns_df['MWP'].loc[time_halfway:].plot();
plt.legend();
Explanation: c. Market Weighted Portfolio
Create a new portfolio of the same assets, this time weighted by market capitalization, find the standard deviation of the portfolio returns, and then plot the portfolio returns along with both results from above. Weighting using market capitalization brings us closer to the theoretical efficient portfolio, a portfolio of investments containing every single asset on the market, each weighted proportionately to its presence in the market.
The market cap is found using a pipeline factor, the steps for which are below.
End of explanation
mu = returns_df[symbol_list].\
loc[:time_halfway].fillna(0).mean().as_matrix()
sigma = returns_df[symbol_list].\
loc[:time_halfway].fillna(0).cov().as_matrix()
mkweights_df = get_markowitz_weights(mu, sigma)
#Your code goes here
returns_df['MKW'] = returns_df[symbol_list].dot(mkweights_df)
print "AMZN returns std:", np.std(AMZN_r.loc[time_halfway:])
print "EWP returns std:", np.std(returns_df['EWP'].loc[time_halfway:])
print "MWP returns std:", np.std(returns_df['MWP'].loc[time_halfway:])
print "MKW returns std:", np.std(returns_df['MKW'].loc[time_halfway:]), "\n"
AMZN_r.loc[time_halfway:].plot(alpha = 0.5);
returns_df['EWP'].loc[time_halfway:].plot(alpha = 0.5);
returns_df['MWP'].loc[time_halfway:].plot(alpha = 0.5);
returns_df['MKW'].loc[time_halfway:].plot();
plt.legend();
Explanation: d. Markowitz Portfolio
Create a new portfolio of the same assets, this time using the get_markowitz_weights helper function to create the Markowitz mean-variance portfolio. Use the pricing data from the first half of 2015 to calibrate the weights, and then plot the portfolio returns for the second half of 2015.
Important Note
If the weights from the lookback window (6 prior months) are correlated with the weights of the forward window (6 following months), then this optimization should be helpful in reducing our portfolio volatility going forward. However, this is often not the case in real life. Real markets are complicated, and historical volatility may not be a good predictor of future volatility. Volatility forecasting models are an entire area of research in finance, so don't think that just because the historic volatility of your portfolio was low, it will be equally low in the future. This is just one technique that attempts to control portfolio risk; there is a more complete discussion of this in this lecture:
https://www.quantopian.com/lectures/risk-constrained-portfolio-optimization
End of explanation |
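A rough way to see the caveat above in the numbers, reusing the frames already built in this notebook: compare the realized volatility of the Markowitz portfolio inside the window it was fit on with its volatility over the following half-year.
in_sample = np.std(returns_df['MKW'].loc[:time_halfway])
out_of_sample = np.std(returns_df['MKW'].loc[time_halfway:])
print("MKW returns std, first half (fit window): %.5f" % in_sample)
print("MKW returns std, second half (held out) : %.5f" % out_of_sample)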
14,739 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Exercise 5.1
Step1: Optimization
Maximum likelihood optimization is a statistical method for finding the best fitting set of parameters for a model. A likelihood function can be made up of many independent likelihood functions, where each describes the result of a trial relative to a statistical distribution. Let's consider a simple example of a coin flip, and estimating whether the coin is fair (i.e., whether p=0.5).
Likelihood equation
The likelihood is a statement about probability. For a coin toss, there are two possible outcomes
Step2: Making things more concise
We can describe the same calculation above more concisely using exponents. So from now on we'll describe the product of n independent trials with probability p as $p^n$.
Step3: The goal of Maximum likelihood
Step4: Here we print the parameter of value of p used to calculate the likelihood, and the likelihood score next to each other on each line. You can see that the value of 0.6 has the highest likelihood... but it's kind of hard to interpret because all of the likelihood values are such small numbers.
Step5: For this reason, people usually look at the negative log of the likelihood, which is easier to interpret. Although the method is called "maximum likelihood", when working with the negative log-likelihood we are actually trying to minimize this score, which still means finding the parameter that best fits the data. Below you can see that for p=0.6 the -loglik score is lowest (68.32).
Step6: Functions
A cleaner and easier way to calculate the log-likelihood for our equation when using computer programs is to write a function. This is simply a tool that we can reuse over and over again to perform the same task of computing a result given some input variables.
Step7: Exhaustive parameter search
There are much more advanced methods for finding exact optimized parameters than to simply plug in every possible value by hand. The function below is from the scipy library and is a maximum likelihood optimization algorithm. It proposes new parameters based on the scores of the ones it searches previously, and keeps going until it finds the best value. For a simple problem like this it is super fast. For much more complex problems these computations can become quite computationally intensive.
In the example below I give run the maximum likelihood (ML) optimization for our likelihood function when the data (args) is 50 heads and 200 tails. The ML optimized parameter value of p is 0.2, which sounds correct, since 50/250 trials is 20%.
Step8: Here is another trial where we enter a different set of observations. Now when the data is 133 heads and 385 tails the ML parameter estimate of p is 0.2567.
Step9: Plot the likelihood over different parameter inputs
The first plot shows the likelihood calculated at different values for p between 0 and 1 when the observed data is heads=50, tails=200. It finds the optimum value for p at around 0.2. The second plot shows the likelihood when the observed data is heads=50, tails=50, and the optimum looks very close to 0.5. | Python Code:
import scipy.optimize as so
import numpy
import toyplot
Explanation: Exercise 5.1: Likelihood model optimization
This exercise uses the Python programming language. We will make use of the statistical libraries scipy and numpy to generate data under a parametric model (a model that takes one or more variables as inputs which affect its results) to learn how likelihood can help us to estimate the correct values of parameters given data generated under a model.
End of explanation
# if the coin is fair (p=0.5) then the probability isn't very high
p = 0.5
p * p * p * p * p
# but if the coin is really unfair then the probability is quite high
p = 0.99
p * p * p * p * p
Explanation: Optimization
Maximum likelihood optimization is a statistical method for finding the best fitting set of parameters for a model. A likelihood function can be made up of many independent likelihood functions, where each describes the result of a trial relative to a statistical distribution. Let's consider a simple example of a coin flip, and estimating whether the coin is fair (i.e., whether p=0.5).
Likelihood equation
The likelihood is a statement about probability. For a coin toss, there are two possible outcomes: heads or tails. If we observe 10 coin tosses and learn that heads came up 5 times, and tails came up 5 times, we can try to learn from this what the probability is of each of the possible outcomes. Sounds like the probability is around 0.5, right? But what if we observe 38 heads and 46 tails, should we still think the true probability is around 0.5? Well, we can use likelihood to tell us.
Since there are only two possible outcomes to a coin toss we know that the probability of flipping heads (p) plus the probability of flipping tails (q) must sum to 1 (p + q = 1). This is called the joint probability distribution of our model. Because the probability of flipping tails (q) is simply 1 - p, it is fully conditional on p, and so in reality there is only one parameter to this model. Our goal then, put simply, is to estimate the probability that one event will come up heads (p).
Calculating likelihoods
Let's say the true probability of flipping heads for a coin is 0.5. Without knowing this, we can try to test if this is true by calculating the likelihood of a series of coin flips that are performed using this coin. We are observing data and trying to estimate a parameter of the model that is most likely to produce the results we observe.
Example
Probability theory tells us that the probability of many independent events is the product of their individual probabilities. In other words, the probability of five coin flips in a row coming up heads is p * p * p * p * p. Let's look at two examples. If the coin is fair (p=0.5) then the probability of five heads in a row is quite low (0.03), but if the coin is totally rigged (p=0.99) then it is super likely that we will see five heads in a row (0.95). From these observations, we could probably make a guess as whether the coin toss is fair or biased, but the more tosses we do the more accurate our estimate will be.
End of explanation
# the probability of observing 20 heads for a coin with p=0.6
p = 0.6
n = 20
p**n
# the probability of observing 10 heads and 10 tails for p=0.6
p = 0.6
q = 1 - p
np = 10
nq = 10
p**np * q**nq
Explanation: Making things more concise
We can describe the same calculation above more concisely using exponents. So from now on we'll describe the product of n independent trials with probability p as $p^n$.
End of explanation
# our observed data
np = 62
nq = 40
Explanation: The goal of Maximum likelihood:
We want to find the best possible parameter to explain our observed data. This can be done in one of two ways: mathematically or empirically. Mathematically, if a likelihood equation is easy to solve then we can take the derivative of the equation and set it equal to zero and solve for our parameter. However, for many complex likelihood functions this is too difficult to solve. Therefore, a frequent approach is to maximize the likelihood empirically by trying many different values using a heuristic search. Computers are great for this. Since this is the practice in phylogenetics we will focus on heuristic optimization search.
End of explanation
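As a side note on the "mathematical" route mentioned above: for this coin model the derivative of np*log(p) + nq*log(1-p) is zero at p = np/(np+nq), so the analytic estimate can be checked directly against the observed counts defined above (np and nq here are the head/tail counts, not the numpy module).
p_analytic = float(np) / (np + nq)
print("analytic maximum-likelihood estimate of p: {:.4f}".format(p_analytic))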
# let's see which parameter for p best fits the data
for p in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
likelihood = p**np * (1-p)**nq
print("p={}; likelihood={}".format(p, likelihood))
Explanation: Here we print the value of the parameter p used to calculate the likelihood and the resulting likelihood score next to each other on each line. You can see that the value of 0.6 has the highest likelihood... but it's kind of hard to interpret because all of the likelihood values are such small numbers.
End of explanation
# let's see which parameter for p best fits the data
for p in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
likelihood = p**np * (1-p)**nq
print("p={}; -loglik={:.2f}".format(p, -numpy.log(likelihood)))
Explanation: For this reason, people usually look at the negative log of the likelihood, which is easier to interpret. Although the method is called "maximum likelihood", when working with the negative log-likelihood we are actually trying to minimize this score, which still means finding the parameter that best fits the data. Below you can see that for p=0.6 the -loglik score is lowest (68.32).
End of explanation
def coin_flip_log(p, nheads, ntails):
## calculate likelihood
logp = nheads*numpy.log(p) + ntails*numpy.log(1.-p)
## return negative log-likelihood
return -1*logp
coin_flip_log(0.5, 100, 100)
Explanation: Functions
A cleaner and easier way to calculate the log-likelihood for our equation when using computer programs is to write a function. This is simply a tool that we can reuse over and over again to perform the same task of computing a result given some input variables.
End of explanation
# starting value=0.5; observed flips = (50, 200)
so.fmin(coin_flip_log, x0=(0.5), args=(50, 200), disp=0)[0]
Explanation: Exhaustive parameter search
There are much more advanced methods for finding exact optimized parameters than simply plugging in every possible value by hand. The function below is from the scipy library and is a maximum likelihood optimization algorithm. It proposes new parameters based on the scores of the ones it searched previously, and keeps going until it finds the best value. For a simple problem like this it is super fast. For much more complex problems these computations can become quite computationally intensive.
In the example below I run the maximum likelihood (ML) optimization for our likelihood function when the data (args) is 50 heads and 200 tails. The ML optimized parameter value of p is 0.2, which sounds correct, since 50/250 trials is 20%.
End of explanation
# starting value=0.5; observed flips = (133, 385)
so.fmin(coin_flip_log, x0=(0.5), args=(133, 385), disp=0)[0]
Explanation: Here is another trial where we enter a different set of observations. Now when the data is 133 heads and 385 tails the ML parameter estimate of p is 0.2567.
End of explanation
## generate data across 100 equally spaced points for lambda
data = [coin_flip_log(p, 50, 200) for p in numpy.linspace(0.01, 0.99, 100)]
## plot the likelihood surface
toyplot.plot(
b=numpy.log(data),
a=numpy.linspace(0.01, 0.99, 100),
width=500, height=300,
ylabel="-log-likelihood",
xlabel="probability of heads");
## generate data across 100 equally spaced points for lambda
data = [coin_flip_log(p, 50, 50) for p in numpy.linspace(0.01, 0.99, 100)]
## plot the likelihood surface
toyplot.plot(
b=numpy.log(data),
a=numpy.linspace(0.01, 0.99, 100),
width=500, height=300,
ylabel="-log-likelihood",
xlabel="probability of heads");
Explanation: Plot the likelihood over different parameter inputs
The first plot shows the likelihood calculated at different values for p between 0 and 1 when the observed data is heads=50, tails=200. It finds the optimum value for p at around 0.2. The second plot shows the likelihood when the observed data is heads=50, tails=50, and the optimum looks very close to 0.5.
End of explanation |
14,740 | Given the following text description, write Python code to implement the functionality described.
Description:
Check whether product of digits at even places is divisible by sum of digits at odd place of a number
The function below checks whether the product of digits at even places is divisible by the sum of digits at odd places ; Converting integer to string ; Traversing the string ; Driver code
| Python Code:
def productSumDivisible(n):
    # digits at even places (1-based) sit at odd 0-based indices
    digit_sum = 0
    product = 1
    num = str(n)
    for i in range(len(num)):
        if i % 2 != 0:
            product = product * int(num[i])
        else:
            digit_sum = digit_sum + int(num[i])
    # assumes the sum of digits at odd places is non-zero
    return product % digit_sum == 0

if __name__ == "__main__":
    n = 1234
    if productSumDivisible(n):
        print("TRUE")
    else:
        print("FALSE")
14,741 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Five programming problems every Software Engineer should be able to solve in less than 1 hour
http
Step1: Problem 2
Write a function that combines two lists by alternatingly taking elements. For example
Step2: Problem 3
Write a function that computes the list of the first 100 Fibonacci numbers. By definition, the first two numbers in the Fibonacci sequence are 0 and 1, and each subsequent number is the sum of the previous two. As an example, here are the first 10 Fibonnaci numbers
Step3: Problem 4
Write a function that given a list of non negative integers, arranges them such that they form the largest possible number. For example, given [50, 2, 1, 9], the largest formed number is 95021. | Python Code:
a = [1,2,3,4,5,6,7]
def adding(x):
total = 0
for i in x:
total += i
print(total)
total = 0
i = 0
while i < len(x):
total += x[i]
i += 1
print(total)
return
adding(a)
b = [1,2,3,4,5,6,7]
def recur(lst,counter,total):
if counter == len(lst):
print(total)
return
else:
total += lst[counter]
counter += 1
return recur(lst,counter,total)
recur(b,0,0)
Explanation: Five programming problems every Software Engineer should be able to solve in less than 1 hour
http://www.shiftedup.com/2015/05/07/five-programming-problems-every-software-engineer-should-be-able-to-solve-in-less-than-1-hour
Problem 1
Write three functions that compute the sum of the numbers in a given list using a for-loop, a while-loop, and recursion.
End of explanation
lst1=['a','b','c']
lst2=[1,2,3]
def combine(lst1,lst2):
lst = []
i = 0
while i < len(lst1):
lst.append(lst1[i])
lst.append(lst2[i])
i += 1
lst
print(lst)
return
combine(lst1,lst2)
Explanation: Problem 2
Write a function that combines two lists by alternatingly taking elements. For example: given the two lists [a, b, c] and [1, 2, 3], the function should return [a, 1, b, 2, c, 3].
End of explanation
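A more compact variant of the same idea using zip; like the loop version above it assumes the two lists have equal length.
def combine_zip(lst1, lst2):
    out = []
    for a, b in zip(lst1, lst2):
        out.extend([a, b])
    return out

print(combine_zip(['a', 'b', 'c'], [1, 2, 3]))   # ['a', 1, 'b', 2, 'c', 3]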
# fibonacci numbers 1-100
def fibonacci(i):
k = 0
num0 = 0
num1 = 1
print(num0)
print(num1)
while k < i:
next_num = num0 + num1
print(next_num)
num0 = num1
num1 = next_num
k += 1
fibonacci(98)
# fibonacci numbers 1-100
def fibonacci(i):
fib = [0,1]
k = 0
while len(fib) < i:
next_num = fib[k] + fib[k+1]
fib.append(next_num)
k+=1
print(fib)
fibonacci(100)
Explanation: Problem 3
Write a function that computes the list of the first 100 Fibonacci numbers. By definition, the first two numbers in the Fibonacci sequence are 0 and 1, and each subsequent number is the sum of the previous two. As an example, here are the first 10 Fibonacci numbers: 0, 1, 1, 2, 3, 5, 8, 13, 21, and 34.
End of explanation
#this will search for the whole list 1x for i_dwn
def list_sort(lst):
iterations = 0
while 1:
i = 0
j = i+1
value_before = make_num(lst[:])
while j < len(lst):
n0 = (int(str(lst[i]) + str(lst[j])))
n1 = (int(str(lst[j]) + str(lst[i])))
print('lst:',lst)
if n0 < n1:
lst[i],lst[j] = lst[j],lst[i]
i+=1
j=i+1
iterations += 1
value_after = make_num(lst[:])
if value_before < value_after:
continue
else:
print('before:',value_before)
print('after:',value_after)
print('iterations:',iterations)
break
return
# function to print the list as a number
def make_num(lst_sorted):
s_total = ''
for i in lst_sorted:
s_total += str(i)
return int(s_total)
#lst = [5, 50, 56]
lst = [17, 32, 91, 7, 46]
# this calls the initial sort
list_sort(lst)
Explanation: Problem 4
Write a function that, given a list of non-negative integers, arranges them such that they form the largest possible number. For example, given [50, 2, 1, 9], the largest formed number is 95021.
End of explanation |
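A shorter alternative for Problem 4, sketched here assuming Python 3's functools.cmp_to_key: sort the numbers as strings with a pairwise rule that places a before b whenever the concatenation a+b is larger than b+a.
from functools import cmp_to_key

def largest_number(nums):
    strs = sorted((str(n) for n in nums),
                  key=cmp_to_key(lambda a, b: int(b + a) - int(a + b)))
    return int(''.join(strs))

print(largest_number([50, 2, 1, 9]))        # 95021
print(largest_number([17, 32, 91, 7, 46]))  # 917463217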
14,742 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step1: Convolutional Networks
So far we have worked with deep fully-connected networks, using them to explore different optimization strategies and network architectures. Fully-connected networks are a good testbed for experimentation because they are very computationally efficient, but in practice all state-of-the-art results use convolutional networks instead.
First you will implement several layer types that are used in convolutional networks. You will then use these layers to train a convolutional network on the CIFAR-10 dataset.
Step2: Convolution
Step4: Aside
Step5: Convolution
Step6: Max pooling
Step7: Max pooling
Step8: Fast layers
Making convolution and pooling layers fast can be challenging. To spare you the pain, we've provided fast implementations of the forward and backward passes for convolution and pooling layers in the file cs231n/fast_layers.py.
The fast convolution implementation depends on a Cython extension; to compile it you need to run the following from the cs231n directory
Step9: Convolutional "sandwich" layers
Previously we introduced the concept of "sandwich" layers that combine multiple operations into commonly used patterns. In the file cs231n/layer_utils.py you will find sandwich layers that implement a few commonly used patterns for convolutional networks.
Step10: Three-layer ConvNet
Now that you have implemented all the necessary layers, we can put them together into a simple convolutional network.
Open the file cs231n/classifiers/cnn.py and complete the implementation of the ThreeLayerConvNet class. Run the following cells to help you debug
Step11: Gradient check
After the loss looks reasonable, use numeric gradient checking to make sure that your backward pass is correct. When you use numeric gradient checking you should use a small amount of artificial data and a small number of neurons at each layer. Note
Step12: Overfit small data
A nice trick is to train your model with just a few training samples. You should be able to overfit small datasets, which will result in very high training accuracy and comparatively low validation accuracy.
Step13: Plotting the loss, training accuracy, and validation accuracy should show clear overfitting
Step14: Train the net
By training the three-layer convolutional network for one epoch, you should achieve greater than 40% accuracy on the training set
Step15: Visualize Filters
You can visualize the first-layer convolutional filters from the trained network by running the following
Step16: Spatial Batch Normalization
We already saw that batch normalization is a very useful technique for training deep fully-connected networks. Batch normalization can also be used for convolutional networks, but we need to tweak it a bit; the modification will be called "spatial batch normalization."
Normally batch-normalization accepts inputs of shape (N, D) and produces outputs of shape (N, D), where we normalize across the minibatch dimension N. For data coming from convolutional layers, batch normalization needs to accept inputs of shape (N, C, H, W) and produce outputs of shape (N, C, H, W) where the N dimension gives the minibatch size and the (H, W) dimensions give the spatial size of the feature map.
If the feature map was produced using convolutions, then we expect the statistics of each feature channel to be relatively consistent both between different images and different locations within the same image. Therefore spatial batch normalization computes a mean and variance for each of the C feature channels by computing statistics over both the minibatch dimension N and the spatial dimensions H and W.
Spatial batch normalization
Step17: Spatial batch normalization | Python Code:
# As usual, a bit of setup
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.cnn import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient_array, eval_numerical_gradient
from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
returns relative error
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.items():
print('%s: ' % k, v.shape)
Explanation: Convolutional Networks
So far we have worked with deep fully-connected networks, using them to explore different optimization strategies and network architectures. Fully-connected networks are a good testbed for experimentation because they are very computationally efficient, but in practice all state-of-the-art results use convolutional networks instead.
First you will implement several layer types that are used in convolutional networks. You will then use these layers to train a convolutional network on the CIFAR-10 dataset.
End of explanation
x_shape = (2, 3, 4, 4)
w_shape = (3, 3, 4, 4)
x = np.linspace(-0.1, 0.5, num=np.prod(x_shape)).reshape(x_shape)
w = np.linspace(-0.2, 0.3, num=np.prod(w_shape)).reshape(w_shape)
b = np.linspace(-0.1, 0.2, num=3)
conv_param = {'stride': 2, 'pad': 1}
out, _ = conv_forward_naive(x, w, b, conv_param)
correct_out = np.array([[[[-0.08759809, -0.10987781],
[-0.18387192, -0.2109216 ]],
[[ 0.21027089, 0.21661097],
[ 0.22847626, 0.23004637]],
[[ 0.50813986, 0.54309974],
[ 0.64082444, 0.67101435]]],
[[[-0.98053589, -1.03143541],
[-1.19128892, -1.24695841]],
[[ 0.69108355, 0.66880383],
[ 0.59480972, 0.56776003]],
[[ 2.36270298, 2.36904306],
[ 2.38090835, 2.38247847]]]])
# Compare your output to ours; difference should be around 2e-8
print('Testing conv_forward_naive')
print('difference: ', rel_error(out, correct_out))
Explanation: Convolution: Naive forward pass
The core of a convolutional network is the convolution operation. In the file cs231n/layers.py, implement the forward pass for the convolution layer in the function conv_forward_naive.
You don't have to worry too much about efficiency at this point; just write the code in whatever way you find most clear.
You can test your implementation by running the following:
End of explanation
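One possible loop-based reference for the forward pass (a sketch, not the official solution; it follows the shapes and conv_param keys used in the check above and stores (x, w, b, conv_param) as the cache).
def conv_forward_naive_sketch(x, w, b, conv_param):
    N, C, H, W = x.shape
    F, _, HH, WW = w.shape
    stride, pad = conv_param['stride'], conv_param['pad']
    H_out = 1 + (H + 2 * pad - HH) // stride
    W_out = 1 + (W + 2 * pad - WW) // stride
    x_pad = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')
    out = np.zeros((N, F, H_out, W_out))
    for n in range(N):                       # each image
        for f in range(F):                   # each filter
            for i in range(H_out):           # each output row
                for j in range(W_out):       # each output column
                    window = x_pad[n, :, i*stride:i*stride+HH, j*stride:j*stride+WW]
                    out[n, f, i, j] = np.sum(window * w[f]) + b[f]
    return out, (x, w, b, conv_param)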
from scipy.misc import imread, imresize
kitten, puppy = imread('kitten.jpg'), imread('puppy.jpg')
# kitten is wide, and puppy is already square
d = kitten.shape[1] - kitten.shape[0]
kitten_cropped = kitten[:, d//2:-d//2, :]
img_size = 200 # Make this smaller if it runs too slow
x = np.zeros((2, 3, img_size, img_size))
x[0, :, :, :] = imresize(puppy, (img_size, img_size)).transpose((2, 0, 1))
x[1, :, :, :] = imresize(kitten_cropped, (img_size, img_size)).transpose((2, 0, 1))
# Set up a convolutional weights holding 2 filters, each 3x3
w = np.zeros((2, 3, 3, 3))
# The first filter converts the image to grayscale.
# Set up the red, green, and blue channels of the filter.
w[0, 0, :, :] = [[0, 0, 0], [0, 0.3, 0], [0, 0, 0]]
w[0, 1, :, :] = [[0, 0, 0], [0, 0.6, 0], [0, 0, 0]]
w[0, 2, :, :] = [[0, 0, 0], [0, 0.1, 0], [0, 0, 0]]
# Second filter detects horizontal edges in the blue channel.
w[1, 2, :, :] = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]
# Vector of biases. We don't need any bias for the grayscale
# filter, but for the edge detection filter we want to add 128
# to each output so that nothing is negative.
b = np.array([0, 128])
# Compute the result of convolving each input in x with each filter in w,
# offsetting by b, and storing the results in out.
out, _ = conv_forward_naive(x, w, b, {'stride': 1, 'pad': 1})
def imshow_noax(img, normalize=True):
Tiny helper to show images as uint8 and remove axis labels
if normalize:
img_max, img_min = np.max(img), np.min(img)
img = 255.0 * (img - img_min) / (img_max - img_min)
plt.imshow(img.astype('uint8'))
plt.gca().axis('off')
# Show the original images and the results of the conv operation
plt.subplot(2, 3, 1)
imshow_noax(puppy, normalize=False)
plt.title('Original image')
plt.subplot(2, 3, 2)
imshow_noax(out[0, 0])
plt.title('Grayscale')
plt.subplot(2, 3, 3)
imshow_noax(out[0, 1])
plt.title('Edges')
plt.subplot(2, 3, 4)
imshow_noax(kitten_cropped, normalize=False)
plt.subplot(2, 3, 5)
imshow_noax(out[1, 0])
plt.subplot(2, 3, 6)
imshow_noax(out[1, 1])
plt.show()
Explanation: Aside: Image processing via convolutions
As a fun way to both check your implementation and gain a better understanding of the type of operation that convolutional layers can perform, we will set up an input containing two images and manually set up filters that perform common image processing operations (grayscale conversion and edge detection). The convolution forward pass will apply these operations to each of the input images. We can then visualize the results as a sanity check.
End of explanation
np.random.seed(231)
x = np.random.randn(4, 3, 5, 5)
w = np.random.randn(2, 3, 3, 3)
b = np.random.randn(2,)
dout = np.random.randn(4, 2, 5, 5)
conv_param = {'stride': 1, 'pad': 1}
dx_num = eval_numerical_gradient_array(lambda x: conv_forward_naive(x, w, b, conv_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_forward_naive(x, w, b, conv_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_forward_naive(x, w, b, conv_param)[0], b, dout)
out, cache = conv_forward_naive(x, w, b, conv_param)
dx, dw, db = conv_backward_naive(dout, cache)
# Your errors should be around 1e-8
print('Testing conv_backward_naive function')
print('dx error: ', rel_error(dx, dx_num))
print('dw error: ', rel_error(dw, dw_num))
print('db error: ', rel_error(db, db_num))
Explanation: Convolution: Naive backward pass
Implement the backward pass for the convolution operation in the function conv_backward_naive in the file cs231n/layers.py. Again, you don't need to worry too much about computational efficiency.
When you are done, run the following to check your backward pass with a numeric gradient check.
End of explanation
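# One way to sketch the matching naive backward pass (a hypothetical helper, not the
# assignment's official solution): accumulate gradients window by window.
def conv_backward_naive_sketch(dout, cache):
    x, w, b, conv_param = cache
    stride, pad = conv_param['stride'], conv_param['pad']
    N, C, H, W = x.shape
    F, _, HH, WW = w.shape
    _, _, H_out, W_out = dout.shape
    x_pad = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode='constant')
    dx_pad = np.zeros_like(x_pad)
    dw = np.zeros_like(w)
    db = dout.sum(axis=(0, 2, 3))           # bias gradient: sum over N, H', W'
    for n in range(N):
        for f in range(F):
            for i in range(H_out):
                for j in range(W_out):
                    h0, w0 = i * stride, j * stride
                    window = x_pad[n, :, h0:h0 + HH, w0:w0 + WW]
                    dw[f] += window * dout[n, f, i, j]
                    dx_pad[n, :, h0:h0 + HH, w0:w0 + WW] += w[f] * dout[n, f, i, j]
    dx = dx_pad[:, :, pad:pad + H, pad:pad + W]   # strip the padding
    return dx, dw, db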
x_shape = (2, 3, 4, 4)
x = np.linspace(-0.3, 0.4, num=np.prod(x_shape)).reshape(x_shape)
pool_param = {'pool_width': 2, 'pool_height': 2, 'stride': 2}
out, _ = max_pool_forward_naive(x, pool_param)
correct_out = np.array([[[[-0.26315789, -0.24842105],
[-0.20421053, -0.18947368]],
[[-0.14526316, -0.13052632],
[-0.08631579, -0.07157895]],
[[-0.02736842, -0.01263158],
[ 0.03157895, 0.04631579]]],
[[[ 0.09052632, 0.10526316],
[ 0.14947368, 0.16421053]],
[[ 0.20842105, 0.22315789],
[ 0.26736842, 0.28210526]],
[[ 0.32631579, 0.34105263],
[ 0.38526316, 0.4 ]]]])
# Compare your output with ours. Difference should be around 1e-8.
print('Testing max_pool_forward_naive function:')
print('difference: ', rel_error(out, correct_out))
Explanation: Max pooling: Naive forward
Implement the forward pass for the max-pooling operation in the function max_pool_forward_naive in the file cs231n/layers.py. Again, don't worry too much about computational efficiency.
Check your implementation by running the following:
End of explanation
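# A possible naive max-pooling forward pass (sketch only; the graded function lives
# in cs231n/layers.py and may be organized differently).
def max_pool_forward_naive_sketch(x, pool_param):
    N, C, H, W = x.shape
    ph, pw, stride = pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']
    H_out = 1 + (H - ph) // stride
    W_out = 1 + (W - pw) // stride
    out = np.zeros((N, C, H_out, W_out))
    for i in range(H_out):
        for j in range(W_out):
            window = x[:, :, i * stride:i * stride + ph, j * stride:j * stride + pw]
            out[:, :, i, j] = window.max(axis=(2, 3))   # max over each spatial window
    return out, (x, pool_param)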
np.random.seed(231)
x = np.random.randn(3, 2, 8, 8)
dout = np.random.randn(3, 2, 4, 4)
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
dx_num = eval_numerical_gradient_array(lambda x: max_pool_forward_naive(x, pool_param)[0], x, dout)
out, cache = max_pool_forward_naive(x, pool_param)
dx = max_pool_backward_naive(dout, cache)
# Your error should be around 1e-12
print('Testing max_pool_backward_naive function:')
print('dx error: ', rel_error(dx, dx_num))
Explanation: Max pooling: Naive backward
Implement the backward pass for the max-pooling operation in the function max_pool_backward_naive in the file cs231n/layers.py. You don't need to worry about computational efficiency.
Check your implementation with numeric gradient checking by running the following:
End of explanation
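# Sketch of the corresponding backward pass: route each upstream gradient to the
# location of the maximum inside its pooling window (assumes ties are rare).
def max_pool_backward_naive_sketch(dout, cache):
    x, pool_param = cache
    N, C, H, W = x.shape
    ph, pw, stride = pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']
    _, _, H_out, W_out = dout.shape
    dx = np.zeros_like(x)
    for n in range(N):
        for c in range(C):
            for i in range(H_out):
                for j in range(W_out):
                    h0, w0 = i * stride, j * stride
                    window = x[n, c, h0:h0 + ph, w0:w0 + pw]
                    mask = (window == window.max())
                    dx[n, c, h0:h0 + ph, w0:w0 + pw] += mask * dout[n, c, i, j]
    return dx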
from cs231n.fast_layers import conv_forward_fast, conv_backward_fast
from time import time
np.random.seed(231)
x = np.random.randn(100, 3, 31, 31)
w = np.random.randn(25, 3, 3, 3)
b = np.random.randn(25,)
dout = np.random.randn(100, 25, 16, 16)
conv_param = {'stride': 2, 'pad': 1}
t0 = time()
out_naive, cache_naive = conv_forward_naive(x, w, b, conv_param)
t1 = time()
out_fast, cache_fast = conv_forward_fast(x, w, b, conv_param)
t2 = time()
print('Testing conv_forward_fast:')
print('Naive: %fs' % (t1 - t0))
print('Fast: %fs' % (t2 - t1))
print('Speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('Difference: ', rel_error(out_naive, out_fast))
t0 = time()
dx_naive, dw_naive, db_naive = conv_backward_naive(dout, cache_naive)
t1 = time()
dx_fast, dw_fast, db_fast = conv_backward_fast(dout, cache_fast)
t2 = time()
print('\nTesting conv_backward_fast:')
print('Naive: %fs' % (t1 - t0))
print('Fast: %fs' % (t2 - t1))
print('Speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('dx difference: ', rel_error(dx_naive, dx_fast))
print('dw difference: ', rel_error(dw_naive, dw_fast))
print('db difference: ', rel_error(db_naive, db_fast))
from cs231n.fast_layers import max_pool_forward_fast, max_pool_backward_fast
np.random.seed(231)
x = np.random.randn(100, 3, 32, 32)
dout = np.random.randn(100, 3, 16, 16)
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
t0 = time()
out_naive, cache_naive = max_pool_forward_naive(x, pool_param)
t1 = time()
out_fast, cache_fast = max_pool_forward_fast(x, pool_param)
t2 = time()
print('Testing pool_forward_fast:')
print('Naive: %fs' % (t1 - t0))
print('fast: %fs' % (t2 - t1))
print('speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('difference: ', rel_error(out_naive, out_fast))
t0 = time()
dx_naive = max_pool_backward_naive(dout, cache_naive)
t1 = time()
dx_fast = max_pool_backward_fast(dout, cache_fast)
t2 = time()
print('\nTesting pool_backward_fast:')
print('Naive: %fs' % (t1 - t0))
print('speedup: %fx' % ((t1 - t0) / (t2 - t1)))
print('dx difference: ', rel_error(dx_naive, dx_fast))
Explanation: Fast layers
Making convolution and pooling layers fast can be challenging. To spare you the pain, we've provided fast implementations of the forward and backward passes for convolution and pooling layers in the file cs231n/fast_layers.py.
The fast convolution implementation depends on a Cython extension; to compile it you need to run the following from the cs231n directory:
bash
python setup.py build_ext --inplace
The API for the fast versions of the convolution and pooling layers is exactly the same as the naive versions that you implemented above: the forward pass receives data, weights, and parameters and produces outputs and a cache object; the backward pass receives upstream derivatives and the cache object and produces gradients with respect to the data and weights.
NOTE: The fast implementation for pooling will only perform optimally if the pooling regions are non-overlapping and tile the input. If these conditions are not met then the fast pooling implementation will not be much faster than the naive implementation.
You can compare the performance of the naive and fast versions of these layers by running the following:
End of explanation
from cs231n.layer_utils import conv_relu_pool_forward, conv_relu_pool_backward
np.random.seed(231)
x = np.random.randn(2, 3, 16, 16)
w = np.random.randn(3, 3, 3, 3)
b = np.random.randn(3,)
dout = np.random.randn(2, 3, 8, 8)
conv_param = {'stride': 1, 'pad': 1}
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
out, cache = conv_relu_pool_forward(x, w, b, conv_param, pool_param)
dx, dw, db = conv_relu_pool_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], b, dout)
print('Testing conv_relu_pool')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
from cs231n.layer_utils import conv_relu_forward, conv_relu_backward
np.random.seed(231)
x = np.random.randn(2, 3, 8, 8)
w = np.random.randn(3, 3, 3, 3)
b = np.random.randn(3,)
dout = np.random.randn(2, 3, 8, 8)
conv_param = {'stride': 1, 'pad': 1}
out, cache = conv_relu_forward(x, w, b, conv_param)
dx, dw, db = conv_relu_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: conv_relu_forward(x, w, b, conv_param)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: conv_relu_forward(x, w, b, conv_param)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: conv_relu_forward(x, w, b, conv_param)[0], b, dout)
print('Testing conv_relu:')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
Explanation: Convolutional "sandwich" layers
Previously we introduced the concept of "sandwich" layers that combine multiple operations into commonly used patterns. In the file cs231n/layer_utils.py you will find sandwich layers that implement a few commonly used patterns for convolutional networks.
End of explanation
N = 50
X = np.random.randn(N, 3, 32, 32)
y = np.random.randint(10, size=N)
model = ThreeLayerConvNet()
loss, grads = model.loss(X, y)
print('Initial loss (no regularization): ', loss)
model.reg = 0.5
loss, grads = model.loss(X, y)
print('Initial loss (with regularization): ', loss)
Explanation: Three-layer ConvNet
Now that you have implemented all the necessary layers, we can put them together into a simple convolutional network.
Open the file cs231n/classifiers/cnn.py and complete the implementation of the ThreeLayerConvNet class. Run the following cells to help you debug:
Sanity check loss
After you build a new network, one of the first things you should do is sanity check the loss. When we use the softmax loss, we expect the loss for random weights (and no regularization) to be about log(C) for C classes. When we add regularization this should go up.
End of explanation
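# Quick arithmetic check of the expected initial loss (an assumption of C = 10 classes,
# as in CIFAR-10): with uniform random scores the softmax loss should be near log(C).
print('expected initial loss for 10 classes ~', np.log(10))   # about 2.3026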
num_inputs = 2
input_dim = (3, 16, 16)
reg = 0.0
num_classes = 10
np.random.seed(231)
X = np.random.randn(num_inputs, *input_dim)
y = np.random.randint(num_classes, size=num_inputs)
model = ThreeLayerConvNet(num_filters=3, filter_size=3,
input_dim=input_dim, hidden_dim=7,
dtype=np.float64)
loss, grads = model.loss(X, y)
for param_name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
param_grad_num = eval_numerical_gradient(f, model.params[param_name], verbose=False, h=1e-6)
e = rel_error(param_grad_num, grads[param_name])
print('%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name])))
Explanation: Gradient check
After the loss looks reasonable, use numeric gradient checking to make sure that your backward pass is correct. When you use numeric gradient checking you should use a small amount of artificial data and a small number of neurons at each layer. Note: correct implementations may still have relative errors up to 1e-2.
End of explanation
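# The cells above rely on a rel_error helper; a typical definition (an assumption here,
# matching the usual cs231n convention) compares tensors with a stabilized ratio:
def rel_error_sketch(x, y):
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))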
np.random.seed(231)
num_train = 100
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
model = ThreeLayerConvNet(weight_scale=1e-2)
solver = Solver(model, small_data,
num_epochs=15, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=1)
solver.train()
Explanation: Overfit small data
A nice trick is to train your model with just a few training samples. You should be able to overfit small datasets, which will result in very high training accuracy and comparatively low validation accuracy.
End of explanation
plt.subplot(2, 1, 1)
plt.plot(solver.loss_history, 'o')
plt.xlabel('iteration')
plt.ylabel('loss')
plt.subplot(2, 1, 2)
plt.plot(solver.train_acc_history, '-o')
plt.plot(solver.val_acc_history, '-o')
plt.legend(['train', 'val'], loc='upper left')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()
Explanation: Plotting the loss, training accuracy, and validation accuracy should show clear overfitting:
End of explanation
model = ThreeLayerConvNet(weight_scale=0.001, hidden_dim=500, reg=0.001)
solver = Solver(model, data,
num_epochs=1, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=20)
solver.train()
Explanation: Train the net
By training the three-layer convolutional network for one epoch, you should achieve greater than 40% accuracy on the training set:
End of explanation
from cs231n.vis_utils import visualize_grid
grid = visualize_grid(model.params['W1'].transpose(0, 2, 3, 1))
plt.imshow(grid.astype('uint8'))
plt.axis('off')
plt.gcf().set_size_inches(5, 5)
plt.show()
Explanation: Visualize Filters
You can visualize the first-layer convolutional filters from the trained network by running the following:
End of explanation
np.random.seed(231)
# Check the training-time forward pass by checking means and variances
# of features both before and after spatial batch normalization
N, C, H, W = 2, 3, 4, 5
x = 4 * np.random.randn(N, C, H, W) + 10
print('Before spatial batch normalization:')
print(' Shape: ', x.shape)
print(' Means: ', x.mean(axis=(0, 2, 3)))
print(' Stds: ', x.std(axis=(0, 2, 3)))
# Means should be close to zero and stds close to one
gamma, beta = np.ones(C), np.zeros(C)
bn_param = {'mode': 'train'}
out, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param)
print('After spatial batch normalization:')
print(' Shape: ', out.shape)
print(' Means: ', out.mean(axis=(0, 2, 3)))
print(' Stds: ', out.std(axis=(0, 2, 3)))
# Means should be close to beta and stds close to gamma
gamma, beta = np.asarray([3, 4, 5]), np.asarray([6, 7, 8])
out, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param)
print('After spatial batch normalization (nontrivial gamma, beta):')
print(' Shape: ', out.shape)
print(' Means: ', out.mean(axis=(0, 2, 3)))
print(' Stds: ', out.std(axis=(0, 2, 3)))
np.random.seed(231)
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
N, C, H, W = 10, 4, 11, 12
bn_param = {'mode': 'train'}
gamma = np.ones(C)
beta = np.zeros(C)
for t in range(50):
x = 2.3 * np.random.randn(N, C, H, W) + 13
spatial_batchnorm_forward(x, gamma, beta, bn_param)
bn_param['mode'] = 'test'
x = 2.3 * np.random.randn(N, C, H, W) + 13
a_norm, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param)
# Means should be close to zero and stds close to one, but will be
# noisier than training-time forward passes.
print('After spatial batch normalization (test-time):')
print(' means: ', a_norm.mean(axis=(0, 2, 3)))
print(' stds: ', a_norm.std(axis=(0, 2, 3)))
Explanation: Spatial Batch Normalization
We already saw that batch normalization is a very useful technique for training deep fully-connected networks. Batch normalization can also be used for convolutional networks, but we need to tweak it a bit; the modification will be called "spatial batch normalization."
Normally batch-normalization accepts inputs of shape (N, D) and produces outputs of shape (N, D), where we normalize across the minibatch dimension N. For data coming from convolutional layers, batch normalization needs to accept inputs of shape (N, C, H, W) and produce outputs of shape (N, C, H, W) where the N dimension gives the minibatch size and the (H, W) dimensions give the spatial size of the feature map.
If the feature map was produced using convolutions, then we expect the statistics of each feature channel to be relatively consistent both between different images and different locations within the same image. Therefore spatial batch normalization computes a mean and variance for each of the C feature channels by computing statistics over both the minibatch dimension N and the spatial dimensions H and W.
Spatial batch normalization: forward
In the file cs231n/layers.py, implement the forward pass for spatial batch normalization in the function spatial_batchnorm_forward. Check your implementation by running the following:
End of explanation
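# The usual implementation trick, shown as a sketch: move the channel axis last,
# flatten N*H*W into one "sample" axis, and reuse the vanilla batchnorm_forward for
# (N, D) inputs implemented earlier in cs231n/layers.py (assumed available here).
def spatial_batchnorm_forward_sketch(x, gamma, beta, bn_param):
    N, C, H, W = x.shape
    x_flat = x.transpose(0, 2, 3, 1).reshape(-1, C)           # (N*H*W, C)
    out_flat, cache = batchnorm_forward(x_flat, gamma, beta, bn_param)
    out = out_flat.reshape(N, H, W, C).transpose(0, 3, 1, 2)  # back to (N, C, H, W)
    return out, cache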
np.random.seed(231)
N, C, H, W = 2, 3, 4, 5
x = 5 * np.random.randn(N, C, H, W) + 12
gamma = np.random.randn(C)
beta = np.random.randn(C)
dout = np.random.randn(N, C, H, W)
bn_param = {'mode': 'train'}
fx = lambda x: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0]
fg = lambda a: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0]
fb = lambda b: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma, dout)
db_num = eval_numerical_gradient_array(fb, beta, dout)
_, cache = spatial_batchnorm_forward(x, gamma, beta, bn_param)
dx, dgamma, dbeta = spatial_batchnorm_backward(dout, cache)
print('dx error: ', rel_error(dx_num, dx))
print('dgamma error: ', rel_error(da_num, dgamma))
print('dbeta error: ', rel_error(db_num, dbeta))
Explanation: Spatial batch normalization: backward
In the file cs231n/layers.py, implement the backward pass for spatial batch normalization in the function spatial_batchnorm_backward. Run the following to check your implementation using a numeric gradient check:
End of explanation |
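# And the mirrored backward sketch, again assuming the vanilla batchnorm_backward
# for (N, D) inputs from earlier in the assignment:
def spatial_batchnorm_backward_sketch(dout, cache):
    N, C, H, W = dout.shape
    dout_flat = dout.transpose(0, 2, 3, 1).reshape(-1, C)
    dx_flat, dgamma, dbeta = batchnorm_backward(dout_flat, cache)
    dx = dx_flat.reshape(N, H, W, C).transpose(0, 3, 1, 2)
    return dx, dgamma, dbeta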
14,743 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Working with CTF data
Step1: To reduce memory consumption and running time, some of the steps are
precomputed. To run everything from scratch change use_precomputed to
False. With use_precomputed = False running time of this script can
be several minutes even on a fast computer.
Step2: The data was collected with a CTF 275 system at 2400 Hz and low-pass
filtered at 600 Hz. Here the data and empty room data files are read to
construct instances of mne.io.Raw.
Step3: In the memory saving mode we use preload=False and use the memory
efficient IO which loads the data on demand. However, filtering and some
other functions require the data to be preloaded into memory.
Step4: The data array consists of 274 MEG axial gradiometers, 26 MEG reference
sensors and 2 EEG electrodes (Cz and Pz), in addition to stimulus, audio, response, ECG, EOG, head tracking and unused channels.
Step5: For noise reduction, a set of bad segments have been identified and stored
in csv files. The bad segments are later used to reject epochs that overlap
with them.
The file for the second run also contains some saccades. The saccades are
removed by using SSP. We use pandas to read the data from the csv files. You
can also view the files with your favorite text editor.
Step6: Here we compute the saccade and EOG projectors for magnetometers and add
them to the raw data. The projectors are added to both runs.
Step7: Visually inspect the effects of projections. Click on 'proj' button at the
bottom right corner to toggle the projectors on/off. EOG events can be
plotted by adding the event list as a keyword argument. As the bad segments
and saccades were added as annotations to the raw data, they are plotted as
well.
Step8: Typical preprocessing step is the removal of power line artifact (50 Hz or
60 Hz). Here we notch filter the data at 60, 120 and 180 to remove the
original 60 Hz artifact and the harmonics. The power spectra are plotted
before and after the filtering to show the effect. The drop after 600 Hz
appears because the data was filtered during the acquisition. In memory
saving mode we do the filtering at evoked stage, which is not something you
usually would do.
Step9: We also lowpass filter the data at 100 Hz to remove the hf components.
Step10: Epoching and averaging.
First some parameters are defined and events extracted from the stimulus
channel (UPPT001). The rejection thresholds are defined as peak-to-peak
values and are in T / m for gradiometers, T for magnetometers and
V for EOG and EEG channels.
Step11: The event timing is adjusted by comparing the trigger times on detected
sound onsets on channel UADC001-4408.
Step12: We mark a set of bad channels that seem noisier than others. This can also
be done interactively with raw.plot by clicking the channel name
(or the line). The marked channels are added as bad when the browser window
is closed.
Step13: The epochs (trials) are created for MEG channels. First we find the picks
for MEG and EOG channels. Then the epochs are constructed using these picks.
The epochs overlapping with annotated bad segments are also rejected by
default. To turn off rejection by bad segments (as was done earlier with
saccades) you can use keyword reject_by_annotation=False.
Step14: We only use first 40 good epochs from each run. Since we first drop the bad
epochs, the indices of the epochs are no longer same as in the original
epochs collection. Investigation of the event timings reveals that first
epoch from the second run corresponds to index 182.
Step15: The averages for each condition are computed.
Step16: Typical preprocessing step is the removal of power line artifact (50 Hz or
60 Hz). Here we lowpass filter the data at 40 Hz, which will remove all
line artifacts (and high frequency information). Normally this would be done
to raw data (with mne.io.Raw.filter), but here it is done at the evoked stage.
Step17: Here we plot the ERF of standard and deviant conditions. In both conditions
we can see the P50 and N100 responses. The mismatch negativity is visible
only in the deviant condition around 100-200 ms. P200 is also visible around
170 ms in both conditions but much stronger in the standard condition. P300
is visible in deviant condition only (decision making in preparation of the
button press). You can view the topographies from a certain time span by
painting an area with clicking and holding the left mouse button.
Step18: Show activations as topography figures.
Step19: We can see the MMN effect more clearly by looking at the difference between
the two conditions. P50 and N100 are no longer visible, but MMN/P200 and
P300 are emphasised.
Step20: Source estimation.
We compute the noise covariance matrix from the empty room measurement
and use it for the other runs.
Step21: The transformation is read from a file
Step22: To save time and memory, the forward solution is read from a file. Set
use_precomputed=False in the beginning of this script to build the
forward solution from scratch. The head surfaces for constructing a BEM
solution are read from a file. Since the data only contains MEG channels, we
only need the inner skull surface for making the forward solution. For more
information, see the MNE documentation on source spaces and BEM models.
Step23: The sources are computed using dSPM method and plotted on an inflated brain
surface. For interactive controls over the image, use keyword
time_viewer=True.
Standard condition.
Step24: Deviant condition.
Step25: Difference. | Python Code:
# Authors: Mainak Jas <[email protected]>
# Eric Larson <[email protected]>
# Jaakko Leppakangas <[email protected]>
#
# License: BSD-3-Clause
import os.path as op
import pandas as pd
import numpy as np
import mne
from mne import combine_evoked
from mne.minimum_norm import apply_inverse
from mne.datasets.brainstorm import bst_auditory
from mne.io import read_raw_ctf
print(__doc__)
Explanation: Working with CTF data: the Brainstorm auditory dataset
Here we compute the evoked from raw for the auditory Brainstorm
tutorial dataset. For comparison, see :footcite:TadelEtAl2011 and the
associated brainstorm site.
Experiment:
- One subject, 2 acquisition runs 6 minutes each.
- Each run contains 200 regular beeps and 40 easy deviant beeps.
- Random ISI: between 0.7s and 1.7s seconds, uniformly distributed.
- Button pressed when detecting a deviant with the right index finger.
The specifications of this dataset were discussed initially on the
FieldTrip bug tracker_.
End of explanation
use_precomputed = True
Explanation: To reduce memory consumption and running time, some of the steps are
precomputed. To run everything from scratch change use_precomputed to
False. With use_precomputed = False running time of this script can
be several minutes even on a fast computer.
End of explanation
data_path = bst_auditory.data_path()
subject = 'bst_auditory'
subjects_dir = op.join(data_path, 'subjects')
raw_fname1 = op.join(data_path, 'MEG', subject, 'S01_AEF_20131218_01.ds')
raw_fname2 = op.join(data_path, 'MEG', subject, 'S01_AEF_20131218_02.ds')
erm_fname = op.join(data_path, 'MEG', subject, 'S01_Noise_20131218_01.ds')
Explanation: The data was collected with a CTF 275 system at 2400 Hz and low-pass
filtered at 600 Hz. Here the data and empty room data files are read to
construct instances of :class:mne.io.Raw.
End of explanation
raw = read_raw_ctf(raw_fname1)
n_times_run1 = raw.n_times
# Here we ignore that these have different device<->head transforms
mne.io.concatenate_raws(
[raw, read_raw_ctf(raw_fname2)], on_mismatch='ignore')
raw_erm = read_raw_ctf(erm_fname)
Explanation: In the memory saving mode we use preload=False and use the memory
efficient IO which loads the data on demand. However, filtering and some
other functions require the data to be preloaded into memory.
End of explanation
raw.set_channel_types({'HEOG': 'eog', 'VEOG': 'eog', 'ECG': 'ecg'})
if not use_precomputed:
# Leave out the two EEG channels for easier computation of forward.
raw.pick(['meg', 'stim', 'misc', 'eog', 'ecg']).load_data()
Explanation: The data array consists of 274 MEG axial gradiometers, 26 MEG reference
sensors and 2 EEG electrodes (Cz and Pz). In addition:
1 stim channel for marking presentation times for the stimuli
1 audio channel for the sent signal
1 response channel for recording the button presses
1 ECG bipolar
2 EOG bipolar (vertical and horizontal)
12 head tracking channels
20 unused channels
Notice also that the digitized electrode positions (stored in a .pos file)
were automatically loaded and added to the ~mne.io.Raw object.
The head tracking channels and the unused channels are marked as misc
channels. Here we define the EOG and ECG channels.
End of explanation
annotations_df = pd.DataFrame()
offset = n_times_run1
for idx in [1, 2]:
csv_fname = op.join(data_path, 'MEG', 'bst_auditory',
'events_bad_0%s.csv' % idx)
df = pd.read_csv(csv_fname, header=None,
names=['onset', 'duration', 'id', 'label'])
print('Events from run {0}:'.format(idx))
print(df)
df['onset'] += offset * (idx - 1)
annotations_df = pd.concat([annotations_df, df], axis=0)
saccades_events = df[df['label'] == 'saccade'].values[:, :3].astype(int)
# Conversion from samples to times:
onsets = annotations_df['onset'].values / raw.info['sfreq']
durations = annotations_df['duration'].values / raw.info['sfreq']
descriptions = annotations_df['label'].values
annotations = mne.Annotations(onsets, durations, descriptions)
raw.set_annotations(annotations)
del onsets, durations, descriptions
Explanation: For noise reduction, a set of bad segments have been identified and stored
in csv files. The bad segments are later used to reject epochs that overlap
with them.
The file for the second run also contains some saccades. The saccades are
removed by using SSP. We use pandas to read the data from the csv files. You
can also view the files with your favorite text editor.
End of explanation
saccade_epochs = mne.Epochs(raw, saccades_events, 1, 0., 0.5, preload=True,
baseline=(None, None),
reject_by_annotation=False)
projs_saccade = mne.compute_proj_epochs(saccade_epochs, n_mag=1, n_eeg=0,
desc_prefix='saccade')
if use_precomputed:
proj_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-eog-proj.fif')
projs_eog = mne.read_proj(proj_fname)[0]
else:
projs_eog, _ = mne.preprocessing.compute_proj_eog(raw.load_data(),
n_mag=1, n_eeg=0)
raw.add_proj(projs_saccade)
raw.add_proj(projs_eog)
del saccade_epochs, saccades_events, projs_eog, projs_saccade # To save memory
Explanation: Here we compute the saccade and EOG projectors for magnetometers and add
them to the raw data. The projectors are added to both runs.
End of explanation
raw.plot(block=True)
Explanation: Visually inspect the effects of projections. Click on 'proj' button at the
bottom right corner to toggle the projectors on/off. EOG events can be
plotted by adding the event list as a keyword argument. As the bad segments
and saccades were added as annotations to the raw data, they are plotted as
well.
End of explanation
if not use_precomputed:
raw.plot_psd(tmax=np.inf, picks='meg')
notches = np.arange(60, 181, 60)
raw.notch_filter(notches, phase='zero-double', fir_design='firwin2')
raw.plot_psd(tmax=np.inf, picks='meg')
Explanation: Typical preprocessing step is the removal of power line artifact (50 Hz or
60 Hz). Here we notch filter the data at 60, 120 and 180 to remove the
original 60 Hz artifact and the harmonics. The power spectra are plotted
before and after the filtering to show the effect. The drop after 600 Hz
appears because the data was filtered during the acquisition. In memory
saving mode we do the filtering at evoked stage, which is not something you
usually would do.
End of explanation
if not use_precomputed:
raw.filter(None, 100., h_trans_bandwidth=0.5, filter_length='10s',
phase='zero-double', fir_design='firwin2')
Explanation: We also lowpass filter the data at 100 Hz to remove the hf components.
End of explanation
tmin, tmax = -0.1, 0.5
event_id = dict(standard=1, deviant=2)
reject = dict(mag=4e-12, eog=250e-6)
# find events
events = mne.find_events(raw, stim_channel='UPPT001')
Explanation: Epoching and averaging.
First some parameters are defined and events extracted from the stimulus
channel (UPPT001). The rejection thresholds are defined as peak-to-peak
values and are in T / m for gradiometers, T for magnetometers and
V for EOG and EEG channels.
End of explanation
sound_data = raw[raw.ch_names.index('UADC001-4408')][0][0]
onsets = np.where(np.abs(sound_data) > 2. * np.std(sound_data))[0]
min_diff = int(0.5 * raw.info['sfreq'])
diffs = np.concatenate([[min_diff + 1], np.diff(onsets)])
onsets = onsets[diffs > min_diff]
assert len(onsets) == len(events)
diffs = 1000. * (events[:, 0] - onsets) / raw.info['sfreq']
print('Trigger delay removed (μ ± σ): %0.1f ± %0.1f ms'
% (np.mean(diffs), np.std(diffs)))
events[:, 0] = onsets
del sound_data, diffs
Explanation: The event timing is adjusted by comparing the trigger times on detected
sound onsets on channel UADC001-4408.
End of explanation
raw.info['bads'] = ['MLO52-4408', 'MRT51-4408', 'MLO42-4408', 'MLO43-4408']
Explanation: We mark a set of bad channels that seem noisier than others. This can also
be done interactively with raw.plot by clicking the channel name
(or the line). The marked channels are added as bad when the browser window
is closed.
End of explanation
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=['meg', 'eog'],
baseline=(None, 0), reject=reject, preload=False,
proj=True)
Explanation: The epochs (trials) are created for MEG channels. First we find the picks
for MEG and EOG channels. Then the epochs are constructed using these picks.
The epochs overlapping with annotated bad segments are also rejected by
default. To turn off rejection by bad segments (as was done earlier with
saccades) you can use keyword reject_by_annotation=False.
End of explanation
epochs.drop_bad()
# avoid warning about concatenating with annotations
epochs.set_annotations(None)
epochs_standard = mne.concatenate_epochs([epochs['standard'][range(40)],
epochs['standard'][182:222]])
epochs_standard.load_data() # Resampling to save memory.
epochs_standard.resample(600, npad='auto')
epochs_deviant = epochs['deviant'].load_data()
epochs_deviant.resample(600, npad='auto')
del epochs
Explanation: We only use first 40 good epochs from each run. Since we first drop the bad
epochs, the indices of the epochs are no longer same as in the original
epochs collection. Investigation of the event timings reveals that first
epoch from the second run corresponds to index 182.
End of explanation
evoked_std = epochs_standard.average()
evoked_dev = epochs_deviant.average()
del epochs_standard, epochs_deviant
Explanation: The averages for each condition are computed.
End of explanation
for evoked in (evoked_std, evoked_dev):
evoked.filter(l_freq=None, h_freq=40., fir_design='firwin')
Explanation: Typical preprocessing step is the removal of power line artifact (50 Hz or
60 Hz). Here we lowpass filter the data at 40 Hz, which will remove all
line artifacts (and high frequency information). Normally this would be done
to raw data (with :func:mne.io.Raw.filter), but to reduce memory
consumption of this tutorial, we do it at evoked stage. (At the raw stage,
you could alternatively notch filter with :func:mne.io.Raw.notch_filter.)
End of explanation
evoked_std.plot(window_title='Standard', gfp=True, time_unit='s')
evoked_dev.plot(window_title='Deviant', gfp=True, time_unit='s')
Explanation: Here we plot the ERF of standard and deviant conditions. In both conditions
we can see the P50 and N100 responses. The mismatch negativity is visible
only in the deviant condition around 100-200 ms. P200 is also visible around
170 ms in both conditions but much stronger in the standard condition. P300
is visible in deviant condition only (decision making in preparation of the
button press). You can view the topographies from a certain time span by
painting an area with clicking and holding the left mouse button.
End of explanation
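# As a small follow-up (a sketch, not part of the original tutorial), the latency of the
# peak in the MMN window can also be read off programmatically with Evoked.get_peak():
ch_name, latency = evoked_dev.get_peak(ch_type='mag', tmin=0.1, tmax=0.2)
print('deviant-condition peak at %s, %0.3f s' % (ch_name, latency))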
times = np.arange(0.05, 0.301, 0.025)
evoked_std.plot_topomap(times=times, title='Standard', time_unit='s')
evoked_dev.plot_topomap(times=times, title='Deviant', time_unit='s')
Explanation: Show activations as topography figures.
End of explanation
evoked_difference = combine_evoked([evoked_dev, evoked_std], weights=[1, -1])
evoked_difference.plot(window_title='Difference', gfp=True, time_unit='s')
Explanation: We can see the MMN effect more clearly by looking at the difference between
the two conditions. P50 and N100 are no longer visible, but MMN/P200 and
P300 are emphasised.
End of explanation
reject = dict(mag=4e-12)
cov = mne.compute_raw_covariance(raw_erm, reject=reject)
cov.plot(raw_erm.info)
del raw_erm
Explanation: Source estimation.
We compute the noise covariance matrix from the empty room measurement
and use it for the other runs.
End of explanation
trans_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-trans.fif')
trans = mne.read_trans(trans_fname)
Explanation: The transformation is read from a file:
End of explanation
if use_precomputed:
fwd_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-meg-oct-6-fwd.fif')
fwd = mne.read_forward_solution(fwd_fname)
else:
src = mne.setup_source_space(subject, spacing='ico4',
subjects_dir=subjects_dir, overwrite=True)
model = mne.make_bem_model(subject=subject, ico=4, conductivity=[0.3],
subjects_dir=subjects_dir)
bem = mne.make_bem_solution(model)
fwd = mne.make_forward_solution(evoked_std.info, trans=trans, src=src,
bem=bem)
inv = mne.minimum_norm.make_inverse_operator(evoked_std.info, fwd, cov)
snr = 3.0
lambda2 = 1.0 / snr ** 2
del fwd
Explanation: To save time and memory, the forward solution is read from a file. Set
use_precomputed=False in the beginning of this script to build the
forward solution from scratch. The head surfaces for constructing a BEM
solution are read from a file. Since the data only contains MEG channels, we
only need the inner skull surface for making the forward solution. For more
information: CHDBBCEJ, :func:mne.setup_source_space,
bem-model, :func:mne.bem.make_watershed_bem.
End of explanation
stc_standard = mne.minimum_norm.apply_inverse(evoked_std, inv, lambda2, 'dSPM')
brain = stc_standard.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.1, time_unit='s')
del stc_standard, brain
Explanation: The sources are computed using dSPM method and plotted on an inflated brain
surface. For interactive controls over the image, use keyword
time_viewer=True.
Standard condition.
End of explanation
stc_deviant = mne.minimum_norm.apply_inverse(evoked_dev, inv, lambda2, 'dSPM')
brain = stc_deviant.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.1, time_unit='s')
del stc_deviant, brain
Explanation: Deviant condition.
End of explanation
stc_difference = apply_inverse(evoked_difference, inv, lambda2, 'dSPM')
brain = stc_difference.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.15, time_unit='s')
Explanation: Difference.
End of explanation |
14,744 |
Given the following text problem statement, write Python code to implement the functionality described below in problem statement
Problem:
split into train test sets
| Python Code::
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
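# For a reproducible split, the same call is often made with a fixed seed
# (random_state is a standard train_test_split argument); shown here only as a comment:
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)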
|
14,745 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Porkchops with poliastro
Porkchops are also known as mission design curves since they show different parameters used to design the ballistic trajectories for the targeting problem, such as the time of flight, the launch energy (C3) and the arrival velocity.
Step1: Plot that porkchop!
All that we must do is pass the two bodies, the two time spans and some extra plotting parameters related to the different information shown along the figure, such as time-of-flight lines, arrival velocity and the maximum C3 value. | Python Code:
import astropy.units as u
from poliastro.plotting.porkchop import porkchop
from poliastro.bodies import Earth, Mars
from poliastro.util import time_range
launch_span = time_range("2005-04-30", end="2005-10-07")
arrival_span = time_range("2005-11-16", end="2006-12-21")
Explanation: Porkchops with poliastro
Porkchops are also known as mission design curves since they show different parameters used to design the ballistic trajectories for the targeting problem, such as:
Time of flight (TFL)
Launch energy (C3L)
Arrival velocity (VHP)
For the moment, poliastro is only capable of creating these mission plots between poliastro.bodies objects. However, future versions are intended to also support porkchop plots involving NEOs.
Basic modules
To create a porkchop plot with poliastro, we need to import the porkchop function from the poliastro.plotting.porkchop module. Two poliastro.bodies objects are also necessary for computing the associated targeting problem. Finally, time_range, a very useful function available in poliastro.util, makes it possible to define the spans of launch and arrival dates for the problem.
End of explanation
dv_dpt, dv_arr, c3dpt, c3arr, tof = porkchop(Earth, Mars,
launch_span, arrival_span)
Explanation: Plot that porkchop!
All that we must do is pass the two bodies, the two time spans and some extra plotting parameters related to the different information shown along the figure, such as:
If we want poliastro to plot time of flight lines: tfl=True/False
If we want poliastro to plot arrival velocity: vhp=True/False
The maximum value for C3 to be plotted: max_c3=45 * u.km**2 / u.s**2 (by default)
End of explanation |
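# The same call with the plotting options named above written out explicitly (a sketch:
# tfl, vhp and max_c3 are the keyword arguments described in the text, with max_c3
# shown at its documented default value):
dv_dpt, dv_arr, c3dpt, c3arr, tof = porkchop(Earth, Mars, launch_span, arrival_span,
                                             tfl=True, vhp=True,
                                             max_c3=45 * u.km**2 / u.s**2)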
14,746 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Optimal regularization
The alpha optimization problem
The bias-variance tradeoff
After model optimization, the bias and the variance of the error stand in the following trade-off relationship: when one of them gets smaller, the other gets larger.
* This approach is rarely used directly in practice, because you would also have to fix a criterion for how much to weigh variance against bias.
When a function $f$ is approximated by another function $\hat{f}$ and the error is denoted $\epsilon$, we have
$$
y = f + \epsilon \approx \hat{f}
$$
It follows that
$$\text{E}[y] = \text{E}[f + \epsilon] = \text{E}[f] = f$$
The bias is defined as
$$
\begin{align}
\text{Bias}[\hat{f}] = f - \text{E}[\hat{f}]
\end{align}
$$
The variance of the error is defined as
$$
\begin{align}
\text{Var}[\hat{f}] = \text{E}[ ( \hat{f} - \text{E}[\hat{f}])^2 ]
\end{align}
$$
The bias and the variance then satisfy the following relationship, so they cannot both be reduced at the same time.
$$
\begin{align}
\text{E}[(y - \hat{f})^2]
& = (\text{Bias}[\hat{f}(x)])^2 + \text{Var}[\hat{f}(x)] + \text{Var}[\epsilon] \
\end{align}
$$
<img src="https
Step1: Reading the bar chart for the polynomial case
A higher score is better. The numbers along the bottom are the polynomial regression degrees, and the bar heights are the mean scores.
The black whiskers show the spread (the error bars); as the degree grows this spread keeps increasing, which is the usual sign of overfitting.
With plain R-squared the mean keeps rising with the degree, but once adjusted (that is, once a penalty or constraint is added) it comes back down.
Hyperparameter optimization
Since the same bias-variance tradeoff holds when a hyperparameter changes, we need a procedure for finding the optimal hyperparameter value.
Step2: What if the curve does not come back down like that? Then overfitting is not occurring, and there is no real need to regularize.
Step3: One Standard Error Rule
In practice, the optimum found while sweeping the hyperparameter is itself an imprecise value that carries an error. Taking this error into account, it is practical to accept up to about one standard deviation of error and choose the simpler model (in a regularized model, the one with the stronger constraint).
The usual approach gives a cross-validation optimum whose error is large, so there is no guarantee that it is truly optimal.
Given two values that score about the same, we prefer the more regularized one. So instead of the raw optimum, we move one standard error away from the point chosen earlier and pick that value; in this example, something larger than 0.04.
That said, this rule is just one convention used in practice; the variance can be so large that the one-standard-error rule is not always appropriate. | Python Code:
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import cross_val_score
n_samples = 1000
np.random.seed(0)
X = np.sort(np.random.rand(n_samples))
y = np.cos(1.5 * np.pi * X) + np.random.randn(n_samples) * 0.1
X = X[:, np.newaxis]
K = 100
def cv_mse(degree):
polynomial_features = PolynomialFeatures(degree=degree)
linear_regression = LinearRegression()
model = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
scores = -cross_val_score(model, X, y, "mean_squared_error", cv=K)
return scores
scores1 = cv_mse(3)
sns.distplot(scores1)
print(scores1.mean(), scores1.std())
D = 9
degrees = 2**np.arange(D)
all_scores = np.zeros((K, D))
for i, d in enumerate(degrees):
scores = cv_mse(d)
all_scores[:, i] = scores
df = pd.DataFrame(-np.log(all_scores), columns=degrees)
df.describe()
df.mean().plot(kind="bar", rot=0, yerr=df.std())
plt.show()
Explanation: Optimal regularization
The alpha optimization problem
The bias-variance tradeoff
After model optimization, the bias and the variance of the error stand in the following trade-off relationship: when one of them gets smaller, the other gets larger.
* This approach is rarely used directly in practice, because you would also have to fix a criterion for how much to weigh variance against bias.
When a function $f$ is approximated by another function $\hat{f}$ and the error is denoted $\epsilon$, we have
$$
y = f + \epsilon \approx \hat{f}
$$
It follows that
$$\text{E}[y] = \text{E}[f + \epsilon] = \text{E}[f] = f$$
The bias is defined as
$$
\begin{align}
\text{Bias}[\hat{f}] = f - \text{E}[\hat{f}]
\end{align}
$$
The variance of the error is defined as
$$
\begin{align}
\text{Var}[\hat{f}] = \text{E}[ ( \hat{f} - \text{E}[\hat{f}])^2 ]
\end{align}
$$
The bias and the variance then satisfy the following relationship, so they cannot both be reduced at the same time.
$$
\begin{align}
\text{E}[(y - \hat{f})^2]
& = (\text{Bias}[\hat{f}(x)])^2 + \text{Var}[\hat{f}(x)] + \text{Var}[\epsilon] \
\end{align}
$$
<img src="https://datascienceschool.net/upfiles/c1d938635778456d9ccc94d9fbf59e22.png">
(Proof)
$$
\begin{align}
\text{Var}[y]
&= \text{E}[(y - \text{E}[y])^2] \
&= \text{E}[(y - f)^2] \
&= \text{E}[(f + \epsilon - f)^2] \
&= \text{E}[\epsilon^2] \
&= \text{Var}[\epsilon]
\end{align}
$$
$$
\begin{align}
\text{E}\big[(y - \hat{f})^2\big]
& = \text{E}[y^2] + \text{E}[\hat{f}^2] - \text{E}[2y\hat{f}] \
& = \text{Var}[y] + \text{E}[y]^2 + \text{Var}[\hat{f}] + \text{E}[\hat{f}]^2 - 2\text{E}[y\hat{f}] \
& = \text{Var}[y] + f^2 + \text{Var}[\hat{f}] + \text{E}[\hat{f}]^2 - 2f\text{E}[\hat{f}] \
& = \text{Var}[y] + \text{Var}[\hat{f}] + f^2 - 2f\text{E}[\hat{f}] + \text{E}[\hat{f}]^2 \
& = \text{Var}[y] + \text{Var}[\hat{f}] + (f - \text{E}[\hat{f}])^2 \
& = \text{Var}[y] + \text{Var}[\hat{f}] + (\text{Bias}[\hat{f}])^2 \
\end{align}
$$
Measuring the error and the variance of a polynomial regression while varying its degree gives the following.
End of explanation
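# A tiny numeric illustration (a sketch under simple assumptions): refit the same
# degree-3 pipeline on many resampled training sets and check that, at one test point,
# bias^2 + variance + noise variance is comparable to the mean squared error.
x0, f0 = np.array([[0.5]]), np.cos(1.5 * np.pi * 0.5)   # test point and its noise-free value
preds = []
for seed in range(200):
    rng = np.random.RandomState(seed)
    Xb = np.sort(rng.rand(50))
    yb = np.cos(1.5 * np.pi * Xb) + rng.randn(50) * 0.1
    m = Pipeline([("poly", PolynomialFeatures(degree=3)), ("lr", LinearRegression())])
    m.fit(Xb[:, np.newaxis], yb)
    preds.append(m.predict(x0)[0])
preds = np.array(preds)
bias2, variance = (f0 - preds.mean()) ** 2, preds.var()
mse = np.mean((f0 + np.random.RandomState(999).randn(200) * 0.1 - preds) ** 2)
print(bias2, variance, bias2 + variance + 0.1 ** 2, mse)   # the last two should be close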
from sklearn.datasets import load_diabetes
from sklearn.linear_model import Lasso
from sklearn.cross_validation import cross_val_score
data = load_diabetes()
X = data.data[:200]
y = data.target[:200]
# Let's try this with Lasso.
model = Lasso()
alphas = np.logspace(-4, -.5, 50)
scores = list()
for alpha in alphas:
model.alpha = alpha
this_scores = cross_val_score(model, X, y, "mean_squared_error", cv=5)
scores.append(np.mean(this_scores))
plt.semilogx(alphas, scores)
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle=':')
plt.show()
Explanation: Reading the bar chart for the polynomial case
A higher score is better. The numbers along the bottom are the polynomial regression degrees, and the bar heights are the mean scores.
The black whiskers show the spread (the error bars); as the degree grows this spread keeps increasing, which is the usual sign of overfitting.
With plain R-squared the mean keeps rising with the degree, but once adjusted (that is, once a penalty or constraint is added) it comes back down.
Hyperparameter optimization
Since the same bias-variance tradeoff holds when a hyperparameter changes, we need a procedure for finding the optimal hyperparameter value.
End of explanation
from sklearn.linear_model import LassoCV
alphas = np.logspace(-4, -.5, 50)
lasso_cv = LassoCV(alphas=alphas, cv=5)
lasso_cv.fit(X, y)
print(lasso_cv.alpha_ )
scores = -lasso_cv.mse_path_.mean(axis=1)
plt.semilogx(lasso_cv.alphas_, scores)
plt.axhline(np.max(scores), linestyle=':')
plt.axvline(lasso_cv.alpha_ , linestyle=':')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.show()
Explanation: What if the curve does not come back down like that? Then overfitting is not occurring, and there is no real need to regularize.
End of explanation
from sklearn.linear_model import LassoCV
alphas = np.logspace(-4, -.5, 50)
lasso_cv = LassoCV(alphas=alphas, cv=5)
lasso_cv.fit(X, y)
scores = -lasso_cv.mse_path_.mean(axis=1)
scores_std = lasso_cv.mse_path_.std(axis=1)
scores_std1 = scores + scores_std / np.sqrt(len(lasso_cv.mse_path_))
scores_std2 = scores - scores_std / np.sqrt(len(lasso_cv.mse_path_))
alpha_1se = lasso_cv.alphas_[np.argmax(scores_std1 > np.max(scores))]
print(alpha_1se)
plt.semilogx(lasso_cv.alphas_, scores)
plt.semilogx(lasso_cv.alphas_, scores_std1, 'o-')
plt.semilogx(lasso_cv.alphas_, scores_std2, 'o-')
plt.axhline(np.max(scores), linestyle=':')
plt.axvline(lasso_cv.alpha_ , linestyle=':')
plt.axvline(alpha_1se, linestyle=':')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.show()
Explanation: One Standard Error Rule
In practice, the optimum found while sweeping the hyperparameter is itself an imprecise value that carries an error. Taking this error into account, it is practical to accept up to about one standard deviation of error and choose the simpler model (in a regularized model, the one with the stronger constraint).
The usual approach gives a cross-validation optimum whose error is large, so there is no guarantee that it is truly optimal.
Given two values that score about the same, we prefer the more regularized one. So instead of the raw optimum, we move one standard error away from the point chosen earlier and pick that value; in this example, something larger than 0.04.
That said, this rule is just one convention used in practice; the variance can be so large that the one-standard-error rule is not always appropriate.
End of explanation |
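# Finally, a short sketch of how the more conservative alpha would actually be used:
# refit the Lasso at alpha_1se and count the surviving coefficients.
model_1se = Lasso(alpha=alpha_1se).fit(X, y)
print(alpha_1se, (model_1se.coef_ != 0).sum(), "non-zero coefficients at the 1-SE alpha")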
14,747 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
BottomUpParceLiNGAM
Import and settings
In this example, we need to import numpy, pandas, and graphviz in addition to lingam.
Step1: Test data
First, we generate a causal structure with 7 variables. Then we create a dataset with 6 variables from x0 to x5, with x6 being the latent variable for x2 and x3.
Step2: Causal Discovery
To run causal discovery, we create a BottomUpParceLiNGAM object and call the fit method.
Step3: Using the causal_order_ properties, we can see the causal ordering as a result of the causal discovery. x2 and x3, which have latent confounders as parents, are stored in a list without causal ordering.
Step4: Also, using the adjacency_matrix_ properties, we can see the adjacency matrix as a result of the causal discovery. The coefficients between variables with latent confounders are np.nan.
Step5: We can draw a causal graph with a utility function.
Step6: Independence between error variables
To check if the LiNGAM assumption is broken, we can get p-values of independence between error variables. The value in the i-th row and j-th column of the obtained matrix shows the p-value of the independence of the error variables $e_i$ and $e_j$.
Step7: Bootstrapping
We call bootstrap() method instead of fit(). Here, the second argument specifies the number of bootstrap sampling.
Step8: Causal Directions
Since BootstrapResult object is returned, we can get the ranking of the causal directions extracted by get_causal_direction_counts() method. In the following sample code, n_directions option is limited to the causal directions of the top 8 rankings, and min_causal_effect option is limited to causal directions with a coefficient of 0.01 or more.
Step9: We can check the result by utility function.
Step10: Directed Acyclic Graphs
Also, using the get_directed_acyclic_graph_counts() method, we can get the ranking of the DAGs extracted. In the following sample code, n_dags option is limited to the dags of the top 3 rankings, and min_causal_effect option is limited to causal directions with a coefficient of 0.01 or more.
Step11: We can check the result by utility function.
Step12: Probability
Using the get_probabilities() method, we can get the probability of bootstrapping.
Step13: Total Causal Effects
Using the get_total_causal_effects() method, we can get the list of total causal effects, returned as a dictionary-type variable.
We can display the list nicely by assigning it to pandas.DataFrame. Also, we have replaced the variable index with a label below.
Step14: We can easily perform sorting operations with pandas.DataFrame.
Step15: And with pandas.DataFrame, we can easily filter by keywords. The following code extracts the causal direction towards x1.
Step16: Because it holds the raw data of the total causal effect (the original data for calculating the median), it is possible to draw a histogram of the values of the causal effect, as shown below.
Step17: Bootstrap Probability of Path
Using the get_paths() method, we can explore all paths from any variable to any variable and calculate the bootstrap probability for each path. The path will be output as an array of variable indices. For example, the array [3, 0, 1] shows the path from variable X3 through variable X0 to variable X1. | Python Code:
import numpy as np
import pandas as pd
import graphviz
import lingam
from lingam.utils import print_causal_directions, print_dagc, make_dot
import warnings
warnings.filterwarnings('ignore')
print([np.__version__, pd.__version__, graphviz.__version__, lingam.__version__])
np.set_printoptions(precision=3, suppress=True)
Explanation: BottomUpParceLiNGAM
Import and settings
In this example, we need to import numpy, pandas, and graphviz in addition to lingam.
End of explanation
np.random.seed(1000)
x6 = np.random.uniform(size=1000)
x3 = 2.0*x6 + np.random.uniform(size=1000)
x0 = 0.5*x3 + np.random.uniform(size=1000)
x2 = 2.0*x6 + np.random.uniform(size=1000)
x1 = 0.5*x0 + 0.5*x2 + np.random.uniform(size=1000)
x5 = 0.5*x0 + np.random.uniform(size=1000)
x4 = 0.5*x0 - 0.5*x2 + np.random.uniform(size=1000)
# The latent variable x6 is not included.
X = pd.DataFrame(np.array([x0, x1, x2, x3, x4, x5]).T, columns=['x0', 'x1', 'x2', 'x3', 'x4', 'x5'])
X.head()
m = np.array([[0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0],
[0.5, 0.0,-0.5, 0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
dot = make_dot(m)
# Save pdf
dot.render('dag')
# Save png
dot.format = 'png'
dot.render('dag')
dot
Explanation: Test data
First, we generate a causal structure with 7 variables. Then we create a dataset with 6 variables from x0 to x5, with x6 being the latent variable for x2 and x3.
End of explanation
model = lingam.BottomUpParceLiNGAM()
model.fit(X)
Explanation: Causal Discovery
To run causal discovery, we create a BottomUpParceLiNGAM object and call the fit method.
End of explanation
model.causal_order_
Explanation: Using the causal_order_ properties, we can see the causal ordering as a result of the causal discovery. x2 and x3, which have latent confounders as parents, are stored in a list without causal ordering.
End of explanation
model.adjacency_matrix_
Explanation: Also, using the adjacency_matrix_ properties, we can see the adjacency matrix as a result of the causal discovery. The coefficients between variables with latent confounders are np.nan.
End of explanation
make_dot(model.adjacency_matrix_)
Explanation: We can draw a causal graph with a utility function.
End of explanation
p_values = model.get_error_independence_p_values(X)
print(p_values)
Explanation: Independence between error variables
To check if the LiNGAM assumption is broken, we can get p-values of independence between error variables. The value in the i-th row and j-th column of the obtained matrix shows the p-value of the independence of the error variables $e_i$ and $e_j$.
End of explanation
import warnings
warnings.filterwarnings('ignore', category=UserWarning)
model = lingam.BottomUpParceLiNGAM()
result = model.bootstrap(X, n_sampling=100)
Explanation: Bootstrapping
We call bootstrap() method instead of fit(). Here, the second argument specifies the number of bootstrap sampling.
End of explanation
cdc = result.get_causal_direction_counts(n_directions=8, min_causal_effect=0.01, split_by_causal_effect_sign=True)
Explanation: Causal Directions
Since BootstrapResult object is returned, we can get the ranking of the causal directions extracted by get_causal_direction_counts() method. In the following sample code, n_directions option is limited to the causal directions of the top 8 rankings, and min_causal_effect option is limited to causal directions with a coefficient of 0.01 or more.
End of explanation
print_causal_directions(cdc, 100)
Explanation: We can check the result by utility function.
End of explanation
dagc = result.get_directed_acyclic_graph_counts(n_dags=3, min_causal_effect=0.01, split_by_causal_effect_sign=True)
Explanation: Directed Acyclic Graphs
Also, using the get_directed_acyclic_graph_counts() method, we can get the ranking of the DAGs extracted. In the following sample code, n_dags option is limited to the dags of the top 3 rankings, and min_causal_effect option is limited to causal directions with a coefficient of 0.01 or more.
End of explanation
print_dagc(dagc, 100)
Explanation: We can check the result by utility function.
End of explanation
prob = result.get_probabilities(min_causal_effect=0.01)
print(prob)
Explanation: Probability
Using the get_probabilities() method, we can get the probability of bootstrapping.
End of explanation
causal_effects = result.get_total_causal_effects(min_causal_effect=0.01)
# Assign to pandas.DataFrame for pretty display
df = pd.DataFrame(causal_effects)
labels = [f'x{i}' for i in range(X.shape[1])]
df['from'] = df['from'].apply(lambda x : labels[x])
df['to'] = df['to'].apply(lambda x : labels[x])
df
Explanation: Total Causal Effects
Using the get_total_causal_effects() method, we can get the list of total causal effects, returned as a dictionary-type variable.
We can display the list nicely by assigning it to pandas.DataFrame. Also, we have replaced the variable index with a label below.
End of explanation
df.sort_values('effect', ascending=False).head()
df.sort_values('probability', ascending=True).head()
Explanation: We can easily perform sorting operations with pandas.DataFrame.
End of explanation
df[df['to']=='x1'].head()
Explanation: And with pandas.DataFrame, we can easily filter by keywords. The following code extracts the causal direction towards x1.
End of explanation
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
%matplotlib inline
from_index = 0 # index of x0
to_index = 5 # index of x5
plt.hist(result.total_effects_[:, to_index, from_index])
Explanation: Because it holds the raw data of the total causal effect (the original data for calculating the median), it is possible to draw a histogram of the values of the causal effect, as shown below.
End of explanation
from_index = 3 # index of x3
to_index = 1 # index of x0
pd.DataFrame(result.get_paths(from_index, to_index))
Explanation: Bootstrap Probability of Path
Using the get_paths() method, we can explore all paths from any variable to any variable and calculate the bootstrap probability for each path. The path will be output as an array of variable indices. For example, the array [3, 0, 1] shows the path from variable X3 through variable X0 to variable X1.
End of explanation |
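# The paths can also be ranked by their bootstrap probability (a sketch; this assumes
# the dict returned by get_paths() exposes 'path', 'effect' and 'probability' keys,
# as the DataFrame display above suggests):
paths_df = pd.DataFrame(result.get_paths(from_index, to_index))
paths_df.sort_values('probability', ascending=False).head()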
14,748 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
BASIC CONCEPTS TO REMEMBER
Step1: USING EXPRESSIONS AS INDICES
Step2: List slices
Step3: List comprehensions
Step4: modular operator
Step5: String operations
Step6: string indexes and slices
Step7: Finding substrings
Step8: String transformations
Step9: A difficult problem
Step10: Regular expressions
Step11: Metacharacters
Step12: Metachars
. Any character
\w Any alphanumeric (a-z, A-Z, 0-9, etc)
\s any whitespace character (" ", \n, \t)
\S any non-whitespace character
\d any digit (0-9)
Step13: Metachar 2
Step14: aside
Step15: Metacharacters 3
Step16: More metacharacters
Step17: Capturing
read the whole corpus in as one big string | Python Code:
x = [5, 10, 15, 20, 25, 30]
x[3]
[2, 4, 6, 8, 10][4]
#Index beyond list #this is supposed to go wrong
x[90]
type(x)
type(x[0])
#type of list values can be different from the list.
len([10])
#empty list is a starting point
len([])
len([])
max(x), sum(x)
sorted([x])
#brings it back to you in order
range(0,10)
list(range(10))
#gives you the range back as an ordered list
list("THIS IS A TEST")
x[-1]
Explanation: BASIC CONCEPTS TO REMEMBER
End of explanation
x[3]
n = 1 + 2
x[n]
x[2*2]
##negative indices
x[-2]
Explanation: USING EXPRESSIONS AS INDICES
End of explanation
x
x[1:4]
n = 2
x[n:n+3]
x[-3:-1]
x[3:9000]
type(x[1:4])
for item in x[2:5]:
print(item)
x[:4] #from the beginning up to index 4
x[4:] #from index 4 to the end
x[-3:]
Explanation: List slices
End of explanation
x
#What lists are for?
#list----> transformation----> List
#List----> Filter------> List
source1 = [3, -1, 4, -2, 5, -3, 6]
dest = []
for item in source1:
if item > 0:
dest.append(item * item)
dest
ark = ["aardvark", "badger", "crocodile", "dingo", "emu", "flamingo"]
zoo = []
for item in ark:
if len(item) <= 6:
zoo.append(item)
zoo
[item for item in ark if len(item) <= 6]
[item * item for item in source1 if item > 0]
##do the same example
x
#for loops
stuff = []
for item in x:
stuff.append(item -3)
stuff
#simpler things can be done with list comprehensions
[item - 3 for item in x]
source = [3, -1, 4, -2, 5, -3, 6]
Explanation: List comprehensions
End of explanation
60 % 5
60 % 7
5 % 2
6 % 2
dest = []
for i in range(10):
if i % 2 == 0:
dest.append(i*i)
dest
[i*i for i in range(10) if i % 2 == 0]
## a slightly more practical example
rawdata = "2,3,5,7,11,13,17,19,23"
values = rawdata.split(",")
int("17")
int("-17")
num_values = [int(i) for i in values]
# int() converts each string (a sequence of characters) into an integer (a number)
sum(num_values)
Explanation: Modulo operator: %
End of explanation
#the in operator
"foo" in "buffoon"
ark = ["aardvark", "badger", "crocodile", "dingo", "emu", "flamingo"]
[animal for animal in ark if 'a' in animal]
for animal in ark:
if 'a' in animal:
print(animal)
dest = []
for animal in ark:
if 'a' in animal:
dest.append(animal)
dest
check = "foodie"
check.startswith("foo")
check.isdigit()
number_str ="112325" #FOR STRINGS
number_str.isdigit()
check.isupper()
yelling = "I LIKE BIG BUTTS AND I CANNOT LIE"
yelling.isupper() #FOR CAPITAL LETTERS
Explanation: String operations
End of explanation
message = "bugalow"
message[3:-2]
src.find("lose") #There is no such a thing in the string
Explanation: string indexes and slices
End of explanation
src = "Now is the winter of our discontent"
src.find("win") #this tell us that win is find in the 11 index
location = src.find("win")
src[location:]
location = src.find("e")
if location != -1:
print(src[location:])
src.count("is")
for vowel in ['a', 'e', 'i', 'o', 'u']:
print(vowel, src.count(vowel))
my_is_patterns = ["is", "is", "is", "is", "is", "is" ]
for item in my_is_patterns:
print(item, src.count(item))
Explanation: Finding substrings
End of explanation
commet = "ARGUMENTATION! DESAFREEMENT! STRIFE!"
commet.upper()
str1 = "dog"
str2 = "Dog"
str1 == str2
str1.lower() == str2.lower()
movie2 = "rosemary's baby"
movie2.title()
rawtext = " wierd extra spaces before and after "
rawtext.strip()
line = "hello / is it me \n you are looking for"
print(line.strip())
song = "I got rythm, I got music, I got my man, who could ask for anything more"
song.replace("I got ", "I used to have ")
song.replace("I got", "someday I will have").replace("anything more", "a future more bright")
rawdata = "Get data that<br>looks like this<br>because it was<br>too much time"
Explanation: String transformations
End of explanation
input_str = "Yes, my zip code is 12345. I heard that Gary's zip code is 23456. But 212 is not zip codes"
#results in a list of strings that has the codes
current = ""
zips = []
for char in input_str:
if char.isdigit():
current += char
else:
current= ""
if len(current) == 5:
zips.append(current)
current = ""
zips
#but does not work for larger numbers
Explanation: A difficult problem
End of explanation
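# Added sketch (not from the original notes): one way to patch the manual loop so that digit
# runs longer than five characters are not mistaken for zip codes, before turning to regex.
current = ""
zips_fixed = []
for char in input_str + " ":  # the trailing space flushes a digit run that ends the string
    if char.isdigit():
        current += char
    else:
        if len(current) == 5:
            zips_fixed.append(current)
        current = ""
zips_fixed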
import re
zips = re.findall(r"\d{5}", input_str)
zips
import urllib
from urllib.request import urlretrieve
urlretrieve("https://raw.githubusercontent.com/ledeprogram/courses/master/databases/data/enronsubjects.txt", "enronsubjects.txt")
subjects = [x.strip() for x in open("enronsubjects.txt").readlines()]
import re
[line for line in subjects if re.search("shipping", line)]
[item for item in subjects if re.search("shipping", item)] #it works for other words
Explanation: Regular expressions
End of explanation
#special characters that you can use in regular expressions that have a special meaning
[item for item in subjects if re.search("sh.pping", item)]
Explanation: Metacharacters
End of explanation
[item for item in subjects if re.search("\d.\d\d\wm", item)]
# subject lines that have dates, e.g. 12/01/99
[line for line in subjects if re.search("\d\d/\d\d", line)]
[line for line in subjects if re.search("[aeiou][aeiou][aeiou][aeiou]", line)]
[line for line in subjects if re.search("F[wW]:", line)] #This can be useful for checking data
#inside emails.
[line for line in subjects if re.search("\d:[012345]\d[apAP][mM]", line)]
Explanation: Metachars
. any character
\w any alphanumeric character (a-z, A-Z, 0-9, underscore)
\s any whitespace character (" ", \n, \t)
\S any non-whitespace character
\d any digit (0-9)
End of explanation
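# Added illustration (not part of the original notes) of the character classes listed above:
sample = "Order 66 was executed at 9:15 am.\tConfirm?"
re.findall(r"\d", sample)    # every digit
re.findall(r"\w+", sample)   # runs of alphanumeric characters
re.findall(r"\s", sample)    # whitespace characters (space, tab)
re.findall(r"\S+", sample)   # runs of non-whitespace characters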
[line for line in subjects if re.search("^New York", line)]
[line for line in subjects if re.search("!!!!!$", line)]
[line for line in subjects if re.search("oil", line)] #doesnt work
[line for line in subjects if re.search(" oil ", line)] #works but not for the ones that are at the end of the string
[line for line in subjects if re.search(r"\boil\b", line)]
Explanation: Metachar 2 : Anchors
^ beginning of the string
$ end of string
\b word boundary
End of explanation
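# Added illustration of the anchors above, using a small toy list instead of the Enron subjects:
toy = ["New York update", "update from New York", "big news!!!!!", "oily rags", "crude oil prices"]
[s for s in toy if re.search(r"^New York", s)]  # must start with "New York"
[s for s in toy if re.search(r"!$", s)]         # must end with "!"
[s for s in toy if re.search(r"\boil\b", s)]    # "oil" only as a whole word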
#Another way to do it is using raw expression before the data (r)
x = "this is\na test"
print(x)
X = "this is\t\t\tanotther test"
print(X)
[line for line in subjects if re.search("\\boil\\b", line)] #tell python interpret \\ as writing \b
normal = "hello\nthere"
raw = r"hello\nthere"
print("normal:", normal)
print('raw:', raw)
[line for line in subjects if re.search(r"\b\.\.\.\b", line)]
Explanation: aside: metacharacters and escape characters
End of explanation
[line for line in subjects if re.search(r"^F[wW]d", line)]
[line for line in subjects if re.search(r"^F[wW]d?:", line)] #means that if that char is missinf then is ok
[line for line in subjects if re.search(r"[nN]ews.*!$", line)]
[line for line in subjects if re.search(r"^R[eE]:.*\b[iI]nvestor", line)]
Explanation: Metacharacters 3: quantifiers
{n} matches exactly n times
{n,m} matches at least n times, but no more than m times
{n,} matches at least n times, with no upper limit
"+" match at least once ({1,})
"*" match zero or more times ({0,})
? match one time or zero times ({0,1})
End of explanation
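# Added illustration of the quantifiers above:
nums = "7 42 867 5309 12345"
re.findall(r"\b\d{3}\b", nums)    # exactly three digits: ['867']
re.findall(r"\b\d{2,4}\b", nums)  # two to four digits: ['42', '867', '5309']
re.findall(r"\d+", nums)          # one or more digits
re.search(r"colou?r", "color and colour")  # '?' makes the 'u' optional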
[line for line in subjects if re.search(r"\b(?:energy|oil|slectricity)\b")]
Explanation: More metacharacters: alternation
(?:x|y) match either x or y; (?:x|y|z) match x, y, or z
End of explanation
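# Added illustration of alternation combined with word boundaries:
[line for line in subjects if re.search(r"\b(?:meeting|lunch|dinner)\b", line)][:10]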
all_subjects = open("enronsubjects.txt").read()
all_subjects[:1000]
#domain names: foo.org, cheese.net, stuff.com
re.findall(r"\b\.(/:com|net|org)\b", all_subjects)
[line for line in subjects if re.search(r"\b\w+\.(?:com|net|org)\b", line)]
[line for line in subjects if re.findall(r"\b\w+\.(?:com|net|org)\b", line)]
input_str = "21345-12343 34541213 askdjalfiwejofij 123 asdkjoi 989735"
re.findall(r"\b\d{5}\b", input_str) #Cool to find out zip codes
re.findall(r"New York \b\w+\b", all_subjects)
re.findall(r"New York (\b\w+\b) (\b\w+\b)", all_subjects)
src = "this exaple has been used 423 times"
if re.search(r"\d\d\d\d", src):
print("yup")
else:
print("nope")
match = re.search(r"\d+", src)  # re.search returns a match object (type _sre.SRE_Match)
print(match.start())
print(match.end())
for line in subjects:
match = re.search(r"(A-Z)", line)
if match:
print(match.group())
# this needs to be used for the last part of the homework!
courses = [
"CSCI 105: Introductory Programming for Cat-Lovers",
"LING 214: Pronouncing Things Backwards",
"ANTHRO 342: Theory and Practice of Cheesemongery (Graduate Seminar)",
"CSCI 205: Advanced Programming for Cat-Lovers",
"ENGL 112: Speculative Travel Writing"
]
print("Course catolog report")
for item in courses:
match = re.search(r"^(\w+) (\d+): (.*)$", item)
print(match.group(1))
print(match.group(2))
print(match.group(3))
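# Added, slightly more defensive variant: skip items that do not match instead of raising
# an AttributeError on a None match.
for item in courses:
    match = re.search(r"^(\w+) (\d+): (.*)$", item)
    if match:
        dept, number, title = match.groups()
        print(dept, number, title)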
Explanation: Capturing
read the whole corpus in as one big string
End of explanation |
14,749 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Titanic Survival with DNN
Predicting survival on the Titanic using an artificial neural network in Keras
Supervised Learning. Binary classification
This project is based on a dataset containing demographics and passenger information from 891 of the 2224 passengers and crew on board the Titanic. A description of this dataset is on the Kaggle website, where the data was obtained.
Step1: 1. Data Processing and Exploratory Data Analysis
Step2: Show the data
Step3: Numerical Data
Step4: Non-numerical Data
Step5: Missing values
Step7: Binary target "Survived"
Step9: Remove irrelevant features
Step10: Classify variables
Change categorical variables as dtype 'categorical' and sort columns
Step11: Visualize the data
Categorical features
Step12: Target vs Categorical features
Step13: Numerical features
Step14: Target vs numerical features
Step15: Correlation between numerical features and target
Step16: Most relevant features
Step18: Unlike in third class, most children and women in first and second classes survived.
Fill missing values
Step19: 2. Neural Network model
Select the features
Step20: Scale numerical variables
Shift and scale numerical variables to a standard normal distribution. The scaling factors are saved to be used for predictions.
Step21: Create dummy features
Replace categorical features (no target) with dummy features
Step22: Split the data into training and test sets
Data leakage
Step23: One-hot encode the output
Step24: Build the Neural Network for Binary Classification
Step26: Train the Neural Network
Step28: Train with Cross Validation
Step29: Evaluate the model
Step31: Make predictions
Step32: The results predicted from the model confirm the impact of the sex for the survival probabilities, as well as the class for the survival of women and children.
Compare with non-enhanced features
Step33: Compare removing outliers
Step34: Compare with non-neural network models
XGBoost
Step35: Classical Machine Learning
Step36: Best tree-based model
Step37: Feature importances | Python Code:
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import helper
import keras
helper.info_gpu()
helper.reproducible(seed=0) # Setup reproducible results from run to run using Keras
%matplotlib inline
Explanation: Titanic Survival with DNN
Predicting survival on the Titanic using an artificial neural network in Keras
Supervised Learning. Binary classification
This project is based on a dataset containing demographics and passenger information from 891 of the 2224 passengers and crew on board the Titanic. A description of this dataset is on the Kaggle website, where the data was obtained.
End of explanation
data_path = 'data/titanic_data.csv'
target = ['Survived'] # the target will remain the same throughout the notebook
df_original = pd.read_csv(data_path)
print("{} rows \n{} columns \ntarget: {}".format(*df_original.shape, target))
Explanation: 1. Data Processing and Exploratory Data Analysis
End of explanation
df_original.head(3)
Explanation: Show the data
End of explanation
df_original.describe(percentiles=[0.5])
Explanation: Numerical Data
End of explanation
df_original.describe(include=['O'])
Explanation: Non-numerical Data
End of explanation
helper.missing(df_original)
Explanation: Missing values
End of explanation
df = df_original.copy() # modified dataset
def enhance_features(df, dict_categories=None):
Enhance dataframe df
df = df.copy()
# filter Cabin to first letter
df["Cabin"] = df["Cabin"].str[0]
# get Title from Name
df['Title'] = df["Name"].str.extract('([A-Za-z]+)\.', expand=False)
# remove low frequency values for the new feautres
fields = ['Cabin', 'Title']
df, dict_categories = helper.remove_categories(df, target=target, show=False)
# Alone passenger
df['Alone'] = ((df["SibSp"] + df["Parch"]) == 0).astype(int)
return df, dict_categories
df, dict_categories = enhance_features(df)
Explanation: Binary target "Survived": ~38% ones; F1 score won't be used
Some values are missing for key values (e.g. Age)
Some features (e.g. PassengerId, Name, Ticket) seem irrelevant to survival probabilities
Transform the data
Enhance and add new features
End of explanation
def drop_irrelevant_features(df, inplace=False):
Remove non-relevant columns from dataftame df (inplace)
if not inplace:
df = df.copy()
df.drop(['PassengerId', 'Name', 'Ticket'], axis='columns', inplace=True)
if not inplace:
return df
drop_irrelevant_features(df, inplace=True)
Explanation: Remove irrelevant features
End of explanation
df = helper.classify_data(df, target, numerical=["Age", "SibSp", "Parch", "Fare"])
pd.DataFrame(dict(df.dtypes), index=["Type"])[df.columns].head() # show data types
Explanation: Classify variables
Change categorical variables as dtype 'categorical' and sort columns: numerical + categorical + target
End of explanation
helper.show_categorical(df, target=target, sharey=True)
Explanation: Visualize the data
Categorical features
End of explanation
helper.show_target_vs_categorical(df, target)
plt.ylim([0, 1]);
Explanation: Target vs Categorical features
End of explanation
helper.show_numerical(df, kde=True)
Explanation: Numerical features
End of explanation
helper.show_target_vs_numerical(df, target, jitter=0.2)
plt.ylim([-0.4, 1.4])
plt.yticks([0, 1]);
#df.groupby('Survived')['Age'].hist(alpha=0.4)
# helper.show_target_vs_numerical(df_3sigma, target, numerical, jitter=0.2)
Explanation: Target vs numerical features
End of explanation
helper.show_correlation(df, target)
Explanation: Correlation between numerical features and target
End of explanation
sns.FacetGrid(
df, row="Sex", col="Pclass", hue="Survived", size=3, margin_titles=True).map(
plt.hist, "Age", alpha=.7).add_legend()
plt.ylim([0, 70]);
# df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean().sort_values(
# by='Survived', ascending=False)
# helper.show_target_vs_categorical(df.loc[(df['Age']<12) | (df['Sex']=='female')],
# target, categorical)
Explanation: Most relevant features
End of explanation
helper.missing(df)
plt.figure(figsize=(7, 3))
sns.countplot(data=df, x='Pclass', hue='Cabin');
helper.show_target_vs_categorical(df, ['Age'], figsize=(17, 2)) # Age vs categorical
def fill_missing_values(df, inplace=False):
Fill missing values of the dataframe df
if not inplace:
df = df.copy()
# fill Embarked with mode
df['Embarked'].fillna(df['Embarked'].mode()[0], inplace=True)
# fill Cabin: the mode for grouped Pclass and Embarked
ref = df.groupby(['Pclass', 'Embarked'])['Cabin'].transform(lambda x: x.mode()[0])
df['Cabin'].fillna(ref.iloc[0], inplace=True)
# fill Age: the median for grouped Pclass and Title
ref = df.groupby(['Pclass', 'Title'])['Age'].transform('median')
df['Age'].fillna(ref, inplace=True)
# fill Title: by age and sex only (not spouse or job)
# df.loc[df['Title']=='Master','Age'].unique()
# for idx, row in df.iterrows():
# if (pd.isnull(row['Title'])):
# if row['Age'] >= 13:
# if row['Sex'] == 'male':
# df.loc[idx, 'Title'] = "Mr"
# else:
# df.loc[idx, 'Title'] = "Mrs"
# else:
# if row['Sex'] == 'male':
# df.loc[idx, 'Title'] = "Master"
# else:
# df.loc[idx, 'Title'] = "Miss"
# fill missing categorical values with the mode (if any)
categorical = list(df.select_dtypes(include=['category']))
modes = df[categorical].mode() # this solves fillna issue with mode()
for idx, f in enumerate(df[categorical]):
df[f].fillna(modes.iloc[0, idx], inplace=True)
# fill missing numeric NaN values with the median (if any)
df.fillna(df.median(), inplace=True)
if not inplace:
return df
# bins = list(range(0,80,10))
# # bins = (0, 5, 10, 15, 20, 30, 40, 50, 60)
# labels = ["{}-{}".format(i, j) for i,j in zip(bins[:-1],bins[:-1])]
# df['Age_cat'] = pd.cut(df['Age'], bins, labels=labels).astype('category')
# df = df.drop(['Age'], axis='columns')
fill_missing_values(df, inplace=True)
Explanation: Unlike in third class, most children and women in first and second classes survived.
Fill missing values
End of explanation
droplist = [] # features to drop from the model
# For the model 'data' instead of 'df'
data = df.copy()
df.drop(droplist, axis='columns', inplace=True)
data.head(3)
Explanation: 2. Neural Network model
Select the features
End of explanation
data, scale_param = helper.scale(data)
Explanation: Scale numerical variables
Shift and scale numerical variables to a standard normal distribution. The scaling factors are saved to be used for predictions.
End of explanation
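# helper.scale() is a project-specific utility whose source is not shown here. A rough
# scikit-learn equivalent (an assumption, kept commented out so the pipeline is unchanged):
# from sklearn.preprocessing import StandardScaler
# numerical_cols = data.select_dtypes(include=[np.number]).columns.drop(target)
# sk_scaler = StandardScaler().fit(data[numerical_cols])
# data[numerical_cols] = sk_scaler.transform(data[numerical_cols])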
data, dict_dummies = helper.replace_by_dummies(data, target)
model_features = [f for f in data if f not in target] # sorted neural network inputs
data.head(3)
Explanation: Create dummy features
Replace categorical features (no target) with dummy features
End of explanation
from sklearn.model_selection import train_test_split
def split(data, target, test_size=0.15):
train, test = train_test_split(data, test_size=test_size, random_state=9,
stratify=data[target])
# Separate the data into features and target (x=features, y=target)
x_train, y_train = train.drop(target, axis=1).values, train[target].values
x_test, y_test = test.drop(target, axis=1).values, test[target].values
# _nc: non-categorical yet (needs one-hot encoding)
return x_train, y_train, x_test, y_test
x_train, y_train, x_test, y_test = split(data, target, test_size=0.2)
Explanation: Split the data into training and test sets
Data leakage: Test set hidden when training the model, but seen when preprocessing the dataset
End of explanation
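# The data-leakage note above refers to the scaler and dummies being fitted on the full dataset
# before splitting. A leakage-free variant (sketch only, reusing the same helpers and kept
# commented out) would split first and fit the scaling parameters on the training portion only:
# train_df, test_df = train_test_split(df, test_size=0.2, stratify=df[target], random_state=9)
# train_scaled, scale_param = helper.scale(train_df)
# test_scaled, _ = helper.scale(test_df, scale_param)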
def one_hot_output(y_train, y_test):
num_classes = len(np.unique(y_train))
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
return y_train, y_test
y_train, y_test = one_hot_output(y_train, y_test)
print("train size \t X:{} \t Y:{}".format(x_train.shape, y_train.shape))
print("test size \t X:{} \t Y:{} ".format(x_test.shape, y_test.shape))
Explanation: One-hot encode the output
End of explanation
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
def build_nn(input_size, output_size, summary=False):
input_nodes = input_size
weights = keras.initializers.RandomNormal(stddev=0.001)
leaky_relu = keras.layers.advanced_activations.LeakyReLU(alpha=0.01)
model = Sequential()
model.add(
Dense(
input_nodes,
input_dim=input_size,
kernel_initializer=weights,
activation='relu',
bias_initializer='zero'))
model.add(leaky_relu)
model.add(Dropout(.3))
model.add(
Dense(
output_size,
activation='softmax',
kernel_initializer=weights,
bias_initializer='zero'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
if summary:
model.summary()
return model
model = build_nn(x_train.shape[1], y_train.shape[1], summary=True)
Explanation: Build the Neural Network for Binary Classification
End of explanation
from time import time
model_path = os.path.join("models", "titanic.h5")
def train_nn(model, x_train, y_train, validation_data=None, path=False, show=True):
Train the neural network model. If no validation_data is provided, a split for validation
will be used
if show:
print('Training ....')
callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss', patience=1, verbose=0)]
t0 = time()
history = model.fit(
x_train,
y_train,
epochs=1000,
batch_size=64,
verbose=0,
validation_split=0.25,
validation_data = validation_data,
callbacks=callbacks)
if show:
print("time: \t {:.1f} s".format(time() - t0))
helper.show_training(history)
if path:
model.save(path)
print("\nModel saved at", path)
return history
model = None
model = build_nn(x_train.shape[1], y_train.shape[1], summary=False)
train_nn(model, x_train, y_train, path=model_path);
Explanation: Train the Neural Network
End of explanation
from sklearn.model_selection import StratifiedKFold
def cv_train_nn(x_train, y_train, n_splits):
Create and Train models for cross validation. Return best model
skf = StratifiedKFold(n_splits=n_splits, shuffle=True)
score = []
best_model = None
best_acc = 0
print('Training {} models for Cross Validation ...'.format(n_splits))
for train, val in skf.split(x_train[:, 0], y_train[:, 0]):
model = None
model = build_nn(x_train.shape[1], y_train.shape[1], summary=False)
history = train_nn(
model,
x_train[train],
y_train[train],
show=False,
validation_data=(x_train[val], y_train[val]))
val_acc = history.history['val_acc'][-1]
score.append(val_acc)
if val_acc > best_acc: # save best model (fold) for evaluation and predictions
best_model = model
best_acc = val_acc
model = best_model
print('\nCross Validation accuracy: {:.3f}'.format(np.mean(score)))
return best_model
model = cv_train_nn(x_train, y_train, 4)
Explanation: Train with Cross Validation
End of explanation
def evaluate_nn(model, x_test, y_test):
score = model.evaluate(x_test, y_test, verbose=0)
print("Test Accuracy: {:.3f}".format(score[1]))
#model = keras.models.load_model(model_path)
evaluate_nn(model, x_test, y_test)
y_pred = model.predict(x_test, verbose=2)
helper.binary_classification_scores(
y_test[:, 1], y_pred[:, 1], return_dataframe=True, index="Neural Network")
Explanation: Evaluate the model
End of explanation
def predict_manual(new_df):
input: custom dataframe
new_data = new_df.copy()
# force data types to previous dataframe df
for col in new_data:
new_data[col] = new_data[col].astype(df.dtypes[col])
# standardize numerical variables
new_data, _ = helper.scale(new_data, scale_param)
# replace categorical features by dummy variables (using existing dummies)
new_data, _ = helper.replace_by_dummies(new_data, target, dict_dummies)
# sort columns to match with manual entries
new_data = new_data[model_features] ## model_features: sorted list used in the model
# make predictions
prediction = model.predict(new_data.values)[:, 1]
return (prediction)
# for index, row in new_data.iterrows():
# single_pred = model.predict(np.array([row]))
# print('{}:\t {:.0f}%'.format(index,single_pred[0,1] * 100))
# input data format
df.describe()
df.describe(include=['category'])
print(list(df))
new_passengers = {
'Average man': [26, 1, 0, 14, 2, 'male', 'C', 'S', 'Mr', 0],
'Average woman': [26, 1, 0, 14, 2, 'female', 'C', 'S', 'Mrs', 0],
'Alone woman 3c': [26, 0, 2, 8, 3, 'female', 'C', 'S', 'Miss', 1],
'Boy 1c ': [7, 0, 2, 31, 1, 'male', 'C', 'S', 'Master', 0],
'Boy 2c ': [7, 0, 2, 14, 2, 'male', 'C', 'S', 'Master', 0],
'Boy 3c ': [7, 0, 2, 8, 3, 'male', 'C', 'S', 'Master', 0],
}
# create a dataframe with the new data
new_df = pd.DataFrame(
data=list(new_passengers.values()),
index=new_passengers.keys(),
columns= [f for f in list(df) if f not in target])
prediction = predict_manual(new_df)
new_df['Survival prob. (%)'] = (prediction * 100).astype(int)
new_df
Explanation: Make predictions
End of explanation
# Same dataset without:
# enhancing features
# adding new features
# filling missing values using grouped median
def non_enhanced_pipeline(df):
df = df.copy()
# select features & classify features
df.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis='columns', inplace=True)
df = helper.classify_data(df, target, numerical=["Age", "SibSp", "Parch", "Fare"])
# fill NaN
df.fillna(df.median(), inplace=True)
# standardize and create dummies
data, _ = helper.scale(df)
data, _ = helper.replace_by_dummies(data, target)
# split and one-hot output
x_train, y_train, x_test, y_test = split(data, target, test_size=0.15)
y_train, y_test = one_hot_output(y_train, y_test)
# build, train and evaluate model
model = build_nn(x_train.shape[1], y_train.shape[1], summary=False)
train_nn(model, x_train, y_train, path=False, show=False)
evaluate_nn(model, x_test, y_test)
non_enhanced_pipeline(df_original)
Explanation: The results predicted from the model confirm the impact of sex on the survival probabilities, as well as the impact of passenger class on the survival of women and children.
Compare with non-enhanced features
End of explanation
def remove_outliers_pipeline(df):
df = df.copy()
# transform features
df, dict_categories = enhance_features(df)
# select features & classify features
df.drop(['PassengerId', 'Name', 'Ticket'], axis='columns', inplace=True)
df = helper.classify_data(df, target, numerical=["Age", "SibSp", "Parch", "Fare"])
# remove outliers
helper.remove_outliers(df, inplace=True) # remove default values above 3 times std
# fill missing values (enhanced)
fill_missing_values(df, inplace=True)
# standardize and create dummies
data, _ = helper.scale(df)
data, _ = helper.replace_by_dummies(data, target)
# split and one-hot output
x_train, y_train, x_test, y_test = split(data, target, test_size=0.15)
y_train, y_test = one_hot_output(y_train, y_test)
# build, train and evaluate model
model = build_nn(x_train.shape[1], y_train.shape[1], summary=False)
train_nn(model, x_train, y_train, path=False, show=False)
evaluate_nn(model, x_test, y_test)
remove_outliers_pipeline(df_original)
Explanation: Compare removing outliers
End of explanation
import warnings
warnings.filterwarnings("ignore")
helper.XGBClassifier(
x_train, y_train[:,1], x_test, y_test[:,1], max_depth=4, n_estimators=400, learning_rate=0.1)
Explanation: Compare with non-neural network models
XGBoost
End of explanation
# enhanced features
helper.ml_classification(x_train, y_train[:,1], x_test, y_test[:,1])
from sklearn.ensemble import RandomForestClassifier
clf_random_forest = RandomForestClassifier(n_estimators = 30,
max_depth=13, class_weight='balanced', n_jobs=-1,
random_state=0).fit(x_train, np.ravel(y_train[:,1]))
Explanation: Classical Machine Learning
End of explanation
y_pred = clf_random_forest.predict(x_test).reshape([-1, 1])
helper.binary_classification_scores(
y_test[:, 1], y_pred, return_dataframe=True, index="Random Forest")
Explanation: Best tree-based model
End of explanation
re = helper.feature_importances(model_features, clf_random_forest)
Explanation: Feature importances
End of explanation |
14,750 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Facies classification using Machine Learning- Majority voting
Contest entry by Priyanka Raghavan and Steve Hall
This notebook demonstrates how to train a machine learning algorithm to predict facies from well log data. The dataset we will use comes from a class exercise from The University of Kansas on Neural Networks and Fuzzy Systems. This exercise is based on a consortium project to use machine learning techniques to create a reservoir model of the largest gas fields in North America, the Hugoton and Panoma Fields. For more info on the origin of the data, see Bohling and Dubois (2003) and Dubois et al. (2007).
The dataset we will use is log data from nine wells that have been labeled with a facies type based on observation of core. We will use this log data to train several classifiers to predict facies types, combining them with a majority-voting ensemble built in scikit-learn.
First we will explore the dataset. We will load the training data from 9 wells, and take a look at what we have to work with. We will plot the data from a couple wells, and create cross plots to look at the variation within the data.
Next we will condition the data set. We will remove the entries that have incomplete data. The data will be scaled to have zero mean and unit variance. We will also split the data into training and test sets.
We will then be ready to build the classifier.
Finally, once we have a built and tuned the classifier, we can apply the trained model to classify facies in wells which do not already have labels. We will apply the classifier to two wells, but in principle you could apply the classifier to any number of wells that had the same log data.
Exploring the dataset
First, we will examine the data set we will use to train the classifier. The training data is contained in the file facies_vectors.csv. The dataset consists of 5 wireline log measurements, two indicator variables and a facies label at half foot intervals. In machine learning terminology, each log measurement is a feature vector that maps a set of 'features' (the log measurements) to a class (the facies type). We will use the pandas library to load the data into a dataframe, which provides a convenient data structure to work with well log data.
Step1: This data is from the Council Grove gas reservoir in Southwest Kansas. The Panoma Council Grove Field is predominantly a carbonate gas reservoir encompassing 2700 square miles in Southwestern Kansas. This dataset is from nine wells (with 4149 examples), consisting of a set of seven predictor variables and a rock facies (class) for each example vector and validation (test) data (830 examples from two wells) having the same seven predictor variables in the feature vector. Facies are based on examination of cores from nine wells taken vertically at half-foot intervals. Predictor variables include five from wireline log measurements and two geologic constraining variables that are derived from geologic knowledge. These are essentially continuous variables sampled at a half-foot sample rate.
The seven predictor variables are
Step2: This is a quick view of the statistical distribution of the input variables. Looking at the count values, there are 3232 feature vectors in the training set.
Remove a single well to use as a blind test later. For that let us look at distribution of facies across wells
Step3: Based on graphs above NEWBY has a good distribution of wells and is taken out as blind well to be tested. Also since training data has null, remove them from data.
Step4: Let's take a look at the data from individual wells in a more familiar log plot form. We will create plots for the five well log variables, as well as a log for facies labels. The plots are based on the those described in Alessandro Amato del Monte's excellent tutorial.
Step5: Placing the log plotting code in a function will make it easy to plot the logs from multiples wells, and can be reused later to view the results when we apply the facies classification model to other wells. The function was written to take a list of colors and facies labels as parameters.
We then show log plots for wells SHRIMPLIN.
Step6: In addition to individual wells, we can look at how the various facies are represented by the entire training set. Let's plot a histogram of the number of training examples for each facies class.
Step7: This shows the distribution of examples by facies for the examples in the training set. Dolomite (facies 7) has the fewest with 81 examples. Depending on the performance of the classifier we are going to train, we may consider getting more examples of these facies.
Conditioning the data set
Now we extract just the feature variables we need to perform the classification. The predictor variables are the five wireline values and two geologic constraining variables. We also get a vector of the facies labels that correspond to each feature vector.
Step8: Scikit includes a preprocessing module that can 'standardize' the data (giving each variable zero mean and unit variance, also called whitening). Many machine learning algorithms assume features will be standard normally distributed data (ie
Step9: Scikit also includes a handy function to randomly split the training data into training and test sets. The test set contains a small subset of feature vectors that are not used to train the network. Because we know the true facies labels for these examples, we can compare the results of the classifier to the actual facies and determine the accuracy of the model. Let's use 20% of the data for the test set.
Step10: Training the classifier using Majority voting
Now we use the cleaned and conditioned training set to create a facies classifier. As mentioned above, we will use a machine learning approach known as majority voting.
We train the voting classifier on four base models: k-nearest neighbors, random forest, logistic regression, and gradient boosting.
Step11: Now that the model has been trained on our data, we can use it to predict the facies of the feature vectors in the test set. Because we know the true facies labels of the vectors in the test set, we can use the results to evaluate the accuracy of the classifier.
We need some metrics to evaluate how good our classifier is doing. A confusion matrix is a table that can be used to describe the performance of a classification model. Scikit-learn allows us to easily create a confusion matrix by supplying the actual and predicted facies labels.
The confusion matrix is simply a 2D array. The entries of confusion matrix C[i][j] are equal to the number of observations predicted to have facies j, but are known to have facies i.
To simplify reading the confusion matrix, a function has been written to display the matrix along with facies labels and various error metrics. See the file classification_utilities.py in this repo for the display_cm() function.
Step12: The rows of the confusion matrix correspond to the actual facies labels. The columns correspond to the labels assigned by the classifier. For example, consider the first row. For the feature vectors in the test set that actually have label SS, 23 were correctly indentified as SS, 21 were classified as CSiS and 2 were classified as FSiS.
The entries along the diagonal are the facies that have been correctly classified. Below we define two functions that will give an overall value for how the algorithm is performing. The accuracy is defined as the number of correct classifications divided by the total number of classifications.
Step13: As noted above, the boundaries between the facies classes are not all sharp, and some of them blend into one another. The error within these 'adjacent facies' can also be calculated. We define an array to represent the facies adjacent to each other. For facies label i, adjacent_facies[i] is an array of the adjacent facies labels.
Step14: Using Voting classifier Now
The voting classifier is now used to vote and classify models
Step15: Applying the classification model to the blind data
We held a well back from the training, and stored it in a dataframe called blind
Step16: The label vector is just the Facies column
Step17: We can form the feature matrix by dropping some of the columns and making a new dataframe
Step18: Now we can transform this with the scaler we made before
Step19: Now it's a simple matter of making a prediction and storing it back in the dataframe
Step20: Let's see how we did with the confusion matrix
Step21: We managed 0.46 using the test data, but it was from the same wells as the training data. T
Step22: ...but does remarkably well on the adjacent facies predictions.
Step23: Applying the classification model to new data
Now that we have a trained facies classification model we can use it to identify facies in wells that do not have core data. In this case, we will apply the classifier to two wells, but we could use it on any number of wells for which we have the same set of well logs for input.
This dataset is similar to the training data except it does not have facies labels. It is loaded into a dataframe called test_data.
Step24: The data needs to be scaled using the same constants we used for the training data.
Step25: Finally we predict facies labels for the unknown data, and store the results in a Facies column of the test_data dataframe.
Step26: We can use the well log plot to view the classification results along with the well logs.
Step27: Finally we can write out a csv file with the well data along with the facies classification results. | Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pandas import set_option
set_option("display.max_rows", 10)
pd.options.mode.chained_assignment = None
filename = 'facies_vectors.csv'
training_data = pd.read_csv(filename)
training_data
Explanation: Facies classification using Machine Learning- Majority voting
Contest entry by Priyanka Raghavan and Steve Hall
This notebook demonstrates how to train a machine learning algorithm to predict facies from well log data. The dataset we will use comes from a class exercise from The University of Kansas on Neural Networks and Fuzzy Systems. This exercise is based on a consortium project to use machine learning techniques to create a reservoir model of the largest gas fields in North America, the Hugoton and Panoma Fields. For more info on the origin of the data, see Bohling and Dubois (2003) and Dubois et al. (2007).
The dataset we will use is log data from nine wells that have been labeled with a facies type based on observation of core. We will use this log data to train several classifiers to predict facies types, combining them with a majority-voting ensemble built in scikit-learn.
First we will explore the dataset. We will load the training data from 9 wells, and take a look at what we have to work with. We will plot the data from a couple wells, and create cross plots to look at the variation within the data.
Next we will condition the data set. We will remove the entries that have incomplete data. The data will be scaled to have zero mean and unit variance. We will also split the data into training and test sets.
We will then be ready to build the classifier.
Finally, once we have a built and tuned the classifier, we can apply the trained model to classify facies in wells which do not already have labels. We will apply the classifier to two wells, but in principle you could apply the classifier to any number of wells that had the same log data.
Exploring the dataset
First, we will examine the data set we will use to train the classifier. The training data is contained in the file facies_vectors.csv. The dataset consists of 5 wireline log measurements, two indicator variables and a facies label at half foot intervals. In machine learning terminology, each log measurement is a feature vector that maps a set of 'features' (the log measurements) to a class (the facies type). We will use the pandas library to load the data into a dataframe, which provides a convenient data structure to work with well log data.
End of explanation
training_data['Well Name'] = training_data['Well Name'].astype('category')
training_data['Formation'] = training_data['Formation'].astype('category')
training_data['Well Name'].unique()
training_data.describe()
Explanation: This data is from the Council Grove gas reservoir in Southwest Kansas. The Panoma Council Grove Field is predominantly a carbonate gas reservoir encompassing 2700 square miles in Southwestern Kansas. This dataset is from nine wells (with 4149 examples), consisting of a set of seven predictor variables and a rock facies (class) for each example vector and validation (test) data (830 examples from two wells) having the same seven predictor variables in the feature vector. Facies are based on examination of cores from nine wells taken vertically at half-foot intervals. Predictor variables include five from wireline log measurements and two geologic constraining variables that are derived from geologic knowledge. These are essentially continuous variables sampled at a half-foot sample rate.
The seven predictor variables are:
* Five wire line log curves include gamma ray (GR), resistivity logging (ILD_log10),
photoelectric effect (PE), neutron-density porosity difference and average neutron-density porosity (DeltaPHI and PHIND). Note, some wells do not have PE.
* Two geologic constraining variables: nonmarine-marine indicator (NM_M) and relative position (RELPOS)
The nine discrete facies (classes of rocks) are:
1. Nonmarine sandstone
2. Nonmarine coarse siltstone
3. Nonmarine fine siltstone
4. Marine siltstone and shale
5. Mudstone (limestone)
6. Wackestone (limestone)
7. Dolomite
8. Packstone-grainstone (limestone)
9. Phylloid-algal bafflestone (limestone)
These facies aren't discrete, and gradually blend into one another. Some have neighboring facies that are rather close. Mislabeling within these neighboring facies can be expected to occur. The following table lists the facies, their abbreviated labels and their approximate neighbors.
Facies |Label| Adjacent Facies
:---: | :---: |:--:
1 |SS| 2
2 |CSiS| 1,3
3 |FSiS| 2
4 |SiSh| 5
5 |MS| 4,6
6 |WS| 5,7
7 |D| 6,8
8 |PS| 6,7,9
9 |BS| 7,8
Let's clean up this dataset. The 'Well Name' and 'Formation' columns can be turned into a categorical data type.
End of explanation
# 1=sandstone 2=c_siltstone 3=f_siltstone
# 4=marine_silt_shale 5=mudstone 6=wackestone 7=dolomite
# 8=packstone 9=bafflestone
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00',
'#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS',
'WS', 'D','PS', 'BS']
#facies_color_map is a dictionary that maps facies labels
#to their respective colors
facies_color_map = {}
for ind, label in enumerate(facies_labels):
facies_color_map[label] = facies_colors[ind]
def label_facies(row, labels):
return labels[ row['Facies'] -1]
#training_data.loc[:,'FaciesLabels'] = training_data.apply(lambda row: label_facies(row, facies_labels), axis=1)
faciesVals = training_data['Facies'].values
well = training_data['Well Name'].values
mpl.rcParams['figure.figsize'] = (20.0, 10.0)
for w_idx, w in enumerate(np.unique(well)):
ax = plt.subplot(3, 4, w_idx+1)
hist = np.histogram(faciesVals[well == w], bins=np.arange(len(facies_labels)+1)+.5)
plt.bar(np.arange(len(hist[0])), hist[0], color=facies_colors, align='center')
ax.set_xticks(np.arange(len(hist[0])))
ax.set_xticklabels(facies_labels)
ax.set_title(w)
Explanation: This is a quick view of the statistical distribution of the input variables. Looking at the count values, there are 3232 feature vectors in the training set.
Remove a single well to use as a blind test later. For that let us look at distribution of facies across wells
End of explanation
PE_mask = training_data['PE'].notnull().values
training_data = training_data[PE_mask]
blind = training_data[training_data['Well Name'] == 'NEWBY']
training_data = training_data[training_data['Well Name'] != 'NEWBY']
training_data.loc[:,'FaciesLabels'] = training_data.apply(lambda row: label_facies(row, facies_labels), axis=1)
Explanation: Based on graphs above NEWBY has a good distribution of wells and is taken out as blind well to be tested. Also since training data has null, remove them from data.
End of explanation
def make_facies_log_plot(logs, facies_colors):
#make sure logs are sorted by depth
logs = logs.sort_values(by='Depth')
cmap_facies = colors.ListedColormap(
facies_colors[0:len(facies_colors)], 'indexed')
ztop=logs.Depth.min(); zbot=logs.Depth.max()
cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
f, ax = plt.subplots(nrows=1, ncols=6, figsize=(8, 12))
ax[0].plot(logs.GR, logs.Depth, '-g')
ax[1].plot(logs.ILD_log10, logs.Depth, '-')
ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
ax[4].plot(logs.PE, logs.Depth, '-', color='black')
im=ax[5].imshow(cluster, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
divider = make_axes_locatable(ax[5])
cax = divider.append_axes("right", size="20%", pad=0.05)
cbar=plt.colorbar(im, cax=cax)
cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
'SiSh', ' MS ', ' WS ', ' D ',
' PS ', ' BS ']))
cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')
for i in range(len(ax)-1):
ax[i].set_ylim(ztop,zbot)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=3)
ax[0].set_xlabel("GR")
ax[0].set_xlim(logs.GR.min(),logs.GR.max())
ax[1].set_xlabel("ILD_log10")
ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
ax[2].set_xlabel("DeltaPHI")
ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
ax[3].set_xlabel("PHIND")
ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
ax[4].set_xlabel("PE")
ax[4].set_xlim(logs.PE.min(),logs.PE.max())
ax[5].set_xlabel('Facies')
ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
ax[5].set_xticklabels([])
f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94)
Explanation: Let's take a look at the data from individual wells in a more familiar log plot form. We will create plots for the five well log variables, as well as a log for facies labels. The plots are based on the those described in Alessandro Amato del Monte's excellent tutorial.
End of explanation
make_facies_log_plot(
training_data[training_data['Well Name'] == 'SHRIMPLIN'],
facies_colors)
Explanation: Placing the log plotting code in a function will make it easy to plot the logs from multiples wells, and can be reused later to view the results when we apply the facies classification model to other wells. The function was written to take a list of colors and facies labels as parameters.
We then show log plots for wells SHRIMPLIN.
End of explanation
#count the number of unique entries for each facies, sort them by
#facies number (instead of by number of entries)
facies_counts = training_data['Facies'].value_counts().sort_index()
#use facies labels to index each count
facies_counts.index = facies_labels
facies_counts.plot(kind='bar',color=facies_colors,
title='Distribution of Training Data by Facies')
facies_counts
Explanation: In addition to individual wells, we can look at how the various facies are represented by the entire training set. Let's plot a histogram of the number of training examples for each facies class.
End of explanation
correct_facies_labels = training_data['Facies'].values
feature_vectors = training_data.drop(['Formation', 'Well Name', 'Depth','Facies','FaciesLabels'], axis=1)
feature_vectors.describe()
Explanation: This shows the distribution of examples by facies for the examples in the training set. Dolomite (facies 7) has the fewest with 81 examples. Depending on the performance of the classifier we are going to train, we may consider getting more examples of these facies.
Conditioning the data set
Now we extract just the feature variables we need to perform the classification. The predictor variables are the five wireline values and two geologic constraining variables. We also get a vector of the facies labels that correspond to each feature vector.
End of explanation
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(feature_vectors)
scaled_features = scaler.transform(feature_vectors)
feature_vectors
Explanation: Scikit includes a preprocessing module that can 'standardize' the data (giving each variable zero mean and unit variance, also called whitening). Many machine learning algorithms assume features will be standard normally distributed data (ie: Gaussian with zero mean and unit variance). The factors used to standardize the training set must be applied to any subsequent feature set that will be input to the classifier. The StandardScaler class can be fit to the training set, and later used to standardize any subsequent data.
End of explanation
from sklearn.model_selection import train_test_split  # sklearn.cross_validation is deprecated/removed in newer scikit-learn
X_train, X_test, y_train, y_test = train_test_split(
scaled_features, correct_facies_labels, test_size=0.2, random_state=42)
Explanation: Scikit also includes a handy function to randomly split the training data into training and test sets. The test set contains a small subset of feature vectors that are not used to train the network. Because we know the true facies labels for these examples, we can compare the results of the classifier to the actual facies and determine the accuracy of the model. Let's use 20% of the data for the test set.
End of explanation
from sklearn import neighbors
clf = neighbors.KNeighborsClassifier(n_neighbors=20,
weights='distance',
algorithm='kd_tree',
leaf_size=30,
metric='minkowski',
p=1)
clf.fit(X_train,y_train)
predicted_labels = clf.predict(X_test)
Explanation: Training the classifier using Majority voting
Now we use the cleaned and conditioned training set to create a facies classifier. As mentioned above, we will use a machine learning approach known as majority voting.
We train the voting classifier on four base models: k-nearest neighbors, random forest, logistic regression, and gradient boosting.
End of explanation
from sklearn.metrics import confusion_matrix
from classification_utilities import display_cm, display_adj_cm
conf = confusion_matrix(y_test, predicted_labels)
display_cm(conf, facies_labels, hide_zeros=True)
Explanation: Now that the model has been trained on our data, we can use it to predict the facies of the feature vectors in the test set. Because we know the true facies labels of the vectors in the test set, we can use the results to evaluate the accuracy of the classifier.
We need some metrics to evaluate how good our classifier is doing. A confusion matrix is a table that can be used to describe the performance of a classification model. Scikit-learn allows us to easily create a confusion matrix by supplying the actual and predicted facies labels.
The confusion matrix is simply a 2D array. The entries of confusion matrix C[i][j] are equal to the number of observations predicted to have facies j, but are known to have facies i.
To simplify reading the confusion matrix, a function has been written to display the matrix along with facies labels and various error metrics. See the file classification_utilities.py in this repo for the display_cm() function.
End of explanation
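# Added sketch: scikit-learn's classification_report gives per-facies precision/recall/F1
# alongside the custom display_cm() table.
from sklearn.metrics import classification_report
print(classification_report(y_test, predicted_labels,
                            labels=list(range(1, 10)), target_names=facies_labels))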
def accuracy(conf):
total_correct = 0.
nb_classes = conf.shape[0]
for i in np.arange(0,nb_classes):
total_correct += conf[i][i]
acc = total_correct/sum(sum(conf))
return acc
Explanation: The rows of the confusion matrix correspond to the actual facies labels. The columns correspond to the labels assigned by the classifier. For example, consider the first row. For the feature vectors in the test set that actually have label SS, 23 were correctly indentified as SS, 21 were classified as CSiS and 2 were classified as FSiS.
The entries along the diagonal are the facies that have been correctly classified. Below we define two functions that will give an overall value for how the algorithm is performing. The accuracy is defined as the number of correct classifications divided by the total number of classifications.
End of explanation
adjacent_facies = np.array([[1], [0,2], [1], [4], [3,5], [4,6,7], [5,7], [5,6,8], [6,7]])
def accuracy_adjacent(conf, adjacent_facies):
nb_classes = conf.shape[0]
total_correct = 0.
for i in np.arange(0,nb_classes):
total_correct += conf[i][i]
for j in adjacent_facies[i]:
total_correct += conf[i][j]
return total_correct / sum(sum(conf))
print('Facies classification accuracy = %f' % accuracy(conf))
print('Adjacent facies classification accuracy = %f' % accuracy_adjacent(conf, adjacent_facies))
#Now do random forest
from sklearn.ensemble import RandomForestClassifier
RFC = RandomForestClassifier(n_estimators=200,
criterion='gini',
max_features='auto',
max_depth=None,
min_samples_split=7,
min_samples_leaf=1,
min_weight_fraction_leaf=0,
max_leaf_nodes=None,
min_impurity_split=1e-07,
bootstrap=True,
oob_score=False,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None
)
# n_estimators=150,
# min_samples_leaf= 50,class_weight="balanced",oob_score=True,random_state=50
RFC.fit(X_train,y_train)
rfpredicted_labels = RFC.predict(X_test)
RFconf = confusion_matrix(y_test, rfpredicted_labels)
display_cm(RFconf, facies_labels, hide_zeros=True)
print('Facies classification accuracy = %f' % accuracy(RFconf))
print('Adjacent facies classification accuracy = %f' % accuracy_adjacent(RFconf, adjacent_facies))
#Now do Gradient Boosting
seed = 123
np.random.seed(seed)
from sklearn.ensemble import GradientBoostingClassifier
gbModel = GradientBoostingClassifier(loss='deviance',
learning_rate=0.1,
n_estimators=200,
max_depth=2,
min_samples_split=25,
min_samples_leaf=5,
max_features=None,
max_leaf_nodes=None,
random_state=seed,
verbose=0)
gbModel.fit(X_train,y_train)
gbpredicted_labels = gbModel.predict(X_test)
gbconf = confusion_matrix(y_test, gbpredicted_labels)
display_cm(gbconf, facies_labels, hide_zeros=True)
print('Facies classification accuracy = %f' % accuracy(gbconf))
print('Adjacent facies classification accuracy = %f' % accuracy_adjacent(gbconf, adjacent_facies))
from sklearn import linear_model
lgr = linear_model.LogisticRegression(penalty='l2',
dual=False,
C=1e6,
fit_intercept=True,
intercept_scaling=1,
class_weight=None,
max_iter=100,
random_state=seed,
solver='newton-cg',
tol=1e-04,
multi_class='multinomial',
warm_start=False,
verbose=0)
# class_weight='balanced',multi_class='ovr',solver='sag',max_iter=1000,random_state=40,C=1e5)
lgr.fit(X_train,y_train)
lgrpredicted_labels = lgr.predict(X_test)
lgrconf = confusion_matrix(y_test, lgrpredicted_labels)
display_cm(lgrconf, facies_labels, hide_zeros=True)
print('Facies classification accuracy = %f' % accuracy(lgrconf))
print('Adjacent facies classification accuracy = %f' % accuracy_adjacent(lgrconf, adjacent_facies))
Explanation: As noted above, the boundaries between the facies classes are not all sharp, and some of them blend into one another. The error within these 'adjacent facies' can also be calculated. We define an array to represent the facies adjacent to each other. For facies label i, adjacent_facies[i] is an array of the adjacent facies labels.
End of explanation
from sklearn.ensemble import VotingClassifier
vtclf = VotingClassifier(estimators=[
('KNN', clf), ('RFC', RFC), ('GBM', gbModel),('LR',lgr)],
voting='hard',
weights=[2,2,1,1])
vtclf.fit(X_train,y_train)
vtclfpredicted_labels = vtclf.predict(X_test)
vtclfconf = confusion_matrix(y_test, vtclfpredicted_labels)
display_cm(vtclfconf, facies_labels, hide_zeros=True)
print('Facies classification accuracy = %f' % accuracy(vtclfconf))
print('Adjacent facies classification accuracy = %f' % accuracy_adjacent(vtclfconf, adjacent_facies))
Explanation: Using Voting classifier Now
The voting classifier is now used to vote and classify models
End of explanation
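# Added aside (not in the original entry): voting='hard' counts class votes, while voting='soft'
# averages predicted probabilities, which can help when the base models are well calibrated.
# A soft-voting variant might look like this (kept commented out so the submitted model is unchanged):
# vtclf_soft = VotingClassifier(estimators=[('KNN', clf), ('RFC', RFC), ('GBM', gbModel), ('LR', lgr)],
#                               voting='soft', weights=[2, 2, 1, 1])
# vtclf_soft.fit(X_train, y_train)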
blind
Explanation: Applying the classification model to the blind data
We held a well back from the training, and stored it in a dataframe called blind:
End of explanation
y_blind = blind['Facies'].values
Explanation: The label vector is just the Facies column:
End of explanation
well_features = blind.drop(['Facies', 'Formation', 'Well Name', 'Depth'], axis=1)
Explanation: We can form the feature matrix by dropping some of the columns and making a new dataframe:
End of explanation
X_blind = scaler.transform(well_features)
Explanation: Now we can transform this with the scaler we made before:
End of explanation
y_pred = vtclf.predict(X_blind)
blind['Prediction'] = y_pred
Explanation: Now it's a simple matter of making a prediction and storing it back in the dataframe:
End of explanation
cv_conf = confusion_matrix(y_blind, y_pred)
print('Optimized facies classification accuracy = %.2f' % accuracy(cv_conf))
print('Optimized adjacent facies classification accuracy = %.2f' % accuracy_adjacent(cv_conf, adjacent_facies))
Explanation: Let's see how we did with the confusion matrix:
End of explanation
display_cm(cv_conf, facies_labels,
display_metrics=True, hide_zeros=True)
Explanation: We managed 0.46 using the test data, but it was from the same wells as the training data. T
End of explanation
display_adj_cm(cv_conf, facies_labels, adjacent_facies,
display_metrics=True, hide_zeros=True)
def compare_facies_plot(logs, compadre, facies_colors):
#make sure logs are sorted by depth
logs = logs.sort_values(by='Depth')
cmap_facies = colors.ListedColormap(
facies_colors[0:len(facies_colors)], 'indexed')
ztop=logs.Depth.min(); zbot=logs.Depth.max()
cluster1 = np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
cluster2 = np.repeat(np.expand_dims(logs[compadre].values,1), 100, 1)
f, ax = plt.subplots(nrows=1, ncols=7, figsize=(9, 12))
ax[0].plot(logs.GR, logs.Depth, '-g')
ax[1].plot(logs.ILD_log10, logs.Depth, '-')
ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
ax[4].plot(logs.PE, logs.Depth, '-', color='black')
im1 = ax[5].imshow(cluster1, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
im2 = ax[6].imshow(cluster2, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
divider = make_axes_locatable(ax[6])
cax = divider.append_axes("right", size="20%", pad=0.05)
cbar=plt.colorbar(im2, cax=cax)
cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
'SiSh', ' MS ', ' WS ', ' D ',
' PS ', ' BS ']))
cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')
for i in range(len(ax)-2):
ax[i].set_ylim(ztop,zbot)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=3)
ax[0].set_xlabel("GR")
ax[0].set_xlim(logs.GR.min(),logs.GR.max())
ax[1].set_xlabel("ILD_log10")
ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
ax[2].set_xlabel("DeltaPHI")
ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
ax[3].set_xlabel("PHIND")
ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
ax[4].set_xlabel("PE")
ax[4].set_xlim(logs.PE.min(),logs.PE.max())
ax[5].set_xlabel('Facies')
ax[6].set_xlabel(compadre)
ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
ax[5].set_xticklabels([])
ax[6].set_xticklabels([])
f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94)
compare_facies_plot(blind, 'Prediction', facies_colors)
Explanation: ...but does remarkably well on the adjacent facies predictions.
End of explanation
well_data = pd.read_csv('validation_data_nofacies.csv')
well_data['Well Name'] = well_data['Well Name'].astype('category')
well_features = well_data.drop(['Formation', 'Well Name', 'Depth'], axis=1)
Explanation: Applying the classification model to new data
Now that we have a trained facies classification model we can use it to identify facies in wells that do not have core data. In this case, we will apply the classifier to two wells, but we could use it on any number of wells for which we have the same set of well logs for input.
This dataset is similar to the training data except it does not have facies labels. It is loaded into a dataframe called well_data.
End of explanation
X_unknown = scaler.transform(well_features)
Explanation: The data needs to be scaled using the same constants we used for the training data.
End of explanation
#predict facies of unclassified data
y_unknown = vtclf.predict(X_unknown)
# note: vtclf.score() is not applicable here because this dataset has no facies labels to score against
well_data['Facies'] = y_unknown
well_data
well_data['Well Name'].unique()
Explanation: Finally we predict facies labels for the unknown data, and store the results in the Facies column of the well_data dataframe.
End of explanation
make_facies_log_plot(
well_data[well_data['Well Name'] == 'STUART'],
facies_colors=facies_colors)
make_facies_log_plot(
well_data[well_data['Well Name'] == 'CRAWFORD'],
facies_colors=facies_colors)
Explanation: We can use the well log plot to view the classification results along with the well logs.
End of explanation
well_data.to_csv('well_data_with_facies.csv')
Explanation: Finally we can write out a csv file with the well data along with the facies classification results.
End of explanation |
14,751 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
An Introduction to K-Means Clustering
by Scott Hendrickson & Fiona Pigott
K-Means is for learning unknown categories
K-means is a machine learning technique for learning unknown categories--in other words, a technique for unsupervised learning. K-means tries to group n-dimensional data into clusters, where the actual position of those clusters is unknown.
Basic Idea
From the Wikipedia article on k-means clustering
Step1: 1) How might we identify the two clusters?
We're going to set $k = 2$ before trying to use k-means to separate the clusters
We happen to know that $k = 2$ because we just made up this data with two distributions. I'll talk a little at the end about how to guess $k$ for a real-world dataset.
Because we created this example, we know the "truth"
We know which data came from which distribution (this is what k-means is trying to discover).
Here's the truth, just to compare
Step2: 2) Start by guessing the cluster membership
In this case, guessing means "randomly assign cluster membership." There are other heuristics that you could use to make an initial guess, but we won't get into those here.
Step3: 3) Find the center of a set of data points
We'll need a way of determining the center of a set of data, after we make a guess at cluster membership.
In this case, we'll find the centers of the two clusters that we guessed about.
Step4: 4) Update membership of points to closest centroid
Find distances (we will use these to measure how far each point is from each centroid, in this case)
Step5: 5) Put it all together so that we can iterate
Now we're going to iterate--assign clusters, find a centroid, reassign clusters--until the centroid positions stop changing very much.
Step6: K-means with real data
Figuring out how many clusters to look for (that pesky "Step 1")
Now, one thing we haven't covered yet is how to decide on the number of clusters to look for in the first place. There are several different heuristics that we can use to figure out what the "best" number of clusters is (we go into this more in https | Python Code:
# Import some python libraries that we'll need
import matplotlib.pyplot as plt
import random
import math
import sys
%matplotlib inline
def make_data(n_points, n_clusters=2, dim=2, sigma=1):
x = [[] for i in range(dim)]
for i in range(n_clusters):
for d in range(dim):
x[d].extend([random.gauss(i*3,sigma) for j in range(n_points)])
return x
# make our synthetic data
num_clusters = 2
num_points = 100
data_sample = make_data(num_points, num_clusters)
# plot our synthetic data
fig = plt.figure(figsize = [6,6])
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.scatter(*data_sample)
ax.set_title("Sample dataset, {} points per cluster, {} clusters".format(num_points,num_clusters))
Explanation: An Introduction to K-Means Clustering
by Scott Hendrickson & Fiona Pigott
K-Means is for learning unknown categories
K-means is a machine learning technique for learning unknown categories--in other words, a technique for unsupervised learning. K-means tries to group n-dimensional data into clusters, where the actual position of those clusters is unknown.
Basic Idea
From the Wikipedia article on k-means clustering:
"k-means clustering aims to partition n observations into k clusters in which each observation belongs to the cluster with the nearest mean, serving as a prototype of the cluster"
Basically, k-means assumes that for some sensible distance metric, it's possible to partition data into groups around the "center" ("centroid") of different naturally separated clusters in the data.
This concept can be very useful for separating datasets that came from separate generative processes, where the location of each dataset is pretty much unknown. It only works well if there is an expectation that the datasets are clustered around their means, and that the means would reasonably be different. A classic example of where k-means would not separate datasets well is when the datasets have different distributions, but a similar mean. Not a good problem to apply k-means to: <img src="files/ring_clusters.png" style="width: 300px;">
Good problem to apply k-means to: <img src="files/kmeans_clusters.jpg" style="width: 300px;">
Question:
Is there an underlying structure to my data? Does my data have defined categories that I don't know about? How can I identify which datapoint belongs to which category?
Solution:
For a selection of centers (centroids) of data clusters (and we'll talk about how to choose centroids), for each data point, label that data point with the centroid it is closest to.
Algorithm
0) Have a dataset that you want to sort into clusters
1) Choose a number of clusters that you're going to look for (there are ways to optimize this, but you have to fix it for the next step)
2) Guess at cluster membership for each data point (basically, for each data point, randomly assign it to a cluster)
3) Find the center ("centroid") of each cluster (with the data points that you've assigned to it)
4) For each centroid, find which data points are closest to it, and assign those data points to its cluster
5) Repeat 3 & 4 (re-evaluate centroids based on new cluster membership, then re-assign clusters based on new centroids)
0) A cloud of data in two dimensions
Setting up an example of data that could be separated by k-means:
First, we'll generate a synthetic dataset from two different spherical gaussian distributions, setting the spacing so that clouds of data overlap a little.
End of explanation
# plot our synthetic data
fig = plt.figure(figsize = [6,6])
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.scatter(data_sample[0][0:100], data_sample[1][0:100])
ax.scatter(data_sample[0][100:200], data_sample[1][100:200])
ax.set_title("Sample dataset, {} points per cluster, {} clusters".format(num_points,num_clusters))
Explanation: 1) How might we identify the two clusters?
We're going to set $k = 2$ before trying to use k-means to separate the clusters
We happen to know that $k = 2$ because we just made up this data with two distributions. I'll talk a little at the end about how to guess $k$ for a real-world dataset.
Because we created this example, we know the "truth"
We know which data came from which distribution (this is what k-means is trying to discover).
Here's the truth, just to compare:
End of explanation
# each cluster membership is going to have a color label ("red" cluster, "orange" cluster, etc)
co = ["red", "orange", "yellow", "green", "purple", "blue", "black","brown"]
def guess_clusters(x, n_clusters):
    # requires the global co list of color identifiers defined above
    # randomly assign one of the first n_clusters colors to every data point
    return [co[random.choice(range(n_clusters))] for i in range(len(x[0]))]
# now guess the cluster membership--simply by randomly assigning a cluster label
# "orange" or "red" to each of the data points
membership_2 = guess_clusters(data_sample,2)
fig = plt.figure(figsize = [6,6])
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.scatter(*data_sample, color=membership_2)
ax.set_title("Data set drawn from 2 different 2D Gaussian distributions")
Explanation: 2) Start by guessing the cluster membership
In this case, guessing means "randomly assign cluster membership." There are other heuristics that you could use to make an initial guess, but we won't get into those here.
End of explanation
def centroid(x):
return [[sum(col)/float(len(x[0]))] for col in x]
# function to select members of only one cluster
def select_members(x, membership, cluster):
return [ [i for i,label in zip(dim, membership) if label == cluster] for dim in x ]
fig = plt.figure(figsize = [6,6])
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.scatter(*select_members(data_sample, membership_2, "red"), color="red")
ax.scatter(*centroid(select_members(data_sample, membership_2, "red")), color="black", marker="*", s = 100)
ax.set_title("Centroid of the 'red' cluster (black star)")
fig = plt.figure(figsize = [6,6])
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.scatter(*select_members(data_sample, membership_2, "orange"), color="orange")
ax.scatter(*centroid(select_members(data_sample, membership_2, "orange")), color="black", marker="*", s = 100)
ax.set_title("Centroid of the 'orange' cluster (black star)")
Explanation: 3) Find the center of a set of data points
We'll need a way of determining the center of a set of data, after we make a guess at cluster membership.
In this case, we'll find the centers of the two clusters that we guessed about.
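For readers more comfortable with numpy, an equivalent centroid computation for the list-of-dimensions layout used here looks like the sketch below (an aside only; the rest of the notebook deliberately sticks to pure python):
import numpy as np
def centroid_np(x):
    # x is a list of dimensions, e.g. [[x0, x1, ...], [y0, y1, ...]]
    # averaging along axis=1 gives the mean of each dimension
    return [[m] for m in np.mean(np.asarray(x), axis=1)]
# tiny check: the centroid of (0, 0) and (2, 4) is (1, 2)
print(centroid_np([[0.0, 2.0], [0.0, 4.0]]))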
End of explanation
def distance(p1, p2):
# odd... vectors are lists of lists with only 1 element in each dim
return math.sqrt(sum([(i[0]-j[0])**2 for i,j in zip(p1, p2)]))
# here's the distance between two points, just to show how it works
print("Distance between (-1,-1) and (2,3): {}".format(distance([[-1],[-1]],[[2],[3]])))
def reassign(x, centriods):
membership = []
for idx in range(len(x[0])):
min_d = sys.maxsize
cluster = ""
for c, vc in centriods.items():
dist = distance(vc, [[t[idx]] for t in x])
if dist < min_d:
min_d = dist
cluster = c
membership.append(cluster)
return membership
cent_2 = {i:centroid(select_members(data_sample, membership_2, i)) for i in co[:2]}
membership_2 = reassign(data_sample, cent_2)
fig = plt.figure(figsize = [6,6])
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.scatter(*data_sample, color=membership_2)
ax.scatter(*cent_2["red"], color="black", marker="*", s = 360)
ax.scatter(*cent_2["orange"], color="black", marker="*", s = 360)
ax.scatter(*cent_2["red"], color="red", marker="*", s = 200)
ax.scatter(*cent_2["orange"], color="orange", marker="*", s = 200)
Explanation: 4) Update membership of points to closest centroid
Find distances (we will use these to measure how far each point is from each centroid, in this case):
End of explanation
# function
def get_centroids(x, membership):
return {i:centroid(select_members(x, membership, i)) for i in set(membership)}
# redefine with total distance measure
def reassign(x, centroids):
membership, scores = [], {}
# step through all the vectors
for idx in range(len(x[0])):
min_d, cluster = sys.maxsize, None # set the min distance to a large number (we're about to minimize it)
for c, vc in centroids.items():
# get the sum of the distances from each point in the cluster to the centroids
dist = distance(vc, [[t[idx]] for t in x])
if dist < min_d:
min_d = dist
cluster = c
        # score is the minimum distance from each point in a cluster to the centroid of that cluster
scores[cluster] = min_d + scores.get(cluster, 0)
membership.append(cluster)
    # return the membership & the sum of all the scores over all of the clusters
return membership, sum(scores.values())/float(len(x[0]))
def k_means(data, k):
# start with random distribution
membership = guess_clusters(data, k)
score, last_score = 0.0, sys.maxsize
while abs(last_score - score) > 1e-7:
last_score = score
c = get_centroids(data, membership)
membership, score = reassign(data, c)
#print(last_score - score)
return membership, c, score
mem, cl, s = k_means(data_sample, 2)
fig = plt.figure(figsize = [6,6])
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.scatter(*data_sample, color = mem)
for i, pt in cl.items():
ax.scatter(*pt, color="black", marker="*", s = 16*8)
ax.set_title("Clustering from k-means")
Explanation: 5) Put it all together so that we can iterate
Now we're going to iterate--assign clusters, find a centroid, reassign clusters--until the centroid positions stop changing very much.
End of explanation
err = []
trial_ks = range(1,5)
results = {}
for k in trial_ks:
mem_2, cl_2, s_2 = k_means(data_sample, k)
results[k] = mem_2
err.append(s_2)
f, axes = plt.subplots(1, len(trial_ks), sharey=True, figsize = (18,4))
for i,k in enumerate(trial_ks):
axes[i].set_aspect('equal')
axes[i].set_title("k-means results with k = {} \n error = {:f}".format(k, err[i]))
axes[i].scatter(*data_sample, color = results[k])
# plot the error as a function of the number of clusters
fig = plt.figure(figsize = [6,6])
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.plot(trial_ks,err,'o--')
ax.set_title("Error as a funtion of k")
ax.xaxis.set_ticks(trial_ks)
_ = ax.set_xlabel("number of clusters (k)")
# a different example, this time with 4 clusters
ex4 = make_data(200, 4)
err4 = []
trial_ks_4 = range(1,9)
results_4 = {}
for k in trial_ks_4:
mem_ex4, cl_ex4, s_ex4 = k_means(ex4, k)
results_4[k] = mem_ex4
err4.append(s_ex4)
f, axes = plt.subplots(2, int(len(trial_ks_4)/2), sharey=True, figsize = (18,11))
for i,k in enumerate(trial_ks_4):
axes[int(i >= 4)][i%4].set_aspect('equal')
axes[int(i >= 4)][i%4].set_title("k-means results with k = {} \n error = {:f}".format(k, err4[i]))
axes[int(i >= 4)][i%4].scatter(*ex4, color = results_4[k])
# plot the error as a function of the number of clusters
fig = plt.figure(figsize = [6,6])
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.plot(trial_ks_4,err4,'o--')
ax.set_title("Error as a funtion of k")
ax.xaxis.set_ticks(trial_ks_4)
_ = ax.set_xlabel("number of clusters (k)")
Explanation: K-means with real data
Figuring out how many clusters to look for (that pesky "Step 1")
Now, one thing we haven't covered yet is how to decide on the number of clusters to look for in the first place. There are several different heuristics that we can use to figure out what the "best" number of clusters is (we go into this more in https://github.com/DrSkippy/Data-Science-45min-Intros/tree/master/choosing-k-in-kmeans).
The one heuristic that we're going to talk about here is finding the "knee" in the k-means error function.
The error function:
In this case, the error function is simply the sum of all of the distances from each data point to its assigned cluster, summed over all of the clusters. The further each data point is from its assigned cluster, the larger this error score is.
Look for the "knee":
When I say "knee" I mean to look for a bend in the graoh of the error score vs $k$. The idea is to find the place where you get a smaller decrease in the error (distance from each data point to a centroid) for every increase in the number of clusters ($k$).
End of explanation |
14,752 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Primitive generators
This notebook contains tests for tohu's primitive generators.
Step1: Constant
Constant simply returns the same, constant value every time.
Step2: Boolean
Boolean returns either True or False, optionally with different probabilities.
Step3: Integer
Integer returns a random integer between low and high (both inclusive).
Step4: Float
Float returns a random float between low and high (both inclusive).
Step5: HashDigest
HashDigest returns hex strings representing hash digest values (or alternatively raw bytes).
HashDigest hex strings (uppercase)
Step6: HashDigest hex strings (lowercase)
Step7: HashDigest byte strings
Step8: FakerGenerator
FakerGenerator gives access to any of the methods supported by the faker module. Here are a couple of examples.
Example
Step9: Example | Python Code:
import tohu
from tohu.v5.primitive_generators import *
from tohu.v5.utils import print_generated_sequence
print(f'Tohu version: {tohu.__version__}')
Explanation: Primitive generators
This notebook contains tests for tohu's primitive generators.
End of explanation
g = Constant('quux')
print_generated_sequence(g, num=10, seed=12345)
Explanation: Constant
Constant simply returns the same, constant value every time.
End of explanation
g1 = Boolean()
g2 = Boolean(p=0.8)
print_generated_sequence(g1, num=20, seed=12345)
print_generated_sequence(g2, num=20, seed=99999)
Explanation: Boolean
Boolean returns either True or False, optionally with different probabilities.
End of explanation
g = Integer(low=100, high=200)
print_generated_sequence(g, num=20, seed=12345)
Explanation: Integer
Integer returns a random integer between low and high (both inclusive).
End of explanation
g = Float(low=2.3, high=4.2)
print_generated_sequence(g, num=10, sep='\n', fmt='.12f', seed=12345)
Explanation: Float
Float returns a random float between low and high (both inclusive).
End of explanation
g = HashDigest(length=6)
print_generated_sequence(g, num=10, seed=12345)
Explanation: HashDigest
HashDigest returns hex strings representing hash digest values (or alternatively raw bytes).
HashDigest hex strings (uppercase)
End of explanation
g = HashDigest(length=6, uppercase=False)
print_generated_sequence(g, num=10, seed=12345)
Explanation: HashDigest hex strings (lowercase)
End of explanation
g = HashDigest(length=10, as_bytes=True)
print_generated_sequence(g, num=5, seed=12345, sep='\n')
Explanation: HashDigest byte strings
End of explanation
g = FakerGenerator(method='name')
print_generated_sequence(g, num=8, seed=12345)
Explanation: FakerGenerator
FakerGenerator gives access to any of the methods supported by the faker module. Here are a couple of examples.
Example: random names
End of explanation
g = FakerGenerator(method='address')
print_generated_sequence(g, num=8, seed=12345, sep='\n---\n')
Explanation: Example: random addresses
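A third example in the same spirit, assuming the standard faker provider method name 'email' (any other provider method should work the same way):
g = FakerGenerator(method='email')
print_generated_sequence(g, num=8, seed=12345)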
End of explanation |
14,753 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<a href="https
Step1: Predict Shakespeare with Cloud TPUs and Keras
Overview
This example uses tf.keras to build a language model and train it on a Cloud TPU. This language model predicts the next character of text given the text so far. The trained model can generate new snippets of text that read in a similar style to the text training data.
The model trains for 10 epochs and completes in approximately 5 minutes.
This notebook is hosted on GitHub. To view it in its original repository, after opening the notebook, select File > View on GitHub.
Learning objectives
In this Colab, you will learn how to
Step3: Build the input dataset
We just downloaded some text. The following shows the start of the text and a random snippet so we can get a feel for the whole text.
Step5: Build the model
The model is defined as a two-layer, forward-LSTM, the same model should work both on CPU and TPU.
Because our vocabulary size is 256, the input dimension to the Embedding layer is 256.
When specifying the arguments to the LSTM, it is important to note how the stateful argument is used. When training we will make sure that stateful=False because we do want to reset the state of our model between batches, but when sampling (computing predictions) from a trained model, we want stateful=True so that the model can retain information across the current batch and generate more interesting text.
Step6: Train the model
First, we need to create a distribution strategy that can use the TPU. In this case it is TPUStrategy. You can create and compile the model inside its scope. Once that is done, future calls to the standard Keras methods fit, evaluate and predict use the TPU.
Again note that we train with stateful=False because while training, we only care about one batch at a time.
Step7: Make predictions with the model
Use the trained model to make predictions and generate your own Shakespeare-esque play.
Start the model off with a seed sentence, then generate 250 characters from it. The model makes five predictions from the initial seed.
The predictions are done on the CPU so the batch size (5) in this case does not have to be divisible by 8.
Note that when we are doing predictions or, to be more precise, text generation, we set stateful=True so that the model's state is kept between batches. If stateful is false, the model state is reset between each batch, and the model will only be able to use the information from the current batch (a single character) to make a prediction.
The output of the model is a set of probabilities for the next character (given the input so far). To build a paragraph, we predict one character at a time and sample a character (based on the probabilities provided by the model). For example, if the input character is "o" and the output probabilities are "p" (0.65), "t" (0.30), others characters (0.05), then we allow our model to generate text other than just "Ophelia" and "Othello." | Python Code:
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
Explanation: <a href="https://colab.research.google.com/github/gmonce/datascience/blob/master/Predict_Shakespeare_with_Cloud_TPUs_and_Keras.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Copyright 2018 The TensorFlow Hub Authors.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
!wget --show-progress --continue -O /content/shakespeare.txt http://www.gutenberg.org/files/100/100-0.txt
Explanation: Predict Shakespeare with Cloud TPUs and Keras
Overview
This example uses tf.keras to build a language model and train it on a Cloud TPU. This language model predicts the next character of text given the text so far. The trained model can generate new snippets of text that read in a similar style to the text training data.
The model trains for 10 epochs and completes in approximately 5 minutes.
This notebook is hosted on GitHub. To view it in its original repository, after opening the notebook, select File > View on GitHub.
Learning objectives
In this Colab, you will learn how to:
* Build a two-layer, forward-LSTM model.
* Use distribution strategy to produce a tf.keras model that runs on the TPU, and then use the standard Keras methods to train: fit, predict, and evaluate.
* Use the trained model to make predictions and generate your own Shakespeare-esque play.
Instructions
<h3> Train on TPU <a href="https://cloud.google.com/tpu/"><img valign="middle" src="https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/tpu-hexagon.png" width="50"></a></h3>
On the main menu, click Runtime and select Change runtime type. Set "TPU" as the hardware accelerator.
Click Runtime again and select Runtime > Run All. You can also run the cells manually with Shift-ENTER.
TPUs are located in Google Cloud, for optimal performance, they read data directly from Google Cloud Storage (GCS)
Data, model, and training
In this example, you train the model on the combined works of William Shakespeare, then use the model to compose a play in the style of The Great Bard:
<blockquote>
Loves that led me no dumbs lack her Berjoy's face with her to-day.
The spirits roar'd; which shames which within his powers
Which tied up remedies lending with occasion,
A loud and Lancaster, stabb'd in me
Upon my sword for ever: 'Agripo'er, his days let me free.
Stop it of that word, be so: at Lear,
When I did profess the hour-stranger for my life,
When I did sink to be cried how for aught;
Some beds which seeks chaste senses prove burning;
But he perforces seen in her eyes so fast;
And _
</blockquote>
Download data
Download The Complete Works of William Shakespeare as a single text file from Project Gutenberg. You use snippets from this file as the training data for the model. The target snippet is offset by one character.
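To make the offset-by-one idea concrete, here is a tiny illustration of how an input/target pair is built from a snippet; it mirrors the split_input_target logic used further down:
snippet = "Shakespeare"
input_text, target_text = snippet[:-1], snippet[1:]
print(input_text)   # 'Shakespear'  : what the model sees
print(target_text)  # 'hakespeare'  : what it should predict, shifted one character ahead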
End of explanation
!head -n5 /content/shakespeare.txt
!echo "..."
!shuf -n5 /content/shakespeare.txt
import numpy as np
import tensorflow as tf
import os
import distutils
if distutils.version.LooseVersion(tf.__version__) < '1.14':
raise Exception('This notebook is compatible with TensorFlow 1.14 or higher, for TensorFlow 1.13 or lower please use the previous version at https://github.com/tensorflow/tpu/blob/r1.13/tools/colab/shakespeare_with_tpu_and_keras.ipynb')
# This address identifies the TPU we'll use when configuring TensorFlow.
TPU_WORKER = 'grpc://' + os.environ['COLAB_TPU_ADDR']
SHAKESPEARE_TXT = '/content/shakespeare.txt'
def transform(txt):
return np.asarray([ord(c) for c in txt if ord(c) < 255], dtype=np.int32)
def input_fn(seq_len=100, batch_size=1024):
Return a dataset of source and target sequences for training.
with tf.io.gfile.GFile(SHAKESPEARE_TXT, 'r') as f:
txt = f.read()
source = tf.constant(transform(txt), dtype=tf.int32)
ds = tf.data.Dataset.from_tensor_slices(source).batch(seq_len+1, drop_remainder=True)
def split_input_target(chunk):
input_text = chunk[:-1]
target_text = chunk[1:]
return input_text, target_text
BUFFER_SIZE = 10000
ds = ds.map(split_input_target).shuffle(BUFFER_SIZE).batch(batch_size, drop_remainder=True)
return ds.repeat()
Explanation: Build the input dataset
We just downloaded some text. The following shows the start of the text and a random snippet so we can get a feel for the whole text.
End of explanation
EMBEDDING_DIM = 512
def lstm_model(seq_len=100, batch_size=None, stateful=True):
Language model: predict the next word given the current word.
source = tf.keras.Input(
name='seed', shape=(seq_len,), batch_size=batch_size, dtype=tf.int32)
embedding = tf.keras.layers.Embedding(input_dim=256, output_dim=EMBEDDING_DIM)(source)
lstm_1 = tf.keras.layers.LSTM(EMBEDDING_DIM, stateful=stateful, return_sequences=True)(embedding)
lstm_2 = tf.keras.layers.LSTM(EMBEDDING_DIM, stateful=stateful, return_sequences=True)(lstm_1)
predicted_char = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(256, activation='softmax'))(lstm_2)
return tf.keras.Model(inputs=[source], outputs=[predicted_char])
Explanation: Build the model
The model is defined as a two-layer, forward-LSTM, the same model should work both on CPU and TPU.
Because our vocabulary size is 256, the input dimension to the Embedding layer is 256.
When specifying the arguments to the LSTM, it is important to note how the stateful argument is used. When training we will make sure that stateful=False because we do want to reset the state of our model between batches, but when sampling (computing predictions) from a trained model, we want stateful=True so that the model can retain information across the current batch and generate more interesting text.
End of explanation
tf.keras.backend.clear_session()
resolver = tf.contrib.cluster_resolver.TPUClusterResolver(TPU_WORKER)
tf.contrib.distribute.initialize_tpu_system(resolver)
strategy = tf.contrib.distribute.TPUStrategy(resolver)
with strategy.scope():
training_model = lstm_model(seq_len=100, stateful=False)
training_model.compile(
optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.01),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
training_model.fit(
input_fn(),
steps_per_epoch=100,
epochs=10
)
training_model.save_weights('/tmp/bard.h5', overwrite=True)
Explanation: Train the model
First, we need to create a distribution strategy that can use the TPU. In this case it is TPUStrategy. You can create and compile the model inside its scope. Once that is done, future calls to the standard Keras methods fit, evaluate and predict use the TPU.
Again note that we train with stateful=False because while training, we only care about one batch at a time.
End of explanation
BATCH_SIZE = 5
PREDICT_LEN = 250
# Keras requires the batch size be specified ahead of time for stateful models.
# We use a sequence length of 1, as we will be feeding in one character at a
# time and predicting the next character.
prediction_model = lstm_model(seq_len=1, batch_size=BATCH_SIZE, stateful=True)
prediction_model.load_weights('/tmp/bard.h5')
# We seed the model with our initial string, copied BATCH_SIZE times
seed_txt = 'Looks it not like the king? Verily, we must go! '
seed = transform(seed_txt)
seed = np.repeat(np.expand_dims(seed, 0), BATCH_SIZE, axis=0)
# First, run the seed forward to prime the state of the model.
prediction_model.reset_states()
for i in range(len(seed_txt) - 1):
prediction_model.predict(seed[:, i:i + 1])
# Now we can accumulate predictions!
predictions = [seed[:, -1:]]
for i in range(PREDICT_LEN):
last_word = predictions[-1]
next_probits = prediction_model.predict(last_word)[:, 0, :]
# sample from our output distribution
next_idx = [
np.random.choice(256, p=next_probits[i])
for i in range(BATCH_SIZE)
]
predictions.append(np.asarray(next_idx, dtype=np.int32))
for i in range(BATCH_SIZE):
print('PREDICTION %d\n\n' % i)
p = [predictions[j][i] for j in range(PREDICT_LEN)]
generated = ''.join([chr(c) for c in p]) # Convert back to text
print(generated)
print()
assert len(generated) == PREDICT_LEN, 'Generated text too short'
Explanation: Make predictions with the model
Use the trained model to make predictions and generate your own Shakespeare-esque play.
Start the model off with a seed sentence, then generate 250 characters from it. The model makes five predictions from the initial seed.
The predictions are done on the CPU so the batch size (5) in this case does not have to be divisible by 8.
Note that when we are doing predictions or, to be more precise, text generation, we set stateful=True so that the model's state is kept between batches. If stateful is false, the model state is reset between each batch, and the model will only be able to use the information from the current batch (a single character) to make a prediction.
The output of the model is a set of probabilities for the next character (given the input so far). To build a paragraph, we predict one character at a time and sample a character (based on the probabilities provided by the model). For example, if the input character is "o" and the output probabilities are "p" (0.65), "t" (0.30), other characters (0.05), then we allow our model to generate text other than just "Ophelia" and "Othello."
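As a toy illustration of that sampling step (the probabilities below are made up to match the example in the text, with 'h' standing in for the remaining 5%):
probs = np.zeros(256)
probs[ord('p')], probs[ord('t')], probs[ord('h')] = 0.65, 0.30, 0.05
next_char = chr(np.random.choice(256, p=probs))
print(next_char)  # usually 'p', sometimes 't', occasionally 'h'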
End of explanation |
14,754 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Integration Exercise 2
Imports
Step1: Indefinite integrals
Here is a table of definite integrals. Many of these integrals have a number of parameters $a$, $b$, etc.
Find five of these integrals and perform the following steps
Step2: Integral 1
$$ I_1 = \int_0^\frac{\pi}{2} \sin^2{x} {dx} = \frac{\pi}{4} $$
Step3: Integral 2
$$ I_2 = \int_0^\infty \frac{x\sin{mx}}{x^2 + a^2} {dx} = \frac{\pi}{2} e^{-ma} $$
Step4: Integral 3
$$ I_3 = \int_0^\frac{\pi}{2} \sin{ax^2} {dx} = \frac{1}{2} \sqrt{\frac{\pi}{2 \pi}} $$
Step5: Integral 4
$$ I_4 = \int_0^\infty e^{-ax} \cos{bx}{dx}= \frac{a}{a^2+b^2}$$
Step6: Integral 5
$$ I_5 = \int_0^\infty e^{-ax^{2}} {dx}= \frac{1}{2} \sqrt{\frac{\pi}{a}}$$ | Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy import integrate
Explanation: Integration Exercise 2
Imports
End of explanation
def integrand(x, a):
return 1.0/(x**2 + a**2)
def integral_approx(a):
# Use the args keyword argument to feed extra arguments to your integrand
I, e = integrate.quad(integrand, 0, np.inf, args=(a,))
return I
def integral_exact(a):
return 0.5*np.pi/a
print("Numerical: ", integral_approx(1.0))
print("Exact : ", integral_exact(1.0))
assert True # leave this cell to grade the above integral
Explanation: Indefinite integrals
Here is a table of definite integrals. Many of these integrals have a number of parameters $a$, $b$, etc.
Find five of these integrals and perform the following steps:
Typeset the integral using LateX in a Markdown cell.
Define an integrand function that computes the value of the integrand.
Define an integral_approx function that uses scipy.integrate.quad to perform the integral.
Define an integral_exact function that computes the exact value of the integral.
Call and print the return value of integral_approx and integral_exact for one set of parameters.
Here is an example to show what your solutions should look like:
Example
Here is the integral I am performing:
$$ I_1 = \int_0^\infty \frac{dx}{x^2 + a^2} = \frac{\pi}{2a} $$
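Note that integrate.quad returns both the estimate and an absolute-error estimate (the I, e pair unpacked above). A quick sketch of using that error estimate to sanity-check the numerical result against the closed form, here with a = 1:
I, err_est = integrate.quad(lambda x: 1.0/(x**2 + 1.0), 0, np.inf)
print("estimate:", I, " reported error bound:", err_est)
print("agrees with pi/2:", np.isclose(I, 0.5*np.pi, atol=max(err_est, 1e-10)))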
End of explanation
def integrand(x, a):
return np.sin(x)**2
def integral_approx(a):
# Use the args keyword argument to feed extra arguments to your integrand
I, e = integrate.quad(integrand, 0, np.pi/2, args=(a,))
return I
def integral_exact(a):
return np.pi/4
print("Numerical: ", integral_approx(1.0))
print("Exact : ", integral_exact(1.0))
assert True # leave this cell to grade the above integral
Explanation: Integral 1
$$ I_1 = \int_0^\frac{\pi}{2} \sin^2{x} {dx} = \frac{\pi}{4} $$
End of explanation
def integrand(x, m, a):
return (x*np.sin((m*x)))/(x**2 + a**2)
def integral_approx(m, a):
# Use the args keyword argument to feed extra arguments to your integrand
I, e = integrate.quad(integrand, 0, np.inf, args=(m,a,))
return I
def integral_exact(m, a):
return ((np.pi)*.5) *(np.exp(-1*m*a))
print("Numerical: ", integral_approx(1.0,1.0))
print("Exact : ", integral_exact(1.0,1.0))
assert True # leave this cell to grade the above integral
Explanation: Integral 2
$$ I_2 = \int_0^\infty \frac{x\sin{mx}}{x^2 + a^2} {dx} = \frac{\pi}{2} e^{-ma} $$
End of explanation
def integrand(x, a):
return np.sin((a)*(x**2))
def integral_approx(a):
# Use the args keyword argument to feed extra arguments to your integrand
I, e = integrate.quad(integrand, 0, (np.pi/2), args=(a))
return I
def integral_exact(a):
return .5*(np.sqrt((np.pi)/np.pi*2*np.pi))
print("Numerical: ", integral_approx(1.0))
print("Exact : ", integral_exact(1.0))
assert True # leave this cell to grade the above integral
Explanation: Integral 3
$$ I_3 = \int_0^\frac{\pi}{2} \sin{ax^2} {dx} = \frac{1}{2} \sqrt{\frac{\pi}{2 \pi}} $$
End of explanation
def integrand(x, a, b):
return np.exp(-a*x) * np.cos(b*x)
def integral_approx(a, b):
# Use the args keyword argument to feed extra arguments to your integrand
I, e = integrate.quad(integrand, 0, np.inf, args=(a, b))
return I
def integral_exact(a, b):
return a / (a**2 + b**2)
print("Numerical: ", integral_approx(1.0,1.0))
print("Exact : ", integral_exact(1.0,1.0))
assert True # leave this cell to grade the above integral
Explanation: Integral 4
$$ I_4 = \int_0^\infty e^{-ax} \cos{bx}{dx}= \frac{a}{a^2+b^2}$$
End of explanation
def integrand(x, a):
return np.exp((-a)*(x**2))
def integral_approx(a):
# Use the args keyword argument to feed extra arguments to your integrand
I, e= integrate.quad(integrand, 0, np.inf, args=(a,))
return I
def integral_exact(a):
return 0.5*(np.sqrt((np.pi)/a))
print("Numerical: ", integral_approx(1.0))
print("Exact : ", integral_exact(1.0))
assert True # leave this cell to grade the above integral
Explanation: Integral 5
$$ I_5 = \int_0^\infty e^{-ax^{2}} {dx}= \frac{1}{2} \sqrt{\frac{\pi}{a}}$$
End of explanation |
14,755 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Field correlations in Atom-Cavity systems
A reproduction and verification of Rebic et al. PRA 69, 035804 (2004)
Step1: The states will be $\big|m\big\rangle \otimes \big|n\big\rangle$ where $\big|m\big\rangle$ are atomic states 1-4 and $\big|n\big\rangle$ are the photon states. Therefore we need to construct tensor operators
Step2: The two curves they show are for different values of g and pump, we'll try that here
Step3: Very nice agreement here.
Next, look at the other model (EIT Kerr) and compare.
Now for the EIT Kerr approach | Python Code:
from qutip import *
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
print(qutip.__version__)
import sys
print(sys.version)
# Note, it works fine to truncate at 4 (as in the paper)
# QuTiP can do much larger space just fine so feel free to increase this.
N=4
taus=np.linspace(0,10,500)
Explanation: Field correlations in Atom-Cavity systems
A reproduction and verification of Rebic et al. PRA 69, 035804 (2004)
End of explanation
a=tensor(identity(2), destroy(N))
sm=tensor(sigmam(),identity(N))
sz=tensor(sigmaz(),identity(N))
kappa = 1 # the cavity decay rate, all other rates given in terms of kappa:
g = 6*kappa
theta = g
Ep = 0.1*kappa
gamma = 0.1*kappa
Hint = theta*(0.5*sz + a.dag()*a) + 1j*g*(a.dag()*sm - a*sm.dag())
Hpump = 1j*Ep*(a - a.dag())
H = Hint + Hpump
c_ops = [np.sqrt(kappa)*a, np.sqrt(gamma)*sm]
rho0=tensor(fock_dm(2,0),fock_dm(N,0)) # start with empty cavity and in atomic ground state.
n_op = a.dag()*a
solution = mesolve(H,rho0,taus, c_ops, [n_op])
n = solution.expect[0]
plt.plot(taus,n)
# 1st order correlation:
g1 = coherence_function_g1(H, taus, c_ops, a)
plt.plot(taus,np.real(g1))
plt.ylabel(r"$g^{(1)}(\tau)$",fontsize=18)
plt.xlabel(r"$\kappa\tau$",fontsize=18)
# 2nd order correlation
g2_weak = coherence_function_g2(H, taus, c_ops, a)
plt.plot(taus,np.real(g2_weak))
Explanation: The states will be $\big|m\big\rangle \otimes \big|n\big\rangle$ where $\big|m\big\rangle$ are atomic states 1-4 and $\big|n\big\rangle$ are the photon states. Therefore we need to construct tensor operators:
End of explanation
kappa = 1
g = 20*kappa
theta = g
Ep = 0.5*kappa
gamma = 0.1*kappa
Hint = theta*(0.5*sz + a.dag()*a) + 1j*g*(a.dag()*sm - a*sm.dag())
Hpump = 1j*Ep*(a - a.dag())
H = Hint + Hpump
c_ops = [np.sqrt(kappa)*a, np.sqrt(gamma)*sm]
rho0=tensor(fock_dm(2,0),fock_dm(N,0))
g2_strong = coherence_function_g2(H, taus, c_ops, a)
plt.plot(taus,np.real(g2_weak),"-.")
plt.plot(taus,np.real(g2_strong))
plt.ylabel(r"$g^{(2)}(\tau)$",fontsize=18)
plt.xlabel(r"$\kappa\tau$",fontsize=18)
Explanation: The two curves they show are for different values of g and pump, we'll try that here:
End of explanation
# Define four level system and field:
N=4 # photons to track in the field
a=tensor(identity(4), destroy(N))
# the atomic levels:
one = Qobj([[1],[0],[0],[0]])
one = tensor(one, identity(N))
two = Qobj([[0],[1],[0],[0]])
two = tensor(two, identity(N))
three = Qobj([[0],[0],[1],[0]])
three = tensor(three, identity(N))
four = Qobj([[0],[0],[0],[1]])
four = tensor(four, identity(N))
# populations and coherences:
sig11 = one * one.dag()
sig22 = two * two.dag()
sig33 = three * three.dag()
sig44 = four * four.dag()
sig12 = one * two.dag()
sig13 = one * three.dag()
sig23 = two * three.dag()
sig24 = two * four.dag()
# Note, reversing the subscript is equivalent to taking the dagger:
sig13.dag() == three * one.dag()
# Definitions for the EIT model:
taus=np.linspace(0,10,1000)
kappa = 1
gamma = 0.1*kappa
Delta = 0.0
g1 = 6*kappa
g2 = 6*kappa
# For dashed line in plot:
delta = 0.2*kappa
OmegaC = 6*kappa
Ep = 0.7*kappa
Hint = delta*sig33 + Delta*sig44 + 1j*(g1*(a.dag()*sig13 - sig13.dag()*a) + OmegaC*(sig23 - sig23.dag()) + g2*(a.dag()*sig24 - sig24.dag()*a))
Hpump = 1j*Ep*(a - a.dag())
H = Hint + Hpump
c_ops = [np.sqrt(kappa)*a,np.sqrt(gamma + gamma)*sig33,np.sqrt(gamma)*sig44] # TODO: check the s33 and s44 decays.
rho0=tensor(fock_dm(4,0),fock_dm(N,0))
n_op = a.dag()*a
exp_ops = [n_op,sig11,sig22,sig33,sig44]
solution = mesolve(H,rho0,taus, c_ops, exp_ops)
n = solution.expect[0]
# Check the cavity photon number << 4
plt.plot(taus,n)
# Look at the level populations:
for i,v in enumerate(solution.expect[1:]):
plt.plot(taus,v,label="j={}".format(i+1))
plt.ylim(-0.2,1.2)
plt.legend()
plt.title("Populations")
plt.ylabel(r"$\sigma_{jj}$",fontsize=18)
plt.xlabel(r"$\kappa\tau$",fontsize=18)
g2_eit = coherence_function_g2(H, taus, c_ops, a)
plt.plot(taus,np.real(g2_eit))
# Repeat for different conditions:
taus=np.linspace(0,10,1000)
kappa = 1
gamma = 0.1*kappa
Delta = 0.0
g1 = 6*kappa
g2 = 6*kappa
# For solid line:
delta = 4*kappa
OmegaC = 12*kappa
Ep = 0.1*kappa
Hint = delta*sig33 + Delta*sig44 + 1j*(g1*(a.dag()*sig13 - sig13.dag()*a) + OmegaC*(sig23 - sig23.dag()) + g2*(a.dag()*sig24 - sig24.dag()*a))
Hpump = 1j*Ep*(a - a.dag())
H = Hint + Hpump
c_ops = [np.sqrt(kappa)*a,np.sqrt(gamma + gamma)*sig33,np.sqrt(gamma)*sig44] # TODO: check the s33 and s44 decays.
rho0=tensor(fock_dm(4,0),fock_dm(N,0))
g2_eit_solid = coherence_function_g2(H, taus, c_ops, a)
plt.plot(taus,np.real(g2_eit),"-.")
plt.plot(taus,np.real(g2_eit_solid))
plt.ylim(0,1.5)
plt.ylabel(r"$g^{(2)}(\tau)$",fontsize=18)
plt.xlabel(r"$\kappa\tau$",fontsize=18)
Explanation: Very nice agreement here.
Next, look at the other model (EIT Kerr) and compare.
Now for the EIT Kerr approach:
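To put the comparison on one set of axes, here is a quick overlay of the strongly coupled two-level result and the EIT-Kerr result computed above (a sketch; it assumes g2_strong and g2_eit_solid are still in memory, and rebuilds their respective tau grids since taus was redefined along the way):
taus_2level = np.linspace(0, 10, 500)    # grid used for the two-level model above
taus_eit = np.linspace(0, 10, 1000)      # grid used for the EIT-Kerr model above
plt.plot(taus_2level, np.real(g2_strong), "-.", label="two-level atom")
plt.plot(taus_eit, np.real(g2_eit_solid), label="EIT Kerr")
plt.legend()
plt.ylabel(r"$g^{(2)}(\tau)$", fontsize=18)
plt.xlabel(r"$\kappa\tau$", fontsize=18)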
End of explanation |
14,756 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<a href="https
Step2: Imports
Step3: tf.data.Dataset
Step4: Let's have a look at the data
Step5: Keras model
If you are not sure what cross-entropy, dropout, softmax or batch-normalization mean, head here for a crash-course
Step6: Learning Rate schedule
Step7: Train and validate the model
Step8: Visualize predictions | Python Code:
BATCH_SIZE = 128
EPOCHS = 10
training_images_file = 'gs://mnist-public/train-images-idx3-ubyte'
training_labels_file = 'gs://mnist-public/train-labels-idx1-ubyte'
validation_images_file = 'gs://mnist-public/t10k-images-idx3-ubyte'
validation_labels_file = 'gs://mnist-public/t10k-labels-idx1-ubyte'
Explanation: <a href="https://colab.research.google.com/github/GoogleCloudPlatform/tensorflow-without-a-phd/blob/master/tensorflow-mnist-tutorial/keras_03_mnist_dense_lrdecay_dropout.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Parameters
End of explanation
import os, re, math, json, shutil, pprint
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import IPython.display as display
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
print("Tensorflow version " + tf.__version__)
#@title visualization utilities [RUN ME]
This cell contains helper functions used for visualization
and downloads only. You can skip reading it. There is very
little useful Keras/Tensorflow code here.
# Matplotlib config
plt.ioff()
plt.rc('image', cmap='gray_r')
plt.rc('grid', linewidth=1)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0', figsize=(16,9))
# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# pull a batch from the datasets. This code is not very nice, it gets much better in eager mode (TODO)
def dataset_to_numpy_util(training_dataset, validation_dataset, N):
# get one batch from each: 10000 validation digits, N training digits
batch_train_ds = training_dataset.unbatch().batch(N)
# eager execution: loop through datasets normally
if tf.executing_eagerly():
for validation_digits, validation_labels in validation_dataset:
validation_digits = validation_digits.numpy()
validation_labels = validation_labels.numpy()
break
for training_digits, training_labels in batch_train_ds:
training_digits = training_digits.numpy()
training_labels = training_labels.numpy()
break
else:
v_images, v_labels = validation_dataset.make_one_shot_iterator().get_next()
t_images, t_labels = batch_train_ds.make_one_shot_iterator().get_next()
# Run once, get one batch. Session.run returns numpy results
with tf.Session() as ses:
(validation_digits, validation_labels,
training_digits, training_labels) = ses.run([v_images, v_labels, t_images, t_labels])
# these were one-hot encoded in the dataset
validation_labels = np.argmax(validation_labels, axis=1)
training_labels = np.argmax(training_labels, axis=1)
return (training_digits, training_labels,
validation_digits, validation_labels)
# create digits from local fonts for testing
def create_digits_from_local_fonts(n):
font_labels = []
img = PIL.Image.new('LA', (28*n, 28), color = (0,255)) # format 'LA': black in channel 0, alpha in channel 1
font1 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'DejaVuSansMono-Oblique.ttf'), 25)
font2 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'STIXGeneral.ttf'), 25)
d = PIL.ImageDraw.Draw(img)
for i in range(n):
font_labels.append(i%10)
d.text((7+i*28,0 if i<10 else -4), str(i%10), fill=(255,255), font=font1 if i<10 else font2)
font_digits = np.array(img.getdata(), np.float32)[:,0] / 255.0 # black in channel 0, alpha in channel 1 (discarded)
font_digits = np.reshape(np.stack(np.split(np.reshape(font_digits, [28, 28*n]), n, axis=1), axis=0), [n, 28*28])
return font_digits, font_labels
# utility to display a row of digits with their predictions
def display_digits(digits, predictions, labels, title, n):
fig = plt.figure(figsize=(13,3))
digits = np.reshape(digits, [n, 28, 28])
digits = np.swapaxes(digits, 0, 1)
digits = np.reshape(digits, [28, 28*n])
plt.yticks([])
plt.xticks([28*x+14 for x in range(n)], predictions)
plt.grid(b=None)
for i,t in enumerate(plt.gca().xaxis.get_ticklabels()):
if predictions[i] != labels[i]: t.set_color('red') # bad predictions in red
plt.imshow(digits)
plt.grid(None)
plt.title(title)
display.display(fig)
# utility to display multiple rows of digits, sorted by unrecognized/recognized status
def display_top_unrecognized(digits, predictions, labels, n, lines):
idx = np.argsort(predictions==labels) # sort order: unrecognized first
for i in range(lines):
display_digits(digits[idx][i*n:(i+1)*n], predictions[idx][i*n:(i+1)*n], labels[idx][i*n:(i+1)*n],
"{} sample validation digits out of {} with bad predictions in red and sorted first".format(n*lines, len(digits)) if i==0 else "", n)
def plot_learning_rate(lr_func, epochs):
xx = np.arange(epochs+1, dtype=np.float)
    y = [lr_func(x) for x in xx]
fig, ax = plt.subplots(figsize=(9, 6))
ax.set_xlabel('epochs')
ax.set_title('Learning rate\ndecays from {:0.3g} to {:0.3g}'.format(y[0], y[-2]))
ax.minorticks_on()
ax.grid(True, which='major', axis='both', linestyle='-', linewidth=1)
ax.grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
ax.step(xx,y, linewidth=3, where='post')
display.display(fig)
class PlotTraining(tf.keras.callbacks.Callback):
def __init__(self, sample_rate=1, zoom=1):
self.sample_rate = sample_rate
self.step = 0
self.zoom = zoom
self.steps_per_epoch = 60000//BATCH_SIZE
def on_train_begin(self, logs={}):
self.batch_history = {}
self.batch_step = []
self.epoch_history = {}
self.epoch_step = []
self.fig, self.axes = plt.subplots(1, 2, figsize=(16, 7))
plt.ioff()
def on_batch_end(self, batch, logs={}):
if (batch % self.sample_rate) == 0:
self.batch_step.append(self.step)
for k,v in logs.items():
# do not log "batch" and "size" metrics that do not change
# do not log training accuracy "acc"
if k=='batch' or k=='size':# or k=='acc':
continue
self.batch_history.setdefault(k, []).append(v)
self.step += 1
def on_epoch_end(self, epoch, logs={}):
plt.close(self.fig)
self.axes[0].cla()
self.axes[1].cla()
self.axes[0].set_ylim(0, 1.2/self.zoom)
self.axes[1].set_ylim(1-1/self.zoom/2, 1+0.1/self.zoom/2)
self.epoch_step.append(self.step)
for k,v in logs.items():
# only log validation metrics
if not k.startswith('val_'):
continue
self.epoch_history.setdefault(k, []).append(v)
display.clear_output(wait=True)
for k,v in self.batch_history.items():
self.axes[0 if k.endswith('loss') else 1].plot(np.array(self.batch_step) / self.steps_per_epoch, v, label=k)
for k,v in self.epoch_history.items():
self.axes[0 if k.endswith('loss') else 1].plot(np.array(self.epoch_step) / self.steps_per_epoch, v, label=k, linewidth=3)
self.axes[0].legend()
self.axes[1].legend()
self.axes[0].set_xlabel('epochs')
self.axes[1].set_xlabel('epochs')
self.axes[0].minorticks_on()
self.axes[0].grid(True, which='major', axis='both', linestyle='-', linewidth=1)
self.axes[0].grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
self.axes[1].minorticks_on()
self.axes[1].grid(True, which='major', axis='both', linestyle='-', linewidth=1)
self.axes[1].grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
display.display(self.fig)
Explanation: Imports
End of explanation
AUTO = tf.data.experimental.AUTOTUNE
def read_label(tf_bytestring):
label = tf.io.decode_raw(tf_bytestring, tf.uint8)
label = tf.reshape(label, [])
label = tf.one_hot(label, 10)
return label
def read_image(tf_bytestring):
image = tf.io.decode_raw(tf_bytestring, tf.uint8)
image = tf.cast(image, tf.float32)/256.0
image = tf.reshape(image, [28*28])
return image
def load_dataset(image_file, label_file):
imagedataset = tf.data.FixedLengthRecordDataset(image_file, 28*28, header_bytes=16)
imagedataset = imagedataset.map(read_image, num_parallel_calls=16)
labelsdataset = tf.data.FixedLengthRecordDataset(label_file, 1, header_bytes=8)
labelsdataset = labelsdataset.map(read_label, num_parallel_calls=16)
dataset = tf.data.Dataset.zip((imagedataset, labelsdataset))
return dataset
def get_training_dataset(image_file, label_file, batch_size):
dataset = load_dataset(image_file, label_file)
dataset = dataset.cache() # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset
dataset = dataset.shuffle(5000, reshuffle_each_iteration=True)
dataset = dataset.repeat() # Mandatory for Keras for now
dataset = dataset.batch(batch_size, drop_remainder=True) # drop_remainder is important on TPU, batch size must be fixed
dataset = dataset.prefetch(AUTO) # fetch next batches while training on the current one (-1: autotune prefetch buffer size)
return dataset
def get_validation_dataset(image_file, label_file):
dataset = load_dataset(image_file, label_file)
dataset = dataset.cache() # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset
dataset = dataset.batch(10000, drop_remainder=True) # 10000 items in eval dataset, all in one batch
dataset = dataset.repeat() # Mandatory for Keras for now
return dataset
# instantiate the datasets
training_dataset = get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)
validation_dataset = get_validation_dataset(validation_images_file, validation_labels_file)
# For TPU, we will need a function that returns the dataset
training_input_fn = lambda: get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)
validation_input_fn = lambda: get_validation_dataset(validation_images_file, validation_labels_file)
Explanation: tf.data.Dataset: parse files and prepare training and validation datasets
Please read the best practices for building input pipelines with tf.data.Dataset
End of explanation
N = 24
(training_digits, training_labels,
validation_digits, validation_labels) = dataset_to_numpy_util(training_dataset, validation_dataset, N)
display_digits(training_digits, training_labels, training_labels, "training digits and their labels", N)
display_digits(validation_digits[:N], validation_labels[:N], validation_labels[:N], "validation digits and their labels", N)
font_digits, font_labels = create_digits_from_local_fonts(N)
Explanation: Let's have a look at the data
End of explanation
model = tf.keras.Sequential(
[
tf.keras.layers.Input(shape=(28*28,)),
tf.keras.layers.Dense(200, activation='relu'),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Dense(60, activation='relu'),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.01),
loss='categorical_crossentropy',
metrics=['accuracy'])
# print model layers
model.summary()
# utility callback that displays training curves
plot_training = PlotTraining(sample_rate=10, zoom=10)
Explanation: Keras model
If you are not sure what cross-entropy, dropout, softmax or batch-normalization mean, head here for a crash-course: Tensorflow and deep learning without a PhD
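Batch normalization is mentioned above but not used in this model. If you want to experiment with it, one hidden block could look like the sketch below (an illustration only, not part of the model trained in this notebook):
# hypothetical dense block with batch normalization inserted before the activation
bn_block = tf.keras.Sequential([
    tf.keras.layers.Dense(200, use_bias=False),  # batch norm adds its own shift, so the bias is optional
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Activation('relu'),
    tf.keras.layers.Dropout(0.25)
])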
End of explanation
# lr decay function
def lr_decay(epoch):
return 0.01 * math.pow(0.6, epoch)
# lr schedule callback
lr_decay_callback = tf.keras.callbacks.LearningRateScheduler(lr_decay, verbose=True)
# important to see what you are doing
plot_learning_rate(lr_decay, EPOCHS)
Explanation: Learning Rate schedule
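For a quick numeric feel of the decay schedule defined above, the first few per-epoch values can be printed directly:
for epoch in range(5):
    print("epoch {}: learning rate {:.5f}".format(epoch, lr_decay(epoch)))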
End of explanation
steps_per_epoch = 60000//BATCH_SIZE # 60,000 items in this dataset
print("Steps per epoch: ", steps_per_epoch)
history = model.fit(training_dataset, steps_per_epoch=steps_per_epoch, epochs=EPOCHS,
validation_data=validation_dataset, validation_steps=1, callbacks=[plot_training, lr_decay_callback])
Explanation: Train and validate the model
End of explanation
# recognize digits from local fonts
probabilities = model.predict(font_digits, steps=1)
predicted_labels = np.argmax(probabilities, axis=1)
display_digits(font_digits, predicted_labels, font_labels, "predictions from local fonts (bad predictions in red)", N)
# recognize validation digits
probabilities = model.predict(validation_digits, steps=1)
predicted_labels = np.argmax(probabilities, axis=1)
display_top_unrecognized(validation_digits, predicted_labels, validation_labels, N, 7)
Explanation: Visualize predictions
End of explanation |
14,757 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Exercises
Step1: Data
Step2: Exercise 1
Step3: b. Standard Deviation
Determine standard deviation of the sample.
Step4: c. Standard Error
Using the standard deviation and sample_size, determine the standard error for the sample.
Step5: d. Confidence Intervals
Using the standard error and mean, determine 95% (Z = 1.96), 90% (Z = 1.64), and 80% (Z = 1.28) confidence intervals for the sample.
Step6: Exercise 2
Step7: Exercise 3
Step8: b. Plotting Sample Means - Exponential
Draw 500 samples of size sample_size from a new exponential distribution, plot the means of each of the samples, and check to see if the distribution of the sample means is normal.
Step9: c.i Plotting Sample Means - Autocorrelated
Draw 500 samples of size sample_size from a new autocorrelated (dependent) distribution, plot the means of each of the samples, and check to see if the distribution of the sample means is normal.
Step10: c.ii Plotting Sample Standard Deviations - Autocorrelated
Draw 500 samples of size sample_size from the same autocorrelated distribution, plot the standard deviations of each of the samples, and check to see if the distribution of the sample standard deviations is normal.
Step11: Exercise 4
Step12: b. T-distribution Correction
Run 100 samples of size small_size, this time accounting for the small sample size using a t-distribution, and measure how many of their 95% confidence intervals actually contain the true mean.
Step13: Exercise 5
Step14: b. T-distribution Correction
Run 100 samples from the autocorrelated distribution, this time attempting to account for the autocorrelation using a t-distribution, and measure how many of their 95% confidence intervals actually contain the true mean to see if the correction works.
Step15: c. Newey-West Matrix
Use the newey_west_matrix helper function to compute an adjusted (robust) covariance matrix for a single sample of the autocorrelated data.
Step16: d. Newey-West Correction
Run 100 samples of the following autocorrelated distribution, this time accounting for the autocorrelation by using a Newey-West correction on the standard error, and measure how many of their 95% confidence intervals actually contain the true mean to see if the correction works. (Use the helper function newey_west_SE to find the corrected standard error) | Python Code:
def generate_autocorrelated_data(theta, mu, sigma, N):
X = np.zeros((N, 1))
for t in range(1, N):
X[t] = theta * X[t-1] + np.random.normal(mu, sigma)
return X
def newey_west_SE(data):
ind = range(0, len(data))
ind = sm.add_constant(ind)
model = regression.linear_model.OLS(data, ind).fit(cov_type='HAC',cov_kwds={'maxlags':1})
return model.bse[0]
def newey_west_matrix(data):
ind = range(0, len(data))
ind = sm.add_constant(ind)
model = regression.linear_model.OLS(data, ind).fit()
return sw.cov_hac(model)
# Useful Libraries
import numpy as np
import seaborn as sns
from scipy import stats
import matplotlib.pyplot as plt
from statsmodels.stats.stattools import jarque_bera
import statsmodels.stats.sandwich_covariance as sw
from statsmodels import regression
import statsmodels.api as sm
Explanation: Exercises: Confidence Intervals - Answer Key
By Christopher Fenaroli and Delaney Mackenzie
Lecture Link:
https://www.quantopian.com/lectures/confidence-intervals
IMPORTANT NOTE:
This lecture corresponds to the Confidence Intervals lecture, which is part of the Quantopian lecture series. This homework expects you to rely heavily on the code presented in the corresponding lecture. Please copy and paste regularly from that lecture when starting to work on the problems, as trying to do them from scratch will likely be too difficult.
When you feel comfortable with the topics presented here, see if you can create an algorithm that qualifies for the Quantopian Contest. Participants are evaluated on their ability to produce risk-constrained alpha and the top 10 contest participants are awarded cash prizes on a daily basis.
https://www.quantopian.com/contest
Part of the Quantopian Lecture Series:
www.quantopian.com/lectures
github.com/quantopian/research_public
Key Concepts
End of explanation
np.random.seed(11)
POPULATION_MU = 105
POPULATION_SIGMA = 20
sample_size = 50
Explanation: Data
End of explanation
sample = np.random.normal(POPULATION_MU, POPULATION_SIGMA, sample_size)
#Your code goes here
Mean = np.mean(sample)
print "Mean:", Mean
Explanation: Exercise 1: Determining Confidence Intervals
a. Mean
Determine the mean of the following artificial data in sample.
End of explanation
#Your code goes here
SD = np.std(sample)
print "Standard Deviation:", SD
Explanation: b. Standard Deviation
Determine standard deviation of the sample.
End of explanation
#Your code goes here
SE = SD / np.sqrt(sample_size)
print "Standard Error:", SE
Explanation: c. Standard Error
Using the standard deviation and sample_size, determine the standard error for the sample.
End of explanation
#Your code goes here
print "95% Confidence Interval:", (-1.96 * SE + Mean, 1.96 * SE + Mean)
print "90% Confidence Interval:", (-1.64 * SE + Mean, 1.64 * SE + Mean)
print "80% Confidence Interval:", (-1.28 * SE + Mean, 1.28 * SE + Mean)
Explanation: d. Confidence Intervals
Using the standard error and mean, determine 95% (Z = 1.96), 90% (Z = 1.64), and 80% (Z = 1.28) confidence intervals for the sample.
End of explanation
n = 1000
correct = 0
samples = [np.random.normal(loc=POPULATION_MU, scale=POPULATION_SIGMA, size=sample_size) for i in range(n)]
#Your code goes here
for i in range(n):
sample_mean = np.mean(samples[i])
sample_SE = stats.sem(samples[i])
if ((POPULATION_MU >= -1.96 * sample_SE + sample_mean) and (POPULATION_MU <= 1.96 * sample_SE + sample_mean)):
correct += 1;
print "Expected Result:", .95 * 1000
print "Actual Result", correct
Explanation: Exercise 2: Interpreting Confidence Intervals
Assuming our interval was correctly calculated and that the underlying data was independent, if we take many samples and make many 95% confidence intervals, the intervals will contain the true mean 95% of the time. Run 1000 samples and measure how many of their confidence intervals actually contain the true mean.
End of explanation
n = 500
normal_samples = [np.mean(np.random.normal(loc=POPULATION_MU, scale=POPULATION_SIGMA, size=sample_size)) for i in range(n)]
#Your code goes here
plt.hist(normal_samples, 10)
_, pvalue, _, _ = jarque_bera(normal_samples)
print pvalue
if pvalue > 0.05:
print 'The distribution of sample means is likely normal.'
else:
print 'The distribution of sample means is likely not normal.'
Explanation: Exercise 3: Central Limit Theorem
a. Plotting Sample Means - Normal
Assuming our samples are independent, the distribution of the sample means should be normally distributed, regardless of the underlying distribution.
Draw 500 samples of size sample_size from the same normal distribution from question 1, plot the means of each of the samples, and check to see if the distribution of the sample means is normal using the jarque_bera function (see here more information on the Jarque-Bera test)
End of explanation
n = 500
expo_samples = [np.mean(np.random.exponential(POPULATION_MU, sample_size)) for i in range(n)]
#Your code goes here
plt.hist(expo_samples, 10)
_, pvalue, _, _ = jarque_bera(expo_samples)
print pvalue
if pvalue > 0.05:
print 'The distribution of sample means is likely normal, despite the underlying distribution being non-normal (exponential).'
else:
print 'The distribution of sample means is likely not normal.'
Explanation: b. Plotting Sample Means - Exponential
Draw 500 samples of size sample_size from a new exponential distribution, plot the means of each of the samples, and check to see if the distribution of the sample means is normal.
End of explanation
n = 500
autocorrelated_samples = [(generate_autocorrelated_data(0.5, 0, 1, sample_size) + POPULATION_MU) for i in range(n)]
autocorrelated_means = [np.mean(autocorrelated_samples[i]) for i in range(n)]
#Your code goes here
plt.hist(autocorrelated_means, 10)
_, pvalue, _, _ = jarque_bera(autocorrelated_means)
print pvalue
if pvalue > 0.05:
print 'The distribution of sample means is likely normal, despite an autocorrelated underlying distribution.'
else:
print 'The distribution of sample means is likely not normal.'
Explanation: c.i Plotting Sample Means - Autocorrelated
Draw 500 samples of size sample_size from a new autocorrelated (dependent) distribution, plot the means of each of the samples, and check to see if the distribution of the sample means is normal.
End of explanation
n = 500
autocorrelated_samples = [(generate_autocorrelated_data(0.5, 0, 1, sample_size) + POPULATION_MU) for i in range(n)]
autocorrelated_stds = [np.std(autocorrelated_samples[i]) for i in range(n)]
#Your code goes here
plt.hist(autocorrelated_stds, 10)
_, pvalue, _, _ = jarque_bera(autocorrelated_stds)
print pvalue
if pvalue > 0.05:
print 'The distribution of sample standard deviations is likely normal.'
else:
print 'The distribution of sample standard deviations is likely not normal, due to the autocorrelated underlying distribution and the different assumptions for the CLT for means and for standard deviations.'
Explanation: c.ii Plotting Sample Standard Deviations - Autocorrelated
Draw 500 samples of size sample_size from the same autocorrelated distribution, plot the standard deviations of each of the samples, and check to see if the distribution of the sample standard deviations is normal.
End of explanation
n = 100
small_size = 3
correct = 0
samples = [np.random.normal(loc=POPULATION_MU, scale=POPULATION_SIGMA, size=small_size) for i in range(n)]
#Your code goes here
for i in range(n):
sample_mean = np.mean(samples[i])
sample_SE = stats.sem(samples[i])
if ((POPULATION_MU >= -1.96 * sample_SE + sample_mean) and (POPULATION_MU <= 1.96 * sample_SE + sample_mean)):
correct += 1
print "Expected Result:", .95 * n
print "Actual Result:", correct
print "Due to the small sample size, the actual number of confidence intervals containing the population mean is much lower than what we would expect given a correctly calibrated interval."
Explanation: Exercise 4: Small Sample Sizes
a. Error Due to Small Sample Size
Run 100 samples of size small_size and measure how many of their 95% confidence intervals actually contain the true mean.
End of explanation
n = 100
small_size = 5
correct = 0
samples = [np.random.normal(loc=POPULATION_MU, scale=POPULATION_SIGMA, size=small_size) for i in range(n)]
#Your code goes here
for i in range(n):
sample_mean = np.mean(samples[i])
sample_SE = stats.sem(samples[i])
h = sample_SE * stats.t.ppf((1+0.95) / 2, len(samples[i])-1)
if ((POPULATION_MU >= sample_mean - h) and (POPULATION_MU <= sample_mean + h)):
correct += 1
print "Expected Result:", .95 * n
print "Actual Result:", correct
print "After using the t-distribution to correct for the smaller sample size, the actual number of confidence intervals containing the population mean is about what we expected."
Explanation: b. T-distribution Correction
Run 100 samples of size small_size, this time accouting for the small sample size using a t-distribution, and measure how many of their 95% confidence intervals actually contain the true mean.
End of explanation
n = 100
correct = 0
theta = 0.5
noise_mu = 0
noise_sigma = 1
#Your code goes here
for i in range(n):
X = generate_autocorrelated_data(theta, noise_mu, noise_sigma, sample_size) + POPULATION_MU
sample_mean = np.mean(X)
sample_SE = np.std(X) / np.sqrt(sample_size)
if ((POPULATION_MU >= -1.96 * sample_SE + sample_mean) and (POPULATION_MU <= 1.96 * sample_SE + sample_mean)):
correct += 1
print "Expected Result:", .95 * n
print "Actual Result:", correct
print "Because the underlying data was autocorrelated, the actual number of confidence intervals containing the population mean is much lower than what we expected."
Explanation: Exercise 5: Dependence
a. Error due to Dependence
Run 100 samples of the following autocorrelated distribution and measure how many of their 95% confidence intervals actually contain the true mean. (Use the helper function generate_autocorrelated_data(theta, noise_mu, noise_sigma, sample_size) to generate the samples)
End of explanation
n = 100
correct = 0
#Your code goes here
for i in range(n):
X = generate_autocorrelated_data(theta, noise_mu, noise_sigma, sample_size) + POPULATION_MU
sample_mean = np.mean(X)
sample_SE = np.std(X) / np.sqrt(sample_size)
h = sample_SE * stats.t.ppf((1+0.95) / 2, len(X)-1)
if ((POPULATION_MU >= sample_mean - h) and (POPULATION_MU <= sample_mean + h)):
correct += 1
print "Expected Result:", .95 * n
print "Actual Result:", correct
print "We did not see a significant improvement in the actual number of confidence intervals containing the population mean. This is because a t-distribution only corrects for small sample sizes, not autocorrelation."
Explanation: b. T-distribution Correction
Run 100 samples from the autocorrelated distribution, this time attempting to account for the autocorrelation using a t-distribution, and measure how many of their 95% confidence intervals actually contain the true mean to see if the correction works.
End of explanation
X = generate_autocorrelated_data(theta, noise_mu, noise_sigma, sample_size) + POPULATION_MU
#Your code goes here
print newey_west_matrix(X)
Explanation: c. Newey-West Matrix
Use the newey_west_matrix helper function to compute an adjusted (robust) covariance matrix for a single sample of the autocorrelated data.
End of explanation
n = 100
correct = 0
#Your code goes here
for i in range(n):
X = generate_autocorrelated_data(theta, noise_mu, noise_sigma, sample_size) + POPULATION_MU
sample_mean = np.mean(X)
sample_SE = newey_west_SE(X)
if ((POPULATION_MU >= -1.96 * sample_SE + sample_mean) and (POPULATION_MU <= 1.96 * sample_SE + sample_mean)):
correct += 1
print "New Standard Error:", sample_SE
print "Expected Result:", .95 * n
print "Actual Result:", correct
print "After accounting for autocorrelation by finding a Newey-West standard error, the actual number of confidence intervals containing the population mean is about what we expected."
Explanation: d. Newey-West Correction
Run 100 samples of the following autocorrelated distribution, this time accounting for the autocorrelation by using a Newey-West correction on the standard error, and measure how many of their 95% confidence intervals actually contain the true mean to see if the correction works. (Use the helper function newey_west_SE to find the corrected standard error)
End of explanation |
14,758 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Variables
In computer programming, a variable is a storage location and an associated symbolic name (an identifier) which contains some known or unknown quantity or information, a value.
C
c
int x = 29;
float y = 321.321;
double z = 32132132132133.21;
Python
python
x = 343 # Integer
y = 4324.4324 # Float
Variable names in Python can contain alphanumerical characters a-z, A-Z, 0-9 and some special characters such as _. Normal variable names should start with a letter. _ can be used for special cases.
By convention, variable names start with a lower-case letter, and Class names start with an upper-case letter.
In addition, there are a number of Python keywords that cannot be used as variable names. These keywords are
Step1: If we assign a new value to a variable, its type can change.
Step2: We get a NameError when we try to access a variable which has not been defined.
Step3: Fundamental data types
Step4: Typecasting
When you want to change variables from one type to another.
We are using inbuilt functions to do the typecasting.
Later we'll show you how to write your own functions.
Step5: Basic boolean algebra
and
or
not
Step6: Some universal truths!
Step7: Strings
Let's get deep!
Strings are the variable type that is used for storing text messages.
Step8: String slicing
We can extract a part of a string.
Indexing starts with 0 and not 1!
Step9: String concatenation and formatting
Step10: Important string functions
strip()
Removes the whitespaces from the ends of a string.
Step11: split()
Splits the string into a list according to the passed delimiter
Step12: replace()
Replaces a substring of the string with the passed string
Step13: find()/index()
Searches for the passed value in the entire string and returns its index.
The only difference between find() and index() is that find returns -1 when the string is not found while index() raises an error.
Step14: Lists
The list is a most versatile datatype available in Python which can be written as a list of comma-separated values (items) between square brackets.
Step15: So the best thing about a list is that it can hold any data type inside it and is also very fast. You can iterate through millions of values in a list in a matter of seconds
indexing
Step16: list.index()
Similar to the index function of string. Returns the index of the specified object.
Step17: list.append()
Adds a new entry at the end of the list
Step18: list.pop()
Removes the last value from the list if no index is specified, otherwise removes the object at the specified index
Step19: list.extend()
Extends the list with the values of the parameter
Step20: list.count()
Counts the number of occurences of the given object
Step21: list.sort()
Sorts the given list.
Step22: list.insert()
Inserts the passed object at the specified position.
Step23: list.reverse()
Reverse the contents of the list.
Step24: range()
Generates a list from the range of numbers provided.
Step25: Dictionary
Dictionaries consist of pairs (called items) of keys and their corresponding values.
Python dictionaries are also known as associative arrays or hash tables.
Step26: dict.keys()
Get all the keys form the dictionary
Step27: dict.values()
Get all the values from the dictionary
Step28: dict.items()
Get all the key
Step29: dict.has_key()
Check if a particular entry exists in a dictionary
Step30: dict.update()
Update the first dictionary with the contents of the second dictionary
Step31: if / else
Note
Step32: if / else with strings and lists | Python Code:
'''
variable assignments
this is a variable assignment
'''
x = 1.0
my_variable = 12
print type(x)
print type(my_variable)
Explanation: Variables
In computer programming, a variable is a storage location and an associated symbolic name (an identifier) which contains some known or unknown quantity or information, a value.
C
c
int x = 29;
float y = 321.321;
double z = 32132132132133.21;
Python
python
x = 343 # Integer
y = 4324.4324 # Float
Variable names in Python can contain alphanumerical characters a-z, A-Z, 0-9 and some special characters such as _. Normal variable names should start with a letter. _ can be used for special cases.
By convention, variable names start with a lower-case letter, and Class names start with an upper-case letter.
In addition, there are a number of Python keywords that cannot be used as variable names. These keywords are:
and, as, assert, break, class, continue, def, del, elif, else, except,
exec, finally, for, from, global, if, import, in, is, lambda, not, or,
pass, print, raise, return, try, while, with, yield
Note: Be aware of the keyword lambda, which could easily be a natural variable name in a scientific program. But being a keyword, it cannot be used as a variable name.
Assignment
The assignment operator in Python is =. Python is a dynamically typed language, so we do not need to specify the type of a variable when we create one.
Assigning a value to a new variable creates the variable:
End of explanation
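# Quick illustration of the keyword rule described above: assigning to a reserved word is a
# SyntaxError, so the offending lines are left commented out.
# lambda = 10 # SyntaxError: invalid syntax
# class = "physics" # SyntaxError: invalid syntax
my_lambda = 10 # renaming (e.g. adding a prefix) avoids the clash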
x = 1
type(x)
Explanation: If we assign a new value to a variable, its type can change.
End of explanation
t = x + y
Explanation: We get a NameError when we try to access a variable which has not been defined.
End of explanation
# integers
x = 1
type(x)
# float
x = 1.0
type(x)
# boolean
b1 = True
b2 = False
type(b1)
# Booleans are integers in Python
print True + True # True has a value equal to 1
print False + False # False has a value equal to 0
# complex numbers: note the use of `j` to specify the imaginary part
x = 1.0 - 1.0j
type(x)
print x
# Real part
print x.real
# Imaginary
print x.imag
Explanation: Fundamental data types
End of explanation
a = 1
a
float(a)
z = 2 + 3j
z
float(z.imag)
complex(a)
int(45.55)
bool(0)
Explanation: Typecasting
When you want to change variables from one type to another.
We are using inbuilt functions to do the typecasting.
Later we'll show you how to write your own functions.
End of explanation
True and True
True and False
True and True and False and True
True or False
False or False
1 and 1
1 and 0
20 and 30 # It just gives you the latter
200 and 30 # I told ya!
0 and 431 # Not here!
1 or 1
1 or 0
21 or 42 # Gives you former
3214 or 42 # Proved!
not True
not False
not 1
not 0
not 420 # Yep!
Explanation: Basic boolean algebra
and
or
not
End of explanation
1 < 2
2 != 3
22 > 11
5 == 5
2 + 3 == 5
Explanation: Some universal truths!
End of explanation
s = "Hello world"
type(s)
# length of the string: the number of characters
len(s)
s # `s` is still the same! Strings are immutable.
s[0]
s[1]
s[2], s[3], s[4]
Explanation: Strings
Let's get deep!
Strings are the variable type that is used for storing text messages.
End of explanation
s[0:5]
s[:5] # From start.
s[6:] # Till end.
s[:] # From start and till end!
Explanation: String slicing
We can extract a part of a string.
Indexing starts with 0 and not 1!
End of explanation
a = "foo"
b = "bar"
a + b
a + " " + b
s.count("l")
s.endswith("ld") # Also works with range
s.upper()
s.lower()
s2 = " " + s + "\n"
s2
Explanation: String concatenation and formatting
End of explanation
s2.strip() # Performs both lstrip() and rstrip()
s2.strip("\n")
Explanation: Important string functions
strip()
Removes the whitespaces from the ends of a string.
End of explanation
s2.split()
s2.split("l")
Explanation: split()
Splits the string into a list according to the passed delimiter
End of explanation
# replace a substring in a string with somethign else
s2.replace("world", "test")
Explanation: replace()
Replaces a substring of the string with the passed string
End of explanation
s2.find("wo")
s2.index("wo")
s2.find("te")
s2.index("te")
Explanation: find()/index()
Searches for the passed value in the entire string and returns its index.
The only difference between find() and index() is that find returns -1 when the string is not found while index() raises an error.
End of explanation
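# Small follow-up example of the difference described above: find() signals a miss with -1,
# while index() raises a ValueError that has to be caught.
print s2.find("xyz") # -1, substring not present
try:
    s2.index("xyz")
except ValueError:
    print "index() raised ValueError for a missing substring"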
list1 = [1, 2, 4, 2]
print list1
list2 = ["hey", "how", "are", "you"]
print list2
list3 = [1, 2, 4, 'hello', '34', 'hi', 23, [45, 23, 7], 2]
print list3
Explanation: Lists
The list is a most versatile datatype available in Python which can be written as a list of comma-separated values (items) between square brackets.
End of explanation
list1[2]
list1[-1]
Explanation: So the best thing about a list is that it can hold any data type inside it and is also very fast. You can iterate through millions of values in a list in a matter of seconds
indexing
End of explanation
list4.index(5)
list4.index(6)
Explanation: list.index()
Similar to the index function of string. Returns the index of the specified object.
End of explanation
list1.append(100)
print list1
list4 = list1 + list2
print list4
Explanation: list.append()
Adds a new entry at the end of the list
End of explanation
list4.pop()
list4.pop(1)
print list4
Explanation: list.pop()
Removes the last value from the list if no index is specified, otherwise removes the object at the specified index
End of explanation
tuple1 = (1, 2, 3)
print list4 + tuple1 # raises a TypeError: a list cannot be concatenated with a tuple using +
list4 = [1,24,5,5]
list4.extend(tuple1)
print list4
Explanation: list.extend()
Extends the list with the values of the parameter
End of explanation
list4.count(5)
Explanation: list.count()
Counts the number of occurrences of the given object
End of explanation
list4.sort()
print list4
list5 = ["hey", "how", "are", "you"]
list5.sort()
print list5
list5.sort(key=lambda x : x[len(x)-1]) # Can also take functions as arguments for sorting.
print list5
list5.sort(reverse=True) # Sort in reverse order
print list5
Explanation: list.sort()
Sorts the given list.
End of explanation
print list5
list5.insert(0, "hi")
print list5
Explanation: list.insert()
Inserts the passed object at the specified position.
End of explanation
print list5
list5.reverse()
print list5
Explanation: list.reverse()
Reverse the contents of the list.
End of explanation
range(1,20)
range(1,20,3)
Explanation: range()
Generates a list from the range of numbers provided.
End of explanation
dict1 = {'Alice': '2341', 'Beth': '9102', 'Cecil': '3258'}
dict1['Beth'] # Accessing the value of a key
dict1['Alice'] = '3111' # Editing the value of a key
print dict1
dict1['James'] = '4212' # Adding a value to a dict
print dict1
del dict1['Alice']
print dict1
Explanation: Dictionary
Dictionaries consist of pairs (called items) of keys and their corresponding values.
Python dictionaries are also known as associative arrays or hash tables.
End of explanation
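# One extra lookup pattern worth knowing alongside the bracket syntax above: dict.get()
# returns a default instead of raising a KeyError when the key is absent.
print dict1.get('Beth') # '9102'
print dict1.get('Zoe', 'not found') # default value, no KeyError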
print dict1.keys()
Explanation: dict.keys()
Get all the keys from the dictionary
End of explanation
print dict1.values()
Explanation: dict.values()
Get all the values from the dictionary
End of explanation
print dict1.items()
Explanation: dict.items()
Get all the key:value pairs from the dictionary as tuples in a list
End of explanation
print dict1.has_key('Cecil')
dict1['Alice'] # raises a KeyError, since 'Alice' was deleted above
print dict1.has_key('Alice')
Explanation: dict.has_key()
Check if a particular entry exists in a dictionary
End of explanation
dict2 = {'a':1, 'b':'54', 'c':'hello'}
dict2.update(dict1)
print dict2
Explanation: dict.update()
Update the first dictionary with the contents of the second dictionary
End of explanation
if 2 > 1:
print "Hello World!"
if 1 > 2:
print "Hello World!"
else:
print "World is not a beautiful place!"
if 1 > 2:
print "Hello World!"
elif 2 > 3:
print "World is even better!"
elif 3 > 4:
print "I don't want to live here!"
else:
print "World is not a beautiful place!"
Explanation: if / else
Note: Python uses indentation to group things.
End of explanation
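# The indentation note above is strict: grouped lines must share the same indent, and a
# mismatch raises an IndentationError, so the broken variant is left commented out.
if 10 > 5:
    print "outer branch"
    if 10 > 7:
        print "nested branch, one more indent level"
# if 10 > 5:
# print "missing indent" # IndentationError: expected an indented block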
print list1
print s
if 4 in list1:
print "Hey! 2 exists in list1"
if 2 in list1:
i = list1.index(2)
print "Search successful. Found at index {0}".format((i+1))
if 'o' in s:
print "'o' exists!"
if 'llo' in s:
print "'llo' exists!"
if 'house' not in s:
print "house not found!"
Explanation: if / else with strings and lists
End of explanation |
14,759 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Batch Normalization – Practice
Batch normalization is most useful when building deep neural networks. To demonstrate this, we'll create a convolutional neural network with 20 convolutional layers, followed by a fully connected layer. We'll use it to classify handwritten digits in the MNIST dataset, which should be familiar to you by now.
This is not a good network for classfying MNIST digits. You could create a much simpler network and get better results. However, to give you hands-on experience with batch normalization, we had to make an example that was
Step3: Batch Normalization using tf.layers.batch_normalization<a id="example_1"></a>
This version of the network uses tf.layers for almost everything, and expects you to implement batch normalization using tf.layers.batch_normalization
We'll use the following function to create fully connected layers in our network. We'll create them with the specified number of neurons and a ReLU activation function.
This version of the function does not include batch normalization.
Step6: We'll use the following function to create convolutional layers in our network. They are very basic
Step8: Run the following cell, along with the earlier cells (to load the dataset and define the necessary functions).
This cell builds the network without batch normalization, then trains it on the MNIST dataset. It displays loss and accuracy data periodically while training.
Step10: With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.)
Using batch normalization, you'll be able to train this same network to over 90% in that same number of batches.
Add batch normalization
We've copied the previous three cells to get you started. Edit these cells to add batch normalization to the network. For this exercise, you should use tf.layers.batch_normalization to handle most of the math, but you'll need to make a few other changes to your network to integrate batch normalization. You may want to refer back to the lesson notebook to remind yourself of important things, like how your graph operations need to know whether or not you are performing training or inference.
If you get stuck, you can check out the Batch_Normalization_Solutions notebook to see how we did things.
TODO
Step12: TODO
Step13: TODO
Step15: With batch normalization, you should now get an accuracy over 90%. Notice also the last line of the output
Step17: TODO
Step18: TODO | Python Code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False)
Explanation: Batch Normalization – Practice
Batch normalization is most useful when building deep neural networks. To demonstrate this, we'll create a convolutional neural network with 20 convolutional layers, followed by a fully connected layer. We'll use it to classify handwritten digits in the MNIST dataset, which should be familiar to you by now.
This is not a good network for classfying MNIST digits. You could create a much simpler network and get better results. However, to give you hands-on experience with batch normalization, we had to make an example that was:
1. Complicated enough that training would benefit from batch normalization.
2. Simple enough that it would train quickly, since this is meant to be a short exercise just to give you some practice adding batch normalization.
3. Simple enough that the architecture would be easy to understand without additional resources.
This notebook includes two versions of the network that you can edit. The first uses higher level functions from the tf.layers package. The second is the same network, but uses only lower level functions in the tf.nn package.
Batch Normalization with tf.layers.batch_normalization
Batch Normalization with tf.nn.batch_normalization
The following cell loads TensorFlow, downloads the MNIST dataset if necessary, and loads it into an object named mnist. You'll need to run this cell before running anything else in the notebook.
End of explanation
DO NOT MODIFY THIS CELL
def fully_connected(prev_layer, num_units):
Create a fully connectd layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)
return layer
Explanation: Batch Normalization using tf.layers.batch_normalization<a id="example_1"></a>
This version of the network uses tf.layers for almost everything, and expects you to implement batch normalization using tf.layers.batch_normalization
We'll use the following function to create fully connected layers in our network. We'll create them with the specified number of neurons and a ReLU activation function.
This version of the function does not include batch normalization.
End of explanation
DO NOT MODIFY THIS CELL
def conv_layer(prev_layer, layer_depth):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu)
return conv_layer
Explanation: We'll use the following function to create convolutional layers in our network. They are very basic: we're always using a 3x3 kernel, ReLU activation functions, strides of 1x1 on layers with odd depths, and strides of 2x2 on layers with even depths. We aren't bothering with pooling layers at all in this network.
This version of the function does not include batch normalization.
End of explanation
DO NOT MODIFY THIS CELL
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100)
# Create the output layer with 1 node for each
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]]})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
Explanation: Run the following cell, along with the earlier cells (to load the dataset and define the necessary functions).
This cell builds the network without batch normalization, then trains it on the MNIST dataset. It displays loss and accuracy data periodically while training.
End of explanation
def fully_connected(prev_layer, num_units, istraining):
Create a fully connectd layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
    layer = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None) # bias and ReLU are handled by the batch normalization and activation below
layer = tf.layers.batch_normalization(inputs=layer, training=istraining)
layer = tf.nn.relu(layer)
return layer
Explanation: With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.)
Using batch normalization, you'll be able to train this same network to over 90% in that same number of batches.
Add batch normalization
We've copied the previous three cells to get you started. Edit these cells to add batch normalization to the network. For this exercise, you should use tf.layers.batch_normalization to handle most of the math, but you'll need to make a few other changes to your network to integrate batch normalization. You may want to refer back to the lesson notebook to remind yourself of important things, like how your graph operations need to know whether or not you are performing training or inference.
If you get stuck, you can check out the Batch_Normalization_Solutions notebook to see how we did things.
TODO: Modify fully_connected to add batch normalization to the fully connected layers it creates. Feel free to change the function's parameters if it helps.
End of explanation
def conv_layer(prev_layer, layer_depth, istraining):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=None, use_bias=False)
conv_layer = tf.layers.batch_normalization(conv_layer, training=istraining)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
Explanation: TODO: Modify conv_layer to add batch normalization to the convolutional layers it creates. Feel free to change the function's parameters if it helps.
End of explanation
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
istraining = tf.placeholder(tf.bool)
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i, istraining)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100, istraining)
# Create the output layer with 1 node for each
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, istraining:True})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels,
istraining:False})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, istraining:False})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels,
istraining:False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels,
istraining:False})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]],
istraining:False})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
Explanation: TODO: Edit the train function to support batch normalization. You'll need to make sure the network knows whether or not it is training, and you'll need to make sure it updates and uses its population statistics correctly.
End of explanation
def fully_connected(prev_layer, num_units, istraining):
Create a fully connectd layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
layer = tf.layers.dense(prev_layer, num_units, activation=None, use_bias=False)
gamma = tf.Variable(tf.ones([num_units]))
beta = tf.Variable(tf.zeros([num_units]))
pop_mean = tf.Variable(tf.zeros([num_units]), trainable=False)
pop_variance = tf.Variable(tf.ones([num_units]), trainable=False)
epsilon = 1e-3
def batch_norm_training():
batch_mean, batch_variance = tf.nn.moments(layer, axes=[0])
decay = 0.99
train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
with tf.control_dependencies([train_mean, train_variance]):
return tf.nn.batch_normalization(layer, batch_mean, batch_variance, beta, gamma, epsilon)
def batch_norm_inference():
return tf.nn.batch_normalization(layer, pop_mean, pop_variance, beta, gamma, epsilon)
batch_normalization_out = tf.cond(istraining, batch_norm_training, batch_norm_inference)
return tf.nn.relu(batch_normalization_out)
Explanation: With batch normalization, you should now get an accuracy over 90%. Notice also the last line of the output: Accuracy on 100 samples. If this value is low while everything else looks good, that means you did not implement batch normalization correctly. Specifically, it means you either did not calculate the population mean and variance while training, or you are not using those values during inference.
Batch Normalization using tf.nn.batch_normalization<a id="example_2"></a>
Most of the time you will be able to use higher level functions exclusively, but sometimes you may want to work at a lower level. For example, if you ever want to implement a new feature – something new enough that TensorFlow does not already include a high-level implementation of it, like batch normalization in an LSTM – then you may need to know these sorts of things.
This version of the network uses tf.nn for almost everything, and expects you to implement batch normalization using tf.nn.batch_normalization.
Optional TODO: You can run the next three cells before you edit them just to see how the network performs without batch normalization. However, the results should be pretty much the same as you saw with the previous example before you added batch normalization.
TODO: Modify fully_connected to add batch normalization to the fully connected layers it creates. Feel free to change the function's parameters if it helps.
Note: For convenience, we continue to use tf.layers.dense for the fully_connected layer. By this point in the class, you should have no problem replacing that with matrix operations between the prev_layer and explicit weights and biases variables.
End of explanation
def conv_layer(prev_layer, layer_depth, istraining):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
in_channels = prev_layer.get_shape().as_list()[3]
out_channels = layer_depth*4
weights = tf.Variable(
tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05))
# bias = tf.Variable(tf.zeros(out_channels))
conv_layer = tf.nn.conv2d(prev_layer, weights, strides=[1,strides, strides, 1], padding='SAME')
# conv_layer = tf.nn.bias_add(conv_layer, bias)
# conv_layer = tf.nn.relu(conv_layer)
gamma = tf.Variable(tf.ones([out_channels]))
beta = tf.Variable(tf.zeros([out_channels]))
pop_mean = tf.Variable(tf.zeros([out_channels]), trainable=False)
pop_variance = tf.Variable(tf.ones([out_channels]), trainable=False)
epsilon = 1e-3
def batch_norm_training():
batch_mean, batch_variance = tf.nn.moments(conv_layer, [0,1,2], keep_dims=False)
decay = 0.99
train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
with tf.control_dependencies([train_mean, train_variance]):
return tf.nn.batch_normalization(conv_layer, batch_mean, batch_variance, beta, gamma, epsilon)
def batch_norm_inference():
return tf.nn.batch_normalization(conv_layer, pop_mean, pop_variance, beta, gamma, epsilon)
batch_normalization_out = tf.cond(istraining, batch_norm_training, batch_norm_inference)
return tf.nn.relu(batch_normalization_out)
Explanation: TODO: Modify conv_layer to add batch normalization to the fully connected layers it creates. Feel free to change the function's parameters if it helps.
Note: Unlike in the previous example that used tf.layers, adding batch normalization to these convolutional layers does require some slight differences to what you did in fully_connected.
End of explanation
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
istraining = tf.placeholder(tf.bool)
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i, istraining)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100, istraining)
# Create the output layer with 1 node for each
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, istraining:True})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels,
istraining:False})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, istraining:False})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels, istraining:False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels, istraining:False})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]], istraining:False})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
Explanation: TODO: Edit the train function to support batch normalization. You'll need to make sure the network knows whether or not it is training.
End of explanation |
14,760 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Text-to-Video retrieval with S3D MIL-NCE
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https
Step3: Import the TF-Hub model
This tutorial demonstrates how to use the S3D MIL-NCE model from TensorFlow Hub to perform text-to-video retrieval, i.e. to find the videos that are most similar to a given text query.
The model has 2 signatures: one for generating video embeddings and one for generating text embeddings. We use these embeddings to find the nearest neighbors in the embedding space.
Step4: Demonstrate text-to-video retrieval | Python Code:
!pip install -q opencv-python
import os
import tensorflow.compat.v2 as tf
import tensorflow_hub as hub
import numpy as np
import cv2
from IPython import display
import math
Explanation: Text-to-Video retrieval with S3D MIL-NCE
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://tensorflow.google.cn/hub/tutorials/text_to_video_retrieval_with_s3d_milnce"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png">View 在 TensorFlow.org 上查看</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/hub/tutorials/text_to_video_retrieval_with_s3d_milnce.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png">在 Google Colab 中运行 </a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/hub/tutorials/text_to_video_retrieval_with_s3d_milnce.ipynb"> <img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png"> 在 GitHub 上查看源代码</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/hub/tutorials/text_to_video_retrieval_with_s3d_milnce.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png">下载笔记本</a></td>
<td> <a href="https://tfhub.dev/deepmind/mil-nce/s3d/1"><img src="https://tensorflow.google.cn/images/hub_logo_32px.png">看到 TF Hub models</a>
</td>
</table>
End of explanation
# Load the model once from TF-Hub.
hub_handle = 'https://tfhub.dev/deepmind/mil-nce/s3d/1'
hub_model = hub.load(hub_handle)
def generate_embeddings(model, input_frames, input_words):
Generate embeddings from the model from video frames and input words.
# Input_frames must be normalized in [0, 1] and of the shape Batch x T x H x W x 3
vision_output = model.signatures['video'](tf.constant(tf.cast(input_frames, dtype=tf.float32)))
text_output = model.signatures['text'](tf.constant(input_words))
return vision_output['video_embedding'], text_output['text_embedding']
# @title Define video loading and visualization functions { display-mode: "form" }
# Utilities to open video files using CV2
def crop_center_square(frame):
y, x = frame.shape[0:2]
min_dim = min(y, x)
start_x = (x // 2) - (min_dim // 2)
start_y = (y // 2) - (min_dim // 2)
return frame[start_y:start_y+min_dim,start_x:start_x+min_dim]
def load_video(video_url, max_frames=32, resize=(224, 224)):
path = tf.keras.utils.get_file(os.path.basename(video_url)[-128:], video_url)
cap = cv2.VideoCapture(path)
frames = []
try:
while True:
ret, frame = cap.read()
if not ret:
break
frame = crop_center_square(frame)
frame = cv2.resize(frame, resize)
frame = frame[:, :, [2, 1, 0]]
frames.append(frame)
if len(frames) == max_frames:
break
finally:
cap.release()
frames = np.array(frames)
if len(frames) < max_frames:
n_repeat = int(math.ceil(max_frames / float(len(frames))))
frames = frames.repeat(n_repeat, axis=0)
frames = frames[:max_frames]
return frames / 255.0
def display_video(urls):
html = '<table>'
html += '<tr><th>Video 1</th><th>Video 2</th><th>Video 3</th></tr><tr>'
for url in urls:
html += '<td>'
html += '<img src="{}" height="224">'.format(url)
html += '</td>'
html += '</tr></table>'
return display.HTML(html)
def display_query_and_results_video(query, urls, scores):
Display a text query and the top result videos and scores.
sorted_ix = np.argsort(-scores)
html = ''
html += '<h2>Input query: <i>{}</i> </h2><div>'.format(query)
html += 'Results: <div>'
html += '<table>'
html += '<tr><th>Rank #1, Score:{:.2f}</th>'.format(scores[sorted_ix[0]])
html += '<th>Rank #2, Score:{:.2f}</th>'.format(scores[sorted_ix[1]])
html += '<th>Rank #3, Score:{:.2f}</th></tr><tr>'.format(scores[sorted_ix[2]])
for i, idx in enumerate(sorted_ix):
url = urls[sorted_ix[i]];
html += '<td>'
html += '<img src="{}" height="224">'.format(url)
html += '</td>'
html += '</tr></table>'
return html
# @title Load example videos and define text queries { display-mode: "form" }
video_1_url = 'https://upload.wikimedia.org/wikipedia/commons/b/b0/YosriAirTerjun.gif' # @param {type:"string"}
video_2_url = 'https://upload.wikimedia.org/wikipedia/commons/e/e6/Guitar_solo_gif.gif' # @param {type:"string"}
video_3_url = 'https://upload.wikimedia.org/wikipedia/commons/3/30/2009-08-16-autodrift-by-RalfR-gif-by-wau.gif' # @param {type:"string"}
video_1 = load_video(video_1_url)
video_2 = load_video(video_2_url)
video_3 = load_video(video_3_url)
all_videos = [video_1, video_2, video_3]
query_1_video = 'waterfall' # @param {type:"string"}
query_2_video = 'playing guitar' # @param {type:"string"}
query_3_video = 'car drifting' # @param {type:"string"}
all_queries_video = [query_1_video, query_2_video, query_3_video]
all_videos_urls = [video_1_url, video_2_url, video_3_url]
display_video(all_videos_urls)
Explanation: Import the TF-Hub model
This tutorial demonstrates how to use the S3D MIL-NCE model from TensorFlow Hub to perform text-to-video retrieval, i.e. to find the videos that are most similar to a given text query.
The model has 2 signatures: one for generating video embeddings and one for generating text embeddings. We use these embeddings to find the nearest neighbors in the embedding space.
End of explanation
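# The 'text' signature can also be called on its own for a single ad-hoc query; the query
# string below is an illustrative assumption, and the call mirrors generate_embeddings above.
single_query_embedding = hub_model.signatures['text'](tf.constant(['a person surfing']))['text_embedding']
print(single_query_embedding.shape)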
# Prepare video inputs.
videos_np = np.stack(all_videos, axis=0)
# Prepare text input.
words_np = np.array(all_queries_video)
# Generate the video and text embeddings.
video_embd, text_embd = generate_embeddings(hub_model, videos_np, words_np)
# Scores between video and text is computed by dot products.
all_scores = np.dot(text_embd, tf.transpose(video_embd))
# Display results.
html = ''
for i, words in enumerate(words_np):
html += display_query_and_results_video(words, all_videos_urls, all_scores[i, :])
html += '<br>'
display.HTML(html)
Explanation: Demonstrate text-to-video retrieval
End of explanation |
14,761 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Data Handling Utilities
tiff file directory to tiff stack conversion
A utility script that can be executed from the command line to convert tif files in a directory into a tif stack
Step1: The --pattern argument allows you to define a regular expression pattern for the files in --dir to build a stack only from files that match the regular expression pattern, ie --pattern ChanA will build a stack from files that contain ChanA in its name.
To select files whose name begin with ChanA write --pattern ^Chan.
Speed
It took approximately 3 minutes to build a stack with 3000 files.
tiff stack extraction from raw files
A utility script that can be used to extract tiff stacks from raw data files acquired by ThorLabs microscope.
Step2: tiff stacks as numpy arrays
Furthermore I wrote functions that can easily load these tiff stacks into python (also an IPython notebook) as numpy arrays.
But because loading these large tiffs takes similarly long as building a stack, I added a caching layer that saves faster loading hdf5 binaries of the arrays.
This also explains the --nocache option for the build_tiff_stack.py scripts. By default the script right away saves a fast loading cache file. Its name is simply filename.hdf5. But be aware that this doubles the volume of your data.
However, using this caching tiff stacks now load like a charm
Step3: on kumo it takes on average ~ 0.8 s to load a 1.5 G stack, whereas on my computer it takes now on average 2.13 s to load the 1.5 G stacks.
We just saw, the utilities come with a logger .. | Python Code:
%%bash
build_tiff_stack.py --help
Explanation: Data Handling Utilities
tiff file directory to tiff stack conversion
A utility script that can be executed from the command line to convert tif files in a directory into a tif stack:
End of explanation
%%bash
extract_channels_from_raw.py --help
Explanation: The --pattern argument allows you to define a regular expression pattern for the files in --dir to build a stack only from files that match the regular expression pattern, ie --pattern ChanA will build a stack from files that contain ChanA in its name.
To select files whose name begin with ChanA write --pattern ^Chan.
Speed
It took approximately 3 minutes to build a stack with 3000 files.
tiff stack extraction from raw files
A utility script that can be used to extract tiff stacks from raw data files acquired by ThorLabs microscope.
End of explanation
datafiles = [
'/home/michael/datac/data1/ChanA_0001_0001_0001.tif',
'/home/michael/datac/data1/ChanB_0001_0001_0001.tif',
'/home/michael/datac/data2/ChanA_0001_0001_0001.tif',
'/home/michael/datac/data2/ChanB_0001_0001_0001.tif',
'/home/michael/datac/data3/ChanA_0001_0001_0001.tif',
'/home/michael/datac/data3/ChanB_0001_0001_0001.tif',
]
import neuralyzer
%%timeit
stackdata = neuralyzer.get_data(datafiles[1])
Explanation: tiff stacks as numpy arrays
Furthermore I wrote functions that can easily load these tiff stacks into python (also an IPython notebook) as numpy arrays.
But because loading these large tiffs takes similarly long as building a stack, I added a caching layer that saves faster loading hdf5 binaries of the arrays.
This also explains the --nocache option for the build_tiff_stack.py scripts. By default the script right away saves a fast loading cache file. Its name is simply filename.hdf5. But be aware that this doubles the volume of your data.
However, using this caching, tiff stacks now load like a charm:
End of explanation
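For readers curious what such an hdf5 caching layer might look like, here is a rough sketch assuming the h5py and tifffile packages; the function name, dataset key, and cache-file naming are illustrative and not the actual neuralyzer API.
import os
import h5py
import tifffile

def load_stack_cached(tif_path):
    # Sketch only: read from the .hdf5 cache when present, otherwise read the tif and write the cache
    cache_path = tif_path + '.hdf5'
    if os.path.exists(cache_path):
        with h5py.File(cache_path, 'r') as f:
            return f['stack'][:]
    stack = tifffile.imread(tif_path)
    with h5py.File(cache_path, 'w') as f:
        f.create_dataset('stack', data=stack)
    return stack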
stackdata = neuralyzer.get_data(datafiles[0])
whos
Explanation: on kumo it takes on average ~ 0.8 s to load a 1.5 G stack, whereas on my computer it takes now on average 2.13 s to load the 1.5 G stacks.
As we just saw, the utilities come with a logger.
End of explanation |
14,762 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Generate data needed for visualization in Tableau
Start with the standard imports we have used for every notebook in this class.
Step1: Each of the datasheets downloaded from ELSI had download metadata on the top of them and total and key information on the bottom of them that were not data rows. This metadata, total, and key information was manually deleted before import. Some of the files had ="0" instead of 0 in the cells. This was found/replaced before import using the sed -i '' 's/="0"/0/g' *.csv command from the terminal.
Step2: Check the lengths of the datasets to see if we have a row for every school.
Step3: Drop all of the duplicate columns.
Step4: Join all of the school datasets.
Step5: This is the full list of all column names in the schools dataframe. | Python Code:
%matplotlib inline
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("poster")
Explanation: Generate data needed for visualization in Tableau
Start with the standard imports we have used for every notebook in this class.
End of explanation
#CITATION: This is the data from National Center for Education Statistics on Schools
#Schools for all 50 states and Washington, D.C.
#http://nces.ed.gov/ccd/elsi/
#ELSI Root Data Source: U.S. Department of Education National Center for Education Statistics Common Core of Data (CCD) "Public Elementary/Secondary School Universe Survey" 2012-13 v.1a 2013-14 v.1a.
#KEY:
#† indicates that the data are not applicable.
#– indicates that the data are missing.
#‡ indicates that the data do not meet NCES data quality standards.
schoolinformation = pd.read_csv("data/rawdata/schools/2009-2010 SCHOOL Information Tab.csv", dtype=np.str)
schoolcharacteristicsa = pd.read_csv("data/rawdata/schools/2009-2010 SCHOOL CharacteristicsA Tab.csv", dtype=np.str)
schoolcharacteristicsb = pd.read_csv("data/rawdata/schools/2009-2010 SCHOOL CharacteristicsB Tab.csv", dtype=np.str)
schoolenrollment = pd.read_csv("data/rawdata/schools/2009-2010 SCHOOL Enrollments Tab.csv", dtype=np.str)
schoolenrollmentdetails = pd.read_csv("data/rawdata/schools/2009-2010 SCHOOL Enrollment Details Tab.csv", dtype=np.str)
#schoolenrollmentK3 = pd.read_csv("tempdata/2013-2014 SCHOOL Enrollment by Grade, Race-Ethnicity, and Gender Tab PreK-3.csv", dtype=np.str)
#schoolenrollment48 = pd.read_csv("tempdata/2013-2014 SCHOOL Enrollment by Grade, Race-Ethnicity and Gender Tab 4-8.csv", dtype=np.str)
#schoolenrollment912 = pd.read_csv("tempdata/2013-2014 SCHOOL Enrollment by Grade, Race-Ethnicity and Gender Tab 9-Ungraded.csv", dtype=np.str)
schoolteacherstaff = pd.read_csv("data/rawdata/schools/2009-2010 SCHOOL Teacher and Staff Tab.csv", dtype=np.str)
Explanation: Each of the datasheets downloaded from ELSI had download metadata on the top of them and total and key information on the bottom of them that were not data rows. This metadata, total, and key information was manually deleted before import. Some of the files had ="0" instead of 0 in the cells. This was found/replaced before import using the sed -i '' 's/="0"/0/g' *.csv command from the terminal.
End of explanation
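If you would rather stay inside Python, the same ="0" cleanup could be done with pandas instead of sed. This is only an alternative sketch (it was not part of the original workflow) and it assumes the same directory layout used in the read_csv calls above, plus a reasonably recent pandas for the regex keyword.
import glob

for path in glob.glob("data/rawdata/schools/*.csv"):
    raw = pd.read_csv(path, dtype=str)
    # Strip the Excel-style ="..." wrapper from every cell
    raw = raw.apply(lambda col: col.str.replace(r'^="(.*)"$', r'\1', regex=True))
    raw.to_csv(path, index=False)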
print len(schoolinformation)
print len(schoolcharacteristicsa)
print len(schoolcharacteristicsb)
print len(schoolenrollment)
print len(schoolenrollmentdetails)
#print len(schoolenrollmentK3)
#print len(schoolenrollment48)
#print len(schoolenrollment912)
print len(schoolteacherstaff)
Explanation: Check the lengths of the datasets to see if we have a row for every school.
End of explanation
schoolcharacteristicsa = schoolcharacteristicsa.drop(schoolcharacteristicsa.columns[[0, 1, 20]], 1)
schoolcharacteristicsb = schoolcharacteristicsb.drop(schoolcharacteristicsb.columns[[0, 1]], 1)
schoolenrollment = schoolenrollment.drop(schoolenrollment.columns[[0, 1]], 1)
schoolenrollmentdetails = schoolenrollmentdetails.drop(schoolenrollmentdetails.columns[[0, 1]], 1)
#schoolenrollmentK3 = schoolenrollmentK3.drop(schoolenrollmentK3.columns[[0, 1]], 1)
#schoolenrollment48 = schoolenrollment48.drop(schoolenrollment48.columns[[0, 1]], 1)
#schoolenrollment912 = schoolenrollment912.drop(schoolenrollment912.columns[[0, 1, 72]], 1)
schoolinformation.head()
Explanation: Drop all of the duplicate columns.
End of explanation
joinedschool = schoolteacherstaff.join([schoolcharacteristicsa, schoolcharacteristicsb, schoolenrollment, schoolenrollmentdetails])
joinedschool = schoolinformation.merge(joinedschool, 'left', 'School ID - NCES Assigned [Public School] Latest available year', suffixes=('', '_DEL'))
#Need to get rid of Excel syntax ="" from some of the columns
for i, col in enumerate(joinedschool.columns):
joinedschool[col] = joinedschool[col].map(lambda x: str(x).lstrip('="').rstrip('"'))
#If by chance any rows have NaN, replace with the ELSI standard for missing data '–'
joinedschool = joinedschool.fillna('–')
joinedschool = joinedschool.replace('nan', '–')
# Replacing Missing Data / NA / Bad Quality data with blank, later to be turned into NaN for float columns
# CITATION : http://pandas.pydata.org/pandas-docs/version/0.15.2/missing_data.html
joinedschool = joinedschool.replace('\xe2\x80\x93', '') # Replace "-" (Missing Data) with blank
joinedschool = joinedschool.replace('\xe2\x80\xa0', '') # Replace "†" (Not Applicable) with blank
joinedschool = joinedschool.replace('\xe2\x80\xa1', '') # Replace "‡" (Bad Quality) with blank
joinedschool['i_agency_type_regional_education_services'] = np.where(joinedschool['Agency Type [District] 2009-10']=='4-Regional education services agency', 1, 0)
joinedschool['i_agency_type_local_school_district'] = np.where(joinedschool['Agency Type [District] 2009-10']=='1-Local school district', 1, 0)
joinedschool['i_lgo_PK'] = np.where(joinedschool['Lowest Grade Offered [Public School] 2009-10']=='Prekindergarten', 1, 0)
joinedschool['i_lgo_K'] = np.where(joinedschool['Lowest Grade Offered [Public School] 2009-10']=='Kindergarten', 1, 0)
joinedschool['Black Students [Public School] 2009-10'] = joinedschool['Black Students [Public School] 2009-10'].replace('', np.nan)
joinedschool['Grades 9-12 Students [Public School] 2009-10'] = joinedschool['Grades 9-12 Students [Public School] 2009-10'].replace('', np.nan)
joinedschool['Total Students [Public School] 2009-10'] = joinedschool['Total Students [Public School] 2009-10'].replace('', np.nan)
joinedschool['Black Students [Public School] 2009-10'] = joinedschool['Black Students [Public School] 2009-10'].astype(float)
joinedschool['Grades 9-12 Students [Public School] 2009-10'] = joinedschool['Grades 9-12 Students [Public School] 2009-10'].astype(float)
joinedschool['Total Students [Public School] 2009-10'] = joinedschool['Total Students [Public School] 2009-10'].astype(float)
joinedschool['r_stud_re_B'] = joinedschool['Black Students [Public School] 2009-10']/joinedschool['Total Students [Public School] 2009-10']
joinedschool['r_stud_912'] = joinedschool['Grades 9-12 Students [Public School] 2009-10']/joinedschool['Total Students [Public School] 2009-10']
joinedschool['r_st_TS'] = joinedschool['Pupil/Teacher Ratio [Public School] 2009-10']
joinedschool = joinedschool[['School Name [Public School] 2009-10', 'State Name [Public School] Latest available year', 'School ID - NCES Assigned [Public School] Latest available year', 'Agency ID - NCES Assigned [Public School] Latest available year', 'i_agency_type_regional_education_services', 'i_agency_type_local_school_district', 'i_lgo_PK', 'i_lgo_K', 'r_stud_re_B', 'r_stud_912', 'r_st_TS']]
joinedschool.head()
joinedschool=joinedschool.replace([np.inf, -np.inf], np.nan)
joinedschool.fillna(value=0,inplace=True)
joinedschool.head()
joinedschool.to_csv("data/finaldata/tableauschools.csv")
Explanation: Join all of the school datasets.
End of explanation
for col in joinedschool.columns:
print col
Explanation: This is the full list of all column names in the schools dataframe.
End of explanation |
14,763 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Question 2
Step1: Question 3.1
Step2: Question 3.2: Does the most popular of all the 'Lil's have the most followers?
Step3: Question 4 (first part)
Step4: Question 5 Picking up artists
Step5: 6) Will the world explode if a musician swears? Get an average popularity for their explicit songs vs. their non-explicit songs. How many minutes of explicit songs do they have? Non-explicit?
Step6: First Lil'Uzi Vert
Step7: Now Lil Dicky
Step8: 7 a) Since we're talking about Lils, what about Biggies? How many total "Biggie" artists are there? How many total "Lil"s? If you made 1 request every 5 seconds, how long would it take to download information on all the Lils vs the Biggies?
Step9: 8) Out of the top 50 "Lil"s and the top 50 "Biggie"s, who is more popular on average? | Python Code:
#Question 2 answer.
for artist in artists:
print(artist['name'], artist['popularity'])
if len(artist['genres']) == 0:
print("no genres listed")
else:
genres = ", ".join(artist['genres'])
print("Genres list: ", genres)
Explanation: Question 2: What genres are most represented in the search results? Edit your previous printout to also display a list of their genres in the format "GENRE_1, GENRE_2, GENRE_3". If there are no genres, print "No genres listed".
End of explanation
most_popular_name = ""
most_popular_score = 0
for artist in artists:
#counting problem:
followers = artist['followers']
print(artist['name'], artist['popularity'])
print(artist['popularity'], "comparing to", most_popular_score)
if artist['popularity'] > most_popular_score:
print("found a new one!")
if artist['name'] == "Lil Wayne":
print("oh, nice try", artist['name'])
else:
print("This is the new king!")
most_popular_name = artist['name']
most_popular_score = artist['popularity']
print(artist['name'], followers)
print('answer for question 3.1')
print(most_popular_name, "is the most popular of all the Lils, with a popularity score of", most_popular_score)
Explanation: Question 3.1: Who is the most popular of all the Lils?
End of explanation
most_followers_name = ""
most_followers_score = 0
artists = data['artists']['items']
for artist in artists:
total_followers = artist['followers']['total']
print(artist['name'], artist['popularity'], total_followers)
if total_followers > most_followers_score:
if artist['name'] == "Lil Wayne":
print("not you wayne")
else:
print('updating the new winner')
most_followers_name = artist['name']
most_followers_score = total_followers
print("who has more followers?")
print(most_followers_name, "is the winner and has around", most_followers_score, "followers")
if most_followers_name != "Lil Yachty":
print("Lil Yachty is not the winner in the followers category")
Explanation: Question 3.2: Does the most popular of all the 'Lil's have the most followers?
End of explanation
#Question 4 (first part)
for artist in artists:
if artist['name'] == "Lil' Kim":
print("found lil kim")
print(artist['popularity'])
#Question 4 (second part)
lil_kim_popularity = 62
# AGGREGATION PROBLEM
more_popular_than_lil_kim = []
# THE LOOP
for artist in artists:
if artist['popularity'] > lil_kim_popularity:
print(artist['name'], "is MORE POPULAR with a score of", artist['popularity'])
more_popular_than_lil_kim.append(artist['name'])
new_list = ", ".join(more_popular_than_lil_kim)
print("Artists that are more popular than Lil Kim:", new_list)
Explanation: Question 4 (first part)
End of explanation
for artist in artists:
print(artist['name'], artist['id'] )
Explanation: Question 5 Picking up artists
End of explanation
#Artist chosen:
#Lil_Uzi_Vert_id = 4O15NlyKLIASxsJ0PrXPfz
#Lil_Dicky_id = 1tqhsYv8yBBdwANFNzHtcr
import requests
response = requests.get("https://api.spotify.com/v1/artists/4O15NlyKLIASxsJ0PrXPfz/top-tracks?country=US")
data2 = response.json()
Explanation: 6) Will the world explode if a musician swears? Get an average popularity for their explicit songs vs. their non-explicit songs. How many minutes of explicit songs do they have? Non-explicit?
End of explanation
type(data2)
data2.keys()
type(data2['tracks'])
List_of_explicit_tracks= []
List_of_non_explicit_tracks= []
tracks = data2['tracks']
for track in tracks:
if track['explicit'] == True:
print(track['name'], "is explicit")
List_of_explicit_tracks.append(track['name'])
else:
print(track['name'],"with a popularity of", track['popularity'], "is not explicit")
List_of_non_explicit_tracks.append(track['name'])
print("#######")
print(len(List_of_explicit_tracks), "tracks of Lil Uzi are explicit",",",len(List_of_non_explicit_tracks), "tracks are not explicit")
lil_uzi_tracks = tracks
explicit_track_pop_total = 0
non_explicit_track_pop_total = 0
lil_uzi_list_explicit = []
lil_uzi_list_nonexplicit = []
for tracks in lil_uzi_tracks:
if tracks['explicit'] == True:
explicit_track_pop_total = explicit_track_pop_total + tracks['popularity']
lil_uzi_list_explicit.append(explicit_track_pop_total)
elif tracks['explicit'] == False:
non_explicit_track_pop_total = non_explicit_track_pop_total + tracks['popularity']
lil_uzi_list_nonexplicit.append(non_explicit_track_pop_total)
explicit_track_duration_total = 0
non_explicit_track_duration_total = 0
lil_uzi_list_explicit_dur = []
lil_uzi_list_nonexplicit_dur = []
for tracks in lil_uzi_tracks:
if tracks['explicit'] == True:
explicit_track_duration_total = explicit_track_duration_total + tracks['duration_ms']
lil_uzi_list_explicit_dur.append(explicit_track_duration_total)
elif tracks['explicit'] == False:
non_explicit_track_duration_total = non_explicit_track_duration_total + tracks['duration_ms']
lil_uzi_list_nonexplicit_dur.append(non_explicit_track_duration_total)
print("The average popularity of explicit tracks:", float(explicit_track_pop_total)/len(lil_uzi_list_explicit))
print("The average popularity of non-explicit tracks:", sum(lil_uzi_list_nonexplicit)) #We already know this is 0
print("The duration of explicit tracks in minutes:", sum(lil_uzi_list_explicit_dur)/60000)
Explanation: First Lil'Uzi Vert
End of explanation
import requests
response = requests.get("https://api.spotify.com/v1/artists/1tqhsYv8yBBdwANFNzHtcr/top-tracks?country=US")
data4 = response.json()
lil_Dicky_tracks = data4
List_of_explicit_tracks2= []
List_of_non_explicit_tracks2= []
tracks2 = lil_Dicky_tracks['tracks']
for track in tracks2:
if track['explicit'] == True:
print(track['name'], "is explicit")
List_of_explicit_tracks2.append(track['name'])
elif track['explicit'] == False:
print(track['name'],"with a popularity of", track['popularity'], "is not explicit")
List_of_non_explicit_tracks2.append(track['name'])
print("#######")
print(len(List_of_explicit_tracks2), "tracks of Lil Dicky are explicit",",", len(List_of_non_explicit_tracks2), "tracks are not explicit")
explicit_track_pop_total2 = 0
non_explicit_track_pop_total2 = 0
lil_Dick_list_explicit = []
lil_Dick_list_nonexplicit = []
tracks3 = lil_Dicky_tracks['tracks']
for tracks in tracks3:
if tracks['explicit'] == True:
explicit_track_pop_total2 = explicit_track_pop_total2 + tracks['popularity']
lil_Dick_list_explicit.append(explicit_track_pop_total2)
elif tracks['explicit'] == False:
non_explicit_track_pop_total2 = non_explicit_track_pop_total2 + tracks['popularity']
lil_Dick_list_nonexplicit.append(non_explicit_track_pop_total2)
explicit_track_duration_total = 0
explicit_track_duration_total2 = 0
non_explicit_track_duration_total2 = 0
lil_Dick_list_explicit_dur = []
lil_Dick_list_nonexplicit_dur = []
tracks3 = lil_Dicky_tracks['tracks']
for tracks in tracks3:
if tracks['explicit'] == True:
explicit_track_duration_total2 = explicit_track_duration_total2 + tracks['duration_ms']
lil_Dick_list_explicit_dur.append(explicit_track_duration_total2)
elif tracks['explicit'] == False:
non_explicit_track_duration_total2 = non_explicit_track_duration_total2 + tracks['duration_ms']
lil_Dick_list_nonexplicit_dur.append(non_explicit_track_duration_total2)
print("The average popularity of explicit tracks:", float(explicit_track_pop_total2)/len(lil_Dick_list_explicit))
print("The average popularity of non-explicit tracks:", sum(lil_Dick_list_nonexplicit)) #We already know this is 0
print("The duration of explicit tracks in minutes:", sum(lil_Dick_list_explicit_dur)/60000)
Explanation: Now Lil Dicky
End of explanation
# BIGGIE DATA
response = requests.get('https://api.spotify.com/v1/search?query=Biggie&type=artist&limit=50&market=US')
Biggie_data = response.json()
# LIL'S DATA
response = requests.get('https://api.spotify.com/v1/search?query=Lil&type=artist&limit=50&market=US')
Lil_data = response.json()
response = requests.get('https://api.spotify.com/v1/search?query=Lil&type=artist&limit=50&market=US')
Lil_data = response.json()
Biggie_artists = Biggie_data['artists']['total']
Lil_artists = Lil_data['artists']['total']
print("There are", Biggie_artists, "artists named Biggie on Spotify and", Lil_artists, "named with Lil on it",)
Total_Download_Time_Biggie = Biggie_artists / 50 * 5
Total_Download_Time_Lil = Lil_artists / 50 * 5
print("It would take", round(Total_Download_Time_Biggie), "seconds to download all the Biggie artists and", round(Total_Download_Time_Lil), "seconds to download the Lil artists." )
Explanation: 7 a) Since we're talking about Lils, what about Biggies? How many total "Biggie" artists are there? How many total "Lil"s? If you made 1 request every 5 seconds, how long would it take to download information on all the Lils vs the Biggies?
End of explanation
Lil_artists_pop = Lil_data['artists']['items']
for popularity in Lil_artists_pop:
print(popularity['name'], popularity['popularity'])
Biggie_pop = Biggie_data['artists']['items']
for artist in Biggie_pop:
print(artist['name'], artist['popularity'])
Lil_artists_pop = Lil_data['artists']['items']
popularity_total = 0
for popularity in Lil_artists_pop:
popularity_total = popularity_total + popularity['popularity']
print("The average rating for the top 50 artists called Lil is:", round(popularity_total / 50))
Biggie_artists_pop = Biggie_data['artists']['items']
Biggie_popularity_total = 0
for popularity2 in Biggie_artists_pop:
Biggie_popularity_total = Biggie_popularity_total + popularity2['popularity']
print("The average rating for the top 50 artists called Biggie is:", round(Biggie_popularity_total / 50) )
Explanation: 8) Out of the top 50 "Lil"s and the top 50 "Biggie"s, who is more popular on average?
End of explanation |
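As a footnote to question 8, the same comparison can be written more compactly. This sketch simply reuses the Lil_data and Biggie_data responses fetched above and divides by the actual number of returned artists rather than a hard-coded 50.
lil_items = Lil_data['artists']['items']
biggie_items = Biggie_data['artists']['items']
lil_avg = sum(a['popularity'] for a in lil_items) / len(lil_items)
biggie_avg = sum(a['popularity'] for a in biggie_items) / len(biggie_items)
print("Average popularity - Lil:", round(lil_avg), "| Biggie:", round(biggie_avg))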
14,764 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
← Back to Index
Basic Feature Extraction
Somehow, we must extract the characteristics of our audio signal that are most relevant to the problem we are trying to solve. For example, if we want to classify instruments by timbre, we will want features that distinguish sounds by their timbre and not their pitch. If we want to perform pitch detection, we want features that distinguish pitch and not timbre.
This process is known as feature extraction.
Let's begin with twenty audio files
Step1: Display the kick drum signals
Step2: Display the snare drum signals
Step3: Constructing a Feature Vector
A feature vector is simply a collection of features. Here is a simple function that constructs a two-dimensional feature vector from a signal
Step4: If we want to aggregate all of the feature vectors among signals in a collection, we can use a list comprehension as follows
Step5: Visualize the differences in features by plotting separate histograms for each of the classes
Step6: Feature Scaling
The features that we used in the previous example included zero crossing rate and spectral centroid. These two features are expressed using different units. This discrepancy can pose problems when performing classification later. Therefore, we will normalize each feature vector to a common range and store the normalization parameters for later use.
Many techniques exist for scaling your features. For now, we'll use sklearn.preprocessing.MinMaxScaler. MinMaxScaler returns an array of scaled values such that each feature dimension is in the range -1 to 1.
Let's concatenate all of our feature vectors into one feature table
Step7: Scale each feature dimension to be in the range -1 to 1
Step8: Plot the scaled features | Python Code:
kick_signals = [
librosa.load(p)[0] for p in Path().glob('audio/drum_samples/train/kick_*.mp3')
]
snare_signals = [
librosa.load(p)[0] for p in Path().glob('audio/drum_samples/train/snare_*.mp3')
]
len(kick_signals)
len(snare_signals)
Explanation: ← Back to Index
Basic Feature Extraction
Somehow, we must extract the characteristics of our audio signal that are most relevant to the problem we are trying to solve. For example, if we want to classify instruments by timbre, we will want features that distinguish sounds by their timbre and not their pitch. If we want to perform pitch detection, we want features that distinguish pitch and not timbre.
This process is known as feature extraction.
Let's begin with twenty audio files: ten kick drum samples, and ten snare drum samples. Each audio file contains one drum hit.
Read and store each signal:
End of explanation
plt.figure(figsize=(15, 6))
for i, x in enumerate(kick_signals):
plt.subplot(2, 5, i+1)
librosa.display.waveplot(x[:10000])
plt.ylim(-1, 1)
Explanation: Display the kick drum signals:
End of explanation
plt.figure(figsize=(15, 6))
for i, x in enumerate(snare_signals):
plt.subplot(2, 5, i+1)
librosa.display.waveplot(x[:10000])
plt.ylim(-1, 1)
Explanation: Display the snare drum signals:
End of explanation
def extract_features(signal):
return [
librosa.feature.zero_crossing_rate(signal)[0, 0],
librosa.feature.spectral_centroid(signal)[0, 0],
]
Explanation: Constructing a Feature Vector
A feature vector is simply a collection of features. Here is a simple function that constructs a two-dimensional feature vector from a signal:
End of explanation
kick_features = numpy.array([extract_features(x) for x in kick_signals])
snare_features = numpy.array([extract_features(x) for x in snare_signals])
Explanation: If we want to aggregate all of the feature vectors among signals in a collection, we can use a list comprehension as follows:
End of explanation
plt.figure(figsize=(14, 5))
plt.hist(kick_features[:,0], color='b', range=(0, 0.2), alpha=0.5, bins=20)
plt.hist(snare_features[:,0], color='r', range=(0, 0.2), alpha=0.5, bins=20)
plt.legend(('kicks', 'snares'))
plt.xlabel('Zero Crossing Rate')
plt.ylabel('Count')
plt.figure(figsize=(14, 5))
plt.hist(kick_features[:,1], color='b', range=(0, 4000), bins=30, alpha=0.6)
plt.hist(snare_features[:,1], color='r', range=(0, 4000), bins=30, alpha=0.6)
plt.legend(('kicks', 'snares'))
plt.xlabel('Spectral Centroid (frequency bin)')
plt.ylabel('Count')
Explanation: Visualize the differences in features by plotting separate histograms for each of the classes:
End of explanation
feature_table = numpy.vstack((kick_features, snare_features))
print(feature_table.shape)
Explanation: Feature Scaling
The features that we used in the previous example included zero crossing rate and spectral centroid. These two features are expressed using different units. This discrepancy can pose problems when performing classification later. Therefore, we will normalize each feature vector to a common range and store the normalization parameters for later use.
Many techniques exist for scaling your features. For now, we'll use sklearn.preprocessing.MinMaxScaler. MinMaxScaler returns an array of scaled values such that each feature dimension is in the range -1 to 1.
Let's concatenate all of our feature vectors into one feature table:
End of explanation
scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(-1, 1))
training_features = scaler.fit_transform(feature_table)
print(training_features.min(axis=0))
print(training_features.max(axis=0))
Explanation: Scale each feature dimension to be in the range -1 to 1:
End of explanation
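Because the feature-scaling discussion above notes that the normalization parameters are stored for later use, here is one way that reuse might look. The two test feature vectors below are invented purely for illustration; scaler.transform applies the ranges learned from the training set without refitting.
# Hypothetical feature vectors for two new drum hits (values invented for illustration)
test_features = numpy.array([[0.05, 1500.0],
                             [0.13, 3200.0]])
scaled_test = scaler.transform(test_features)
print(scaled_test)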
plt.scatter(training_features[:10,0], training_features[:10,1], c='b')
plt.scatter(training_features[10:,0], training_features[10:,1], c='r')
plt.xlabel('Zero Crossing Rate')
plt.ylabel('Spectral Centroid')
Explanation: Plot the scaled features:
End of explanation |
14,765 | Given the following text problem statement, write Python code to implement the functionality described below in problem statement
Problem:
I'm using tensorflow 2.10.0. | Problem:
import tensorflow as tf
x = tf.Variable(0)
x.assign(114514) |
14,766 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Compare evoked responses for different conditions
In this example, an Epochs object for visual and auditory responses is created.
Both conditions are then accessed by their respective names to create a sensor
layout plot of the related evoked responses.
Step1: Set parameters
Step2: Show topography for two different conditions | Python Code:
# Authors: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD-3-Clause
import matplotlib.pyplot as plt
import mne
from mne.viz import plot_evoked_topo
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
Explanation: Compare evoked responses for different conditions
In this example, an Epochs object for visual and auditory responses is created.
Both conditions are then accessed by their respective names to create a sensor
layout plot of the related evoked responses.
End of explanation
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'
event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up amplitude-peak rejection values for MEG channels
reject = dict(grad=4000e-13, mag=4e-12)
# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks='meg', baseline=(None, 0), reject=reject)
# Generate list of evoked objects from conditions names
evokeds = [epochs[name].average() for name in ('left', 'right')]
Explanation: Set parameters
End of explanation
colors = 'blue', 'red'
title = 'MNE sample data\nleft vs right (A/V combined)'
plot_evoked_topo(evokeds, color=colors, title=title, background_color='w')
plt.show()
Explanation: Show topography for two different conditions
End of explanation |
14,767 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Importing the CSV files
This CSV file is available from Iran's dataset page at the World Bank (https://www.worldbank.org/en/country/iran) and Turkey's dataset page on the same website.
Step1: As I wanted the emission types to be my columns and the years to be the rows, I used the transpose() function. Some data was missing for the last three years, which I substituted with zero.
Step2: Distribution plot for CO2 emissions from liquid fuel consumption
Step3: # Joint plot for CO2 emissions from solid fuel consumption in Iran and Turkey
As there were no values for the gaseous column before 1978, I preferred to present my data using solid fuel emission values instead. | Python Code:
# Importing Iran's dataset
IRAN_SOURCE_FILE = 'iran_emission_dataset.csv'
iran_csv = pd.read_csv(IRAN_SOURCE_FILE)
iran_csv.head(5)
# Importing Turkey's dataset
TURKEY_SOURCE_FILE = 'turkey_emission_dataset.csv'
turkey_csv = pd.read_csv(TURKEY_SOURCE_FILE)
turkey_csv.head(5)
Explanation: Importing the CSV files
This CSV file is available from Iran's dataset page at the World Bank (https://www.worldbank.org/en/country/iran) and Turkey's dataset page on the same website. I cleaned this data using LibreOffice and kept the important rows to prevent complexity in my code.
End of explanation
iran_csv = iran_csv.transpose()
iran_csv = iran_csv.fillna(0)
iran_csv.columns = iran_csv.ix[0,:]
iran_csv = iran_csv.ix[1:,:]
iran_csv.astype(np.float64)
iran_csv.head(5)
turkey_csv = turkey_csv.transpose()
turkey_csv = turkey_csv.fillna(0)
turkey_csv.columns = turkey_csv.ix[0,:]
turkey_csv = turkey_csv.ix[1:,:]
turkey_csv.astype(np.float64)
turkey_csv.head(5)
Explanation: As I wanted the emission types to be my columns and the years to be the rows, I used the transpose() function. Some data was missing for the last three years, which I substituted with zero.
End of explanation
#Iran (Blue)
sns.distplot(iran_csv.ix[:,1])
#Turkey (Green)
sns.distplot(turkey_csv.ix[:,1])
Explanation: Distribution plot for CO2 emissions from liquid fuel consumption
End of explanation
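Since the two distributions above are only told apart by seaborn's default colors (noted in the comments), one small optional addition, not in the original notebook, is an explicit legend and title:
import matplotlib.pyplot as plt
plt.legend(['Iran', 'Turkey'])
plt.title('CO2 emissions from liquid fuel consumption')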
SOLID_FUEL_COLUMN_INDEX = 2
a = sns.jointplot(iran_csv.ix[:,SOLID_FUEL_COLUMN_INDEX],
turkey_csv.ix[:,SOLID_FUEL_COLUMN_INDEX]).set_axis_labels(
"IRAN: " + iran_csv.columns[SOLID_FUEL_COLUMN_INDEX],
"TURKEY: " + turkey_csv.columns[SOLID_FUEL_COLUMN_INDEX])
a.savefig("output.png")
Explanation: # Joint plot for CO2 emissions from solid fuel consumption in Iran and Turkey
As there were no values for the gaseous column before 1978, I preferred to present my data using solid fuel emission values instead.
End of explanation |
14,768 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
operator* multiplies the same reference
Step1: Changing l[1] actually references a different object
Integers are immutable.
Step2: Setting l[2] to the same number does not create a new object
Step3: Common integers are preallocated
Step4: List comprehensions are one way to create separate lists | Python Code:
l = [[]] * 3
l[0] is l[1], l[0] is l[2]
l[0].append("abc")
l
l = [1] * 3
print(l)
l[0] is l[1], l[0] is l[2]
Explanation: operator* multiplies the same reference
End of explanation
l[1] = 2
print(l)
l[0] is l[1], l[0] is l[2], l[1] is l[2]
Explanation: Changing l[1] actually references a different object
Integers are immutable.
End of explanation
l[2] = 2
print(l)
l[0] is l[1], l[0] is l[2], l[1] is l[2]
Explanation: Setting l[2] to the same number does not create a new object
End of explanation
for i in range(-10, 260):
x = i
y = i + 1 - 1
if x is not y:
print(i)
Explanation: Common integers are preallocated
End of explanation
l = [[] for _ in range(3)]
l
l[0].append("abc")
l
Explanation: List comprehensions are one way to create separate lists
End of explanation |
14,769 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
An introduction to NumPy
NumPy provides an efficient representation of multidimensional datasets like vectors and matrices, and tools for linear algebra and general matrix manipulations - essential building blocks of virtually all technical computing
Typically NumPy is imported as np
Step1: NumPy, at its core, provides a powerful array object. Let's start by exploring how the NumPy array differs from a Python list.
We start by creating a simple Python list and a NumPy array with identical contents
Step2: Element indexing
Elements of a one-dimensional array are accessed with the same syntax as a list
Step3: Differences between arrays and lists
The first difference to note between lists and arrays is that arrays are homogeneous; i.e. all elements of an array must be of the same type. In contrast, lists can contain elements of arbitrary type. For example, we can change the last element in our list above to be a string
Step4: But the same can not be done with an array, as we get an error message
Step5: Caveat, it can be done, but really don't do it; lists are generally better at non-homogeneous collections.
Array Properties and Methods
The following provide basic information about the size, shape and data in the array
Step6: Arrays also have many useful statistical/mathematical methods
Step7: Data types
The information about the type of an array is contained in its dtype attribute.
Step8: Once an array has been created, its dtype is fixed (in this case to an 8 byte/64 bit signed integer) and it can only store elements of the same type.
For this example where the dtype is integer, if we try storing a floating point number in the array it will be automatically converted into an integer
Step9: NumPy comes with most of the common data types (and some uncommon ones too).
The most used (and portable) dtypes are
Step10: Floating point precision is covered in detail at http
Step11: Creating Arrays
Above we created an array from an existing list. Now let's look into other ways in which we can create arrays.
A common need is to have an array initialized with a constant value. Very often this value is 0 or 1.
zeros creates arrays of all zeros, with any desired dtype
Step12: and similarly for ones
Step13: If we want an array initialized with an arbitrary value, we can create an empty array and then use the fill method to put the value we want into the array
Step14: Alternatives, such as
Step15: The linspace and logspace functions to create linearly and logarithmically-spaced grids respectively, with a fixed number of points that include both ends of the specified interval
Step16: Finally, it is often useful to create arrays with random numbers that follow a specific distribution.
The np.random module contains a number of functions that can be used to this effect.
For more details see http
Step17: To produce an array of 5 random samples taken from a standard normal distribution (0 mean and variance 1)
Step18: For an array of 5 samples from the normal distribution with a mean of 10 and a variance of 3
Step19: Indexing with other arrays
Above we saw how to index NumPy arrays with single numbers and slices, just like Python lists.
Arrays also allow for a more sophisticated kind of indexing that is very powerful
Step20: Now that we have this mask, we can use it to either read those values or to reset them to 0
Step21: Whilst beyond the scope of this course, it is also worth knowing that a specific masked array object exists in NumPy.
Further details are available at http
Step22: With two-dimensional arrays we start seeing the power of NumPy
Step23: Question
Step24: Multidimensional array creation
The array creation functions listed previously can also be used to create arrays with more than one dimension.
For example
Step25: In fact, the shape of an array can be changed at any time, as long as the total number of elements is unchanged.
For example, if we want a 2x4 array with numbers increasing from 0, the easiest way to create it is
Step26: Slices
With multidimensional arrays you can also index using slices, and you can mix and match slices and single indices in the different dimensions
Step27: If you only provide one index to slice a multidimensional array, then the slice will be expanded to "
Step28: This is also known as "ellipsis".
Ellipsis can be specified explicitly with "...". It will automatically expand to "
Step29: Operating with arrays
Arrays support all regular arithmetic operators, and the NumPy library also contains a complete collection of basic mathematical functions that operate on arrays.
It is important to remember that in general, all operations with arrays are applied element-wise, i.e., are applied to all the elements of the array at the same time. For example
Step30: Importantly, even the multiplication operator is by default applied element-wise. However it is not the matrix multiplication from linear algebra
Step31: We may also multiply an array by a scalar
Step32: This is an example of broadcasting.
Broadcasting
The fact that NumPy operates on an element-wise basis means that in principle arrays must always match one another's shape. However, NumPy will also helpfully "broadcast" dimensions when possible.
Here is an example of broadcasting a scalar to a 1D array
Step33: We can also broadcast a 1D array to a 2D array, in this case adding a vector to all rows of a matrix
Step34: We can also broadcast in two directions at a time
Step35: Pictorially
Step36: Exercise 1
1. Use np.arange and reshape to create the array
A = [[1 2 3 4]
[5 6 7 8]]
2. Use np.array to create the array
B = [1 2]
3. Use broadcasting to add B to A to create the final array
A + B = [[2 3 4 5]
[7 8 9 10]]
Hint
Step37: As you can see in this example, the value of the axis parameter is the dimension that will be consumed once the operation has been carried out. This is why to sum along the columns we use axis=0.
This can be easily illustrated with an example that has more dimensions; we create an array with 4 dimensions and shape (3,4,5,6) and sum along the axis number 2 (i.e. the third axis, since in Python all counts are 0-based). That consumes the dimension whose length was 5, leaving us with a new array that has shape (3,4,6)
Step38: Another widely used property of arrays is the .T attribute, which allows you to access the transpose of the array
Step39: Generating 2D coordinate arrays
A common task is to generate a pair of arrays that represent the coordinates of our data.
When orthogonal 1d coordinate arrays already exist, NumPy's meshgrid function is very useful
Step40: Reshape and newaxis
Reshaping arrays is a common task in order to make the best of NumPy's powerful broadcasting.
A useful tip with the reshape method is that it is possible to provide a -1 length for at most one of the dimensions. This indicates that NumPy should automatically calculate the length of this dimension
Step41: Another way to increase the dimensionality of an array is to use the newaxis keyword
Step42: Views, not Copies
Note that reshaping (like most NumPy operations), wherever possible, provides a view of the same memory
Step43: What this means is that if one array is modified, the other will also be updated
Step44: This lack of copying allows for very efficient vectorized operations, but this power should be used carefully - if used badly it can lead to some bugs that are hard to track down.
If in doubt, you can always copy the data to a different block of memory with the copy() method.
Element-wise Functions
NumPy ships with a full complement of mathematical functions that work on entire arrays, including logarithms, exponentials, trigonometric and hyperbolic trigonometric functions, etc.
For example, sampling the sine function at 100 points between $0$ and $2\pi$ is as simple as
Step45: Or to sample the exponential function between $-5$ and $5$ at intervals of $0.5$
Step46: Linear algebra in NumPy
NumPy ships with a basic linear algebra library, and all arrays have a dot method whose behavior is that of the scalar dot product when its arguments are vectors (one-dimensional arrays) and the traditional matrix multiplication when one or both of its arguments are two-dimensional arrays
Step47: For matrix-matrix multiplication, the regular $matrix \times matrix$ rules must be satisfied. For example $A \times A^T$
Step48: results in a (2, 2) array, yet $A^T \times A$ results in a (3, 3).
Why is this?
Step49: NumPy makes no distinction between row and column vectors and simply verifies that the dimensions match the required rules of matrix multiplication.
Below is an example of matrix-vector multiplication, and in this case we have a $2 \times 3$ matrix multiplied by a 3-vector, which produces a 2-vector | Python Code:
import numpy as np
Explanation: An introduction to NumPy
NumPy provides an efficient representation of multidimensional datasets like vectors and matrices, and tools for linear algebra and general matrix manipulations - essential building blocks of virtually all technical computing
Typically NumPy is imported as np:
End of explanation
lst = [10, 20, 30, 40]
arr = np.array([10, 20, 30, 40])
print(lst)
print(arr)
Explanation: NumPy, at its core, provides a powerful array object. Let's start by exploring how the NumPy array differs from a Python list.
We start by creating a simple Python list and a NumPy array with identical contents:
End of explanation
print(lst[0], arr[0])
print(lst[-1], arr[-1])
print(lst[2:], arr[2:])
Explanation: Element indexing
Elements of a one-dimensional array are accessed with the same syntax as a list:
End of explanation
lst[-1] = 'a string inside a list'
lst
Explanation: Differences between arrays and lists
The first difference to note between lists and arrays is that arrays are homogeneous; i.e. all elements of an array must be of the same type. In contrast, lists can contain elements of arbitrary type. For example, we can change the last element in our list above to be a string:
End of explanation
arr[-1] = 'a string inside an array'
Explanation: But the same can not be done with an array, as we get an error message:
End of explanation
print('Data type :', arr.dtype)
print('Total number of elements :', arr.size)
print('Number of dimensions :', arr.ndim)
print('Shape (dimensionality) :', arr.shape)
print('Memory used (in bytes) :', arr.nbytes)
Explanation: Caveat, it can be done, but really don't do it; lists are generally better at non-homogeneous collections.
Array Properties and Methods
The following provide basic information about the size, shape and data in the array:
End of explanation
print('Minimum and maximum :', arr.min(), arr.max())
print('Sum and product of all elements :', arr.sum(), arr.prod())
print('Mean and standard deviation :', arr.mean(), arr.std())
Explanation: Arrays also have many useful statistical/mathematical methods:
End of explanation
arr.dtype
Explanation: Data types
The information about the type of an array is contained in its dtype attribute.
End of explanation
arr[-1] = 1.234
arr
Explanation: Once an array has been created, its dtype is fixed (in this case to an 8 byte/64 bit signed integer) and it can only store elements of the same type.
For this example where the dtype is integer, if we try storing a floating point number in the array it will be automatically converted into an integer:
End of explanation
np.array(256, dtype=np.uint8)
float_info = ('{finfo.dtype}: max={finfo.max:<18}, '
'approx decimal precision={finfo.precision};')
print(float_info.format(finfo=np.finfo(np.float32)))
print(float_info.format(finfo=np.finfo(np.float64)))
Explanation: NumPy comes with most of the common data types (and some uncommon ones too).
The most used (and portable) dtypes are:
bool
uint8
int (machine dependent)
int8
int32
int64
float (machine dependent)
float32
float64
Full details can be found at http://docs.scipy.org/doc/numpy/user/basics.types.html.
What are the limits of the common NumPy integer types?
End of explanation
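One way to answer that question programmatically is NumPy's iinfo helper, mirroring the finfo pattern shown above; a small sketch:
int_info = '{iinfo.dtype}: min={iinfo.min}, max={iinfo.max}'
for int_type in (np.int8, np.uint8, np.int32, np.int64):
    # iinfo reports the representable range of each integer dtype
    print(int_info.format(iinfo=np.iinfo(int_type)))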
np.array(1, dtype=np.uint8).astype(np.float32)
Explanation: Floating point precision is covered in detail at http://en.wikipedia.org/wiki/Floating_point.
However, we can convert an array from one type to another with the astype method
End of explanation
np.zeros(5, dtype=np.float)
np.zeros(3, dtype=np.int)
Explanation: Creating Arrays
Above we created an array from an existing list. Now let's look into other ways in which we can create arrays.
A common need is to have an array initialized with a constant value. Very often this value is 0 or 1.
zeros creates arrays of all zeros, with any desired dtype:
End of explanation
print('5 ones:', np.ones(5, dtype=np.int))
Explanation: and similarly for ones:
End of explanation
a = np.empty(4, dtype=np.float)
a.fill(5.5)
a
Explanation: If we want an array initialized with an arbitrary value, we can create an empty array and then use the fill method to put the value we want into the array:
End of explanation
np.arange(10, dtype=np.float64)
np.arange(5, 7, 0.1)
Explanation: Alternatives, such as:
np.ones(4) * 5.5
np.zeros(4) + 5.5
are generally less efficient, but are also reasonable.
Filling arrays with sequences
NumPy also offers the arange function, which works like the builtin range but returns an array instead of a list:
End of explanation
print("A linear grid between 0 and 1:")
print(np.linspace(0, 1, 5))
print("A logarithmic grid between 10**2 and 10**4:")
print(np.logspace(2, 4, 3))
Explanation: The linspace and logspace functions to create linearly and logarithmically-spaced grids respectively, with a fixed number of points that include both ends of the specified interval:
End of explanation
import numpy as np
import numpy.random
Explanation: Finally, it is often useful to create arrays with random numbers that follow a specific distribution.
The np.random module contains a number of functions that can be used to this effect.
For more details see http://docs.scipy.org/doc/numpy/reference/routines.random.html.
Creating random arrays
Finally, it is often useful to create arrays with random numbers that follow a specific distribution. The np.random module contains a number of functions that can be used to this effect.
First, we must import it:
End of explanation
print(np.random.randn(5))
Explanation: To produce an array of 5 random samples taken from a standard normal distribution (0 mean and variance 1):
End of explanation
norm10 = np.random.normal(10, 3, 5)
print(norm10)
Explanation: For an array of 5 samples from the normal distribution with a mean of 10 and a variance of 3:
End of explanation
mask = norm10 > 9
mask
Explanation: Indexing with other arrays
Above we saw how to index NumPy arrays with single numbers and slices, just like Python lists.
Arrays also allow for a more sophisticated kind of indexing that is very powerful: you can index an array with another array, and in particular with an array of boolean values.
This is particularly useful to extract information from an array that matches a certain condition.
Consider for example that in the array norm10 we want to replace all values above 9 with the value 0. We can do so by first finding the mask that indicates where this condition is true or false:
End of explanation
print(('Values above 9:', norm10[mask]))
print('Resetting all values above 9 to 0...')
norm10[mask] = 0
print(norm10)
Explanation: Now that we have this mask, we can use it to either read those values or to reset them to 0:
End of explanation
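A related tool worth knowing (an aside, not part of the original flow) is np.where, which builds a new array instead of modifying one in place:
sample = np.random.normal(10, 3, 5)
# Values above 9 are replaced by 0 in the result; sample itself is left untouched
print(np.where(sample > 9, 0, sample))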
lst2 = [[1, 2, 3], [4, 5, 6]]
arr2 = np.array([[1, 2, 3], [4, 5, 6]])
print(arr2)
print(arr2.shape)
Explanation: Whilst beyond the scope of this course, it is also worth knowing that a specific masked array object exists in NumPy.
Further details are available at http://docs.scipy.org/doc/numpy/reference/maskedarray.generic.html
Arrays with more than one dimension
Up until now all our examples have used one-dimensional arrays. NumPy can also create arrays of arbitrary dimensions, and all the methods illustrated in the previous section work on arrays with more than one dimension.
A list of lists can be used to initialize a two dimensional array:
End of explanation
print(lst2[0][1])
print(arr2[0, 1])
Explanation: With two-dimensional arrays we start seeing the power of NumPy: while nested lists can be indexed by repeatedly using the [ ] operator, multidimensional arrays support a much more natural indexing syntax using a single [ ] and a set of indices separated by commas:
End of explanation
print(lst2[0:2][1])
print(arr2[0:2, 1])
Explanation: Question: Why does the following example produce different results?
End of explanation
np.zeros((2, 3))
np.random.normal(10, 3, size=(2, 4))
Explanation: Multidimensional array creation
The array creation functions listed previously can also be used to create arrays with more than one dimension.
For example:
End of explanation
arr = np.arange(8).reshape(2, 4)
print(arr)
Explanation: In fact, the shape of an array can be changed at any time, as long as the total number of elements is unchanged.
For example, if we want a 2x4 array with numbers increasing from 0, the easiest way to create it is:
End of explanation
arr = np.arange(2, 18, 2).reshape(2, 4)
print(arr)
print('Second element from dimension 0, last 2 elements from dimension one:')
print(arr[1, 2:])
Explanation: Slices
With multidimensional arrays you can also index using slices, and you can mix and match slices and single indices in the different dimensions:
End of explanation
print('First row: ', arr[0], 'is equivalent to', arr[0, :])
print('Second row: ', arr[1], 'is equivalent to', arr[1, :])
Explanation: If you only provide one index to slice a multidimensional array, then the slice will be expanded to ":" for all of the remaining dimensions:
End of explanation
arr1 = np.empty((4, 6, 3))
print('Orig shape: ', arr1.shape)
print(arr1[...].shape)
print(arr1[..., 0:2].shape)
print(arr1[2:4, ..., ::2].shape)
print(arr1[2:4, :, ..., ::-1].shape)
Explanation: This is also known as "ellipsis".
Ellipsis can be specified explicitly with "...". It will automatically expand to ":" for each of the unspecified dimensions in the array, and can even be used at the beginning of the slice:
End of explanation
arr1 = np.arange(4)
arr2 = np.arange(10, 14)
print(arr1, '+', arr2, '=', arr1 + arr2)
Explanation: Operating with arrays
Arrays support all regular arithmetic operators, and the NumPy library also contains a complete collection of basic mathematical functions that operate on arrays.
It is important to remember that in general, all operations with arrays are applied element-wise, i.e., are applied to all the elements of the array at the same time. For example:
End of explanation
print(arr1, '*', arr2, '=', arr1 * arr2)
Explanation: Importantly, even the multiplication operator is by default applied element-wise. However it is not the matrix multiplication from linear algebra:
End of explanation
1.5 * arr1
Explanation: We may also multiply an array by a scalar:
End of explanation
print(np.arange(3))
print(np.arange(3) + 5)
Explanation: This is an example of broadcasting.
Broadcasting
The fact that NumPy operates on an element-wise basis means that in principle arrays must always match one another's shape. However, NumPy will also helpfully "broadcast" dimensions when possible.
Here is an example of broadcasting a scalar to a 1D array:
End of explanation
np.ones((3, 3)) + np.arange(3)
Explanation: We can also broadcast a 1D array to a 2D array, in this case adding a vector to all rows of a matrix:
End of explanation
a = np.arange(3).reshape((3, 1))
b = np.arange(3)
print(a, '+', b, '=\n', a + b)
Explanation: We can also broadcast in two directions at a time:
End of explanation
arr1 = np.ones((2, 3))
arr2 = np.ones((2, 1))
# arr1 + arr2
arr1 = np.ones((2, 3))
arr2 = np.ones(3)
# arr1 + arr2
arr1 = np.ones((1, 3))
arr2 = np.ones((2, 1))
# arr1 + arr2
arr1 = np.ones((1, 3))
arr2 = np.ones((1, 2))
# arr1 + arr2
arr1 = np.ones((1, 3))
arr3 = arr2[:, :, np.newaxis]
# arr1 + arr3
Explanation: Pictorially:
(image source)
Rules of Broadcasting
Broadcasting follows these three rules:
If the two arrays differ in their number of dimensions, the shape of the array with fewer dimensions is padded with ones on its leading (left) side.
If the shape of the two arrays does not match in any dimension, either array with shape equal to 1 in a given dimension is stretched to match the other shape.
If in any dimension the sizes disagree and neither has shape equal to 1, an error is raised.
Note that all of this happens without ever actually creating the expanded arrays in memory! This broadcasting behavior is in practice enormously powerful, especially given that when NumPy broadcasts to create new dimensions or to 'stretch' existing ones, it doesn't actually duplicate the data. In the example above the operation is carried out as if the scalar 1.5 was a 1D array with 1.5 in all of its entries, but no actual array is ever created. This can save lots of memory in cases when the arrays in question are large. As such this can have significant performance implications.
Broadcasting Examples
So when we do...
np.arange(3) + 5
The scalar 5 is:
first 'promoted' to a 1-dimensional array of length 1 (rule 1)
then, this array is 'stretched' to length 3 to match the first array. (rule 2)
After these two operations are complete, the addition can proceed as now both operands are one-dimensional arrays of length 3.
When we do
np.ones((3, 3)) + np.arange(3)
The second array is:
first 'promoted' to a 2-dimensional array of shape (1, 3) (rule 1)
then axis 0 is 'stretched' to length 3 to match the first array (rule 2)
When we do
np.arange(3).reshape((3, 1)) + np.arange(3)
The second array is:
first 'promoted' to a 2-dimensional array of shape (1, 3) (rule 1)
then axis 0 is 'stretched' to form an array of shape (3, 3) (rule 2)
and the first array's axis 1 is 'stretched' to form an array of shape (3, 3) (rule 2)
Then the operation proceeds as if on two 3 $\times$ 3 arrays.
The general rule is: when operating on two arrays, NumPy compares their shapes element-wise. It starts with the trailing dimensions, and works its way forward, creating dimensions of length 1 as needed. Two dimensions are considered compatible when
they are equal to begin with, or
one of them is 1; in this case NumPy will do the 'stretching' to make them equal.
If these conditions are not met, a ValueError: operands could not be broadcast together exception is thrown, indicating that the arrays have incompatible shapes.
Questions:
What will the result of adding arr1 with arr2 be in the following cases?
End of explanation
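If you want to check your answers to those questions programmatically, np.broadcast reports the resulting shape and raises ValueError for incompatible operands; a quick sketch (np.broadcast_shapes is a newer alternative available from NumPy 1.20):
a = np.ones((2, 3))
b = np.ones((2, 1))
print(np.broadcast(a, b).shape)   # (2, 3); incompatible shapes raise ValueError instead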
print('For the following array:\n', arr)
print('The sum of elements along the rows is :', arr.sum(axis=1))
print('The sum of elements along the columns is :', arr.sum(axis=0))
Explanation: Exercise 1
1. Use np.arange and reshape to create the array
A = [[1 2 3 4]
[5 6 7 8]]
2. Use np.array to create the array
B = [1 2]
3. Use broadcasting to add B to A to create the final array
A + B = [[2 3 4 5]
[7 8 9 10]]
Hint: what shape does B have to be changed to?
Array Properties and Methods (cont.)
For multidimensional arrays it is possible to carry out computations along a single dimension by passing the axis parameter:
End of explanation
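For reference, one possible solution sketch for Exercise 1 (other equally valid reshapes exist):
A = np.arange(1, 9).reshape(2, 4)
B = np.array([1, 2])
print(A + B.reshape(2, 1))   # equivalently: A + B[:, np.newaxis]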
np.zeros((3, 4, 5, 6)).sum(axis=2).shape
Explanation: As you can see in this example, the value of the axis parameter is the dimension that will be consumed once the operation has been carried out. This is why to sum along the columns we use axis=0.
This can be easily illustrated with an example that has more dimensions; we create an array with 4 dimensions and shape (3,4,5,6) and sum along the axis number 2 (i.e. the third axis, since in Python all counts are 0-based). That consumes the dimension whose length was 5, leaving us with a new array that has shape (3,4,6):
End of explanation
print('Array:\n', arr)
print('Transpose:\n', arr.T)
Explanation: Another widely used property of arrays is the .T attribute, which allows you to access the transpose of the array:
End of explanation
x = np.linspace(0, 9, 3)
y = np.linspace(-8, 4, 3)
x2d, y2d = np.meshgrid(x, y)
print(x2d)
print(y2d)
Explanation: Generating 2D coordinate arrays
A common task is to generate a pair of arrays that represent the coordinates of our data.
When orthogonal 1d coordinate arrays already exist, NumPy's meshgrid function is very useful:
End of explanation
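A memory-friendly variant worth knowing is meshgrid's sparse keyword, which keeps the two grids as (1, n) and (n, 1) arrays and lets broadcasting do the rest; a brief sketch reusing x and y from above:
xs, ys = np.meshgrid(x, y, sparse=True)
print(xs.shape, ys.shape)        # (1, 3) and (3, 1)
print((xs + ys).shape)           # broadcasting expands to the full (3, 3) grid on use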
np.arange(6).reshape((1, -1))
np.arange(6).reshape((2, -1))
Explanation: Reshape and newaxis
Reshaping arrays is a common task in order to make the best of NumPy's powerful broadcasting.
A useful tip with the reshape method is that it is possible to provide a -1 length for at most one of the dimensions. This indicates that NumPy should automatically calculate the length of this dimension:
End of explanation
arr = np.arange(6)
print(arr[np.newaxis, :, np.newaxis].shape)
Explanation: Another way to increase the dimensionality of an array is to use the newaxis keyword:
End of explanation
arr = np.arange(8)
arr_view = arr.reshape(2, 4)
Explanation: Views, not Copies
Note that reshaping (like most NumPy operations), wherever possible, provides a view of the same memory:
End of explanation
# Print the "view" array from reshape.
print('Before\n', arr_view)
# Update the first element of the original array.
arr[0] = 1000
# Print the "view" array from reshape again,
# noticing the first value has changed.
print('After\n', arr_view)
Explanation: What this means is that if one array is modified, the other will also be updated:
End of explanation
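If you are ever unsure whether two arrays share memory, a quick check is np.shares_memory (available since NumPy 1.11); this sketch reuses arr and arr_view from above:
print(np.shares_memory(arr, arr_view))          # True: reshape returned a view
print(np.shares_memory(arr, arr_view.copy()))   # False: copy() allocates new memory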
x = np.linspace(0, 2*np.pi, 100)
y = np.sin(x)
Explanation: This lack of copying allows for very efficient vectorized operations, but this power should be used carefully - if used badly it can lead to some bugs that are hard to track down.
If in doubt, you can always copy the data to a different block of memory with the copy() method.
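For example, a small sketch of how copy() breaks the link between the original array and the derived one:
import numpy as np
arr = np.arange(8)
arr_copy = arr.reshape(2, 4).copy()   # an independent block of memory, not a view
arr[0] = 1000
print(arr_copy)                       # unchanged: the copy does not see the update to arr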
Element-wise Functions
NumPy ships with a full complement of mathematical functions that work on entire arrays, including logarithms, exponentials, trigonometric and hyperbolic trigonometric functions, etc.
For example, sampling the sine function at 100 points between $0$ and $2\pi$ is as simple as:
End of explanation
x = np.arange(-5, 5.5, 0.5)
y = np.exp(x)
Explanation: Or to sample the exponential function between $-5$ and $5$ at intervals of $0.5$:
End of explanation
v1 = np.array([2, 3, 4])
v2 = np.array([1, 0, 1])
print(v1, '.', v2, '=', np.dot(v1, v2))
Explanation: Linear algebra in NumPy
NumPy ships with a basic linear algebra library, and all arrays have a dot method whose behavior is that of the scalar dot product when its arguments are vectors (one-dimensional arrays) and the traditional matrix multiplication when one or both of its arguments are two-dimensional arrays:
End of explanation
A = np.arange(6).reshape(2, 3)
print(A, '\n')
print(np.dot(A, A.T))
Explanation: For matrix-matrix multiplication, the regular $matrix \times matrix$ rules must be satisfied. For example $A \times A^T$:
End of explanation
print(np.dot(A.T, A))
Explanation: results in a (2, 2) array, yet $A^T \times A$ results in a (3, 3).
Why is this?:
End of explanation
print(A, 'x', v1, '=', np.dot(A, v1))
Explanation: NumPy makes no distinction between row and column vectors and simply verifies that the dimensions match the required rules of matrix multiplication.
Below is an example of matrix-vector multiplication, and in this case we have a $2 \times 3$ matrix multiplied by a 3-vector, which produces a 2-vector:
End of explanation |
14,770 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Vertex SDK
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
Step3: Before you begin
GPU runtime
This tutorial does not require a GPU runtime.
Set up your Google Cloud project
The following steps are required, regardless of your notebook environment.
Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.
Make sure that billing is enabled for your project.
Enable the following APIs
Step4: Region
You can also change the REGION variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
Americas
Step5: Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
Step6: Authenticate your Google Cloud account
If you are using Google Cloud Notebooks, your environment is already authenticated. Skip this step.
If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
Otherwise, follow these steps
Step7: Create a Cloud Storage bucket
The following steps are required, regardless of your notebook environment.
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Next, set up some variables used throughout the tutorial.
Import libraries and define constants
Step11: Initialize Vertex SDK for Python
Initialize the Vertex SDK for Python for your project and corresponding bucket.
Step12: Set hardware accelerators
You can set hardware accelerators for training and prediction.
Set the variables TRAIN_GPU/TRAIN_NGPU and DEPLOY_GPU/DEPLOY_NGPU to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify
Step13: Set pre-built containers
Set the pre-built Docker container image for training and prediction.
For the latest list, see Pre-built containers for training.
For the latest list, see Pre-built containers for prediction.
Step14: Set machine type
Next, set the machine type to use for training and prediction.
Set the variables TRAIN_COMPUTE and DEPLOY_COMPUTE to configure the compute resources for the VMs you will use for for training and prediction.
machine type
n1-standard
Step15: Tutorial
Now you are ready to start creating your own custom model and training for Boston Housing.
Examine the training package
Package layout
Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.
PKG-INFO
README.md
setup.cfg
setup.py
trainer
__init__.py
task.py
The files setup.cfg and setup.py are the instructions for installing the package into the operating environment of the Docker image.
The file trainer/task.py is the Python script for executing the custom training job. Note that when we refer to it in the worker pool specification, we replace the directory slash with a dot (trainer.task) and drop the file suffix (.py).
Package Assembly
In the following cells, you will assemble the training package.
Step16: Task.py contents
In the next cell, you write the contents of the training script task.py. I won't go into detail; it's just there for you to browse. In summary
Step17: Store training script on your Cloud Storage bucket
Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
Step18: Create and run custom training job
To train a custom model, you perform two steps
Step19: Prepare your command-line arguments
Now define the command-line arguments for your custom training container
Step20: Run the custom training job
Next, you run the custom job to start the training job by invoking the method run, with the following parameters
Step21: Load the saved model
Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction.
To load, you use the TF.Keras model.load_model() method passing it the Cloud Storage path where the model is saved -- specified by MODEL_DIR.
Step22: Evaluate the model
Now let's find out how good the model is.
Load evaluation data
You will load the Boston Housing test (holdout) data from tf.keras.datasets, using the method load_data(). This returns the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements
Step23: Perform the model evaluation
Now evaluate how well the model in the custom job did.
Step24: Get the serving function signature
You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.
When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.
You also need to know the name of the serving function's input and output layer for constructing the explanation metadata -- which is discussed subsequently.
Step25: Explanation Specification
To get explanations when doing a prediction, you must enable the explanation capability and set corresponding settings when you upload your custom model to an Vertex Model resource. These settings are referred to as the explanation metadata, which consists of
Step26: Explanation Metadata
Let's first dive deeper into the explanation metadata, which consists of
Step27: Upload the model
Next, upload your model to a Model resource using Model.upload() method, with the following parameters
Step28: Send a batch prediction request
Send a batch prediction to your deployed model.
Make test items
You will use synthetic data as a test data items. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction.
Make the batch input file
Now make a batch input file, which you will store in your local Cloud Storage bucket. Unlike image, video and text, the batch input file for tabular is only supported for CSV. For CSV file, you make
Step29: Make the batch explanation request
Now that your Model resource is trained, you can make a batch prediction by invoking the batch_predict() method, with the following parameters
Step30: Wait for completion of batch prediction job
Next, wait for the batch job to complete. Alternatively, one can set the parameter sync to True in the batch_predict() method to block until the batch prediction job is completed.
Step31: Get the explanations
Next, get the explanation results from the completed batch prediction job.
The results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method iter_outputs() to get a list of each Cloud Storage file generated with the results. Each file contains one or more explanation requests in a CSV format
Step32: Cleaning up
To clean up all Google Cloud resources used in this project, you can delete the Google Cloud
project you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial | Python Code:
import os
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
Explanation: Vertex SDK: Custom training tabular regression model for batch prediction with explainabilty
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_custom_tabular_regression_batch_explain.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_custom_tabular_regression_batch_explain.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
<td>
<a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_custom_tabular_regression_batch_explain.ipynb">
Open in Google Cloud Notebooks
</a>
</td>
</table>
<br/><br/><br/>
Overview
This tutorial demonstrates how to use the Vertex SDK to train and deploy a custom tabular regression model for batch prediction with explanation.
Dataset
The dataset used for this tutorial is the Boston Housing Prices dataset. The version of the dataset you will use in this tutorial is built into TensorFlow. The trained model predicts the median price of a house in units of 1K USD.
Objective
In this tutorial, you create a custom model, with a training pipeline, from a Python script in a Google prebuilt Docker container using the Vertex SDK, and then do a batch prediction with explanations on the uploaded model. You can alternatively create custom models using gcloud command-line tool or online using Cloud Console.
The steps performed include:
Create a Vertex custom job for training a model.
Train the TensorFlow model.
Retrieve and load the model artifacts.
View the model evaluation.
Set explanation parameters.
Upload the model as a Vertex Model resource.
Make a batch prediction with explanations.
Costs
This tutorial uses billable components of Google Cloud:
Vertex AI
Cloud Storage
Learn about Vertex AI
pricing and Cloud Storage
pricing, and use the Pricing
Calculator
to generate a cost estimate based on your projected usage.
Set up your local development environment
If you are using Colab or Google Cloud Notebooks, your environment already meets all the requirements to run this notebook. You can skip this step.
Otherwise, make sure your environment meets this notebook's requirements. You need the following:
The Cloud Storage SDK
Git
Python 3
virtualenv
Jupyter notebook running in a virtual environment with Python 3
The Cloud Storage guide to Setting up a Python development environment and the Jupyter installation guide provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:
Install and initialize the SDK.
Install Python 3.
Install virtualenv and create a virtual environment that uses Python 3. Activate the virtual environment.
To install Jupyter, run pip3 install jupyter on the command-line in a terminal shell.
To launch Jupyter, run jupyter notebook on the command-line in a terminal shell.
Open this notebook in the Jupyter Notebook Dashboard.
Installation
Install the latest version of Vertex SDK for Python.
End of explanation
! pip3 install -U google-cloud-storage $USER_FLAG
if os.getenv("IS_TESTING"):
! pip3 install --upgrade tensorflow $USER_FLAG
Explanation: Install the latest GA version of google-cloud-storage library as well.
End of explanation
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
Explanation: Restart the kernel
Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
End of explanation
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
Explanation: Before you begin
GPU runtime
This tutorial does not require a GPU runtime.
Set up your Google Cloud project
The following steps are required, regardless of your notebook environment.
Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.
Make sure that billing is enabled for your project.
Enable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.
If you are running this notebook locally, you will need to install the Cloud SDK.
Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $.
End of explanation
REGION = "us-central1" # @param {type: "string"}
Explanation: Region
You can also change the REGION variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
Americas: us-central1
Europe: europe-west4
Asia Pacific: asia-east1
You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
Learn more about Vertex AI regions
End of explanation
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
Explanation: Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
End of explanation
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
Explanation: Authenticate your Google Cloud account
If you are using Google Cloud Notebooks, your environment is already authenticated. Skip this step.
If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
Otherwise, follow these steps:
In the Cloud Console, go to the Create service account key page.
Click Create service account.
In the Service account name field, enter a name, and click Create.
In the Grant this service account access to project section, click the Role drop-down list. Type "Vertex" into the filter box, and select Vertex Administrator. Type "Storage Object Admin" into the filter box, and select Storage Object Admin.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
End of explanation
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    BUCKET_NAME = "gs://" + PROJECT_ID + "-aip-" + TIMESTAMP
Explanation: Create a Cloud Storage bucket
The following steps are required, regardless of your notebook environment.
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
End of explanation
! gsutil mb -l $REGION $BUCKET_NAME
Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.
End of explanation
! gsutil ls -al $BUCKET_NAME
Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents:
End of explanation
import google.cloud.aiplatform as aip
Explanation: Set up variables
Next, set up some variables used throughout the tutorial.
Import libraries and define constants
End of explanation
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
Explanation: Initialize Vertex SDK for Python
Initialize the Vertex SDK for Python for your project and corresponding bucket.
End of explanation
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (None, None)
if os.getenv("IS_TESTING_DEPLOY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPLOY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (None, None)
Explanation: Set hardware accelerators
You can set hardware accelerators for training and prediction.
Set the variables TRAIN_GPU/TRAIN_NGPU and DEPLOY_GPU/DEPLOY_NGPU to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify:
(aip.AcceleratorType.NVIDIA_TESLA_K80, 4)
Otherwise specify (None, None) to use a container image to run on a CPU.
Learn more here hardware accelerator support for your region
Note: TF releases before 2.3 with GPU support will fail to load the custom model in this tutorial. This is a known issue, caused by static graph ops generated in the serving function, and it is fixed in TF 2.3. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support.
End of explanation
if os.getenv("IS_TESTING_TF"):
TF = os.getenv("IS_TESTING_TF")
else:
TF = "2-1"
if TF[0] == "2":
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf-cpu.{}".format(TF)
TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
Explanation: Set pre-built containers
Set the pre-built Docker container image for training and prediction.
For the latest list, see Pre-built containers for training.
For the latest list, see Pre-built containers for prediction.
End of explanation
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
Explanation: Set machine type
Next, set the machine type to use for training and prediction.
Set the variables TRAIN_COMPUTE and DEPLOY_COMPUTE to configure the compute resources for the VMs you will use for for training and prediction.
machine type
n1-standard: 3.75GB of memory per vCPU.
n1-highmem: 6.5GB of memory per vCPU
n1-highcpu: 0.9 GB of memory per vCPU
vCPUs: number of [2, 4, 8, 16, 32, 64, 96 ]
Note: The following is not supported for training:
standard: 2 vCPUs
highcpu: 2, 4 and 8 vCPUs
Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs.
End of explanation
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: Boston Housing tabular regression\n\nVersion: 0.0.0\n\nSummary: Demonstration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: [email protected]\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
Explanation: Tutorial
Now you are ready to start creating your own custom model and training for Boston Housing.
Examine the training package
Package layout
Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.
PKG-INFO
README.md
setup.cfg
setup.py
trainer
__init__.py
task.py
The files setup.cfg and setup.py are the instructions for installing the package into the operating environment of the Docker image.
The file trainer/task.py is the Python script for executing the custom training job. Note that when we refer to it in the worker pool specification, we replace the directory slash with a dot (trainer.task) and drop the file suffix (.py).
Package Assembly
In the following cells, you will assemble the training package.
End of explanation
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for Boston Housing
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.001, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=20, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=100, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
parser.add_argument('--param-file', dest='param_file',
default='/tmp/param.txt', type=str,
help='Output file for parameters')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
def make_dataset():
# Scaling Boston Housing data features
def scale(feature):
max = np.max(feature)
        feature = (feature / max).astype(np.float32)
return feature, max
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
params = []
    for i in range(13):
        # Scale each of the 13 feature columns; keep the training max for each feature.
        x_train[:, i], max = scale(x_train[:, i])
        x_test[:, i], _ = scale(x_test[:, i])
        params.append(max)
# store the normalization (max) value for each feature
with tf.io.gfile.GFile(args.param_file, 'w') as f:
f.write(str(params))
return (x_train, y_train), (x_test, y_test)
# Build the Keras model
def build_and_compile_dnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Dense(128, activation='relu', input_shape=(13,)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(1, activation='linear')
])
model.compile(
loss='mse',
optimizer=tf.keras.optimizers.RMSprop(learning_rate=args.lr))
return model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
BATCH_SIZE = 16
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_dnn_model()
# Train the model
(x_train, y_train), (x_test, y_test) = make_dataset()
model.fit(x_train, y_train, epochs=args.epochs, batch_size=GLOBAL_BATCH_SIZE)
model.save(args.model_dir)
Explanation: Task.py contents
In the next cell, you write the contents of the training script task.py. I won't go into detail; it's just there for you to browse. In summary:
Gets the directory in which to save the model artifacts from the command-line argument (--model-dir); if it is not specified, falls back to the environment variable AIP_MODEL_DIR.
Loads Boston Housing dataset from TF.Keras builtin datasets
Builds a simple deep neural network model using TF.Keras model API.
Compiles the model (compile()).
Sets a training distribution strategy according to the argument args.distribute.
Trains the model (fit()) with epochs specified by args.epochs.
Saves the trained model (save(args.model_dir)) to the specified model directory.
Saves the maximum value for each feature f.write(str(params)) to the specified parameters file.
End of explanation
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_boston.tar.gz
Explanation: Store training script on your Cloud Storage bucket
Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
End of explanation
job = aip.CustomTrainingJob(
display_name="boston_" + TIMESTAMP,
script_path="custom/trainer/task.py",
container_uri=TRAIN_IMAGE,
requirements=["gcsfs==0.7.1", "tensorflow-datasets==4.4"],
)
print(job)
Explanation: Create and run custom training job
To train a custom model, you perform two steps: 1) create a custom training job, and 2) run the job.
Create custom training job
A custom training job is created with the CustomTrainingJob class, with the following parameters:
display_name: The human readable name for the custom training job.
container_uri: The training container image.
requirements: Package requirements for the training container image (e.g., pandas).
script_path: The relative path to the training script.
End of explanation
MODEL_DIR = "{}/{}".format(BUCKET_NAME, TIMESTAMP)
EPOCHS = 20
STEPS = 100
DIRECT = True
if DIRECT:
CMDARGS = [
"--model-dir=" + MODEL_DIR,
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
]
else:
CMDARGS = [
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
]
Explanation: Prepare your command-line arguments
Now define the command-line arguments for your custom training container:
args: The command-line arguments to pass to the executable that is set as the entry point into the container.
--model-dir : For our demonstrations, we use this command-line argument to specify where to store the model artifacts.
direct: You pass the Cloud Storage location as a command line argument to your training script (set variable DIRECT = True), or
indirect: The service passes the Cloud Storage location as the environment variable AIP_MODEL_DIR to your training script (set variable DIRECT = False). In this case, you tell the service the model artifact location in the job specification.
"--epochs=" + EPOCHS: The number of epochs for training.
"--steps=" + STEPS: The number of steps per epoch.
End of explanation
if TRAIN_GPU:
job.run(
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
accelerator_type=TRAIN_GPU.name,
accelerator_count=TRAIN_NGPU,
base_output_dir=MODEL_DIR,
sync=True,
)
else:
job.run(
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
base_output_dir=MODEL_DIR,
sync=True,
)
model_path_to_deploy = MODEL_DIR
Explanation: Run the custom training job
Next, you run the custom job to start the training job by invoking the method run, with the following parameters:
args: The command-line arguments to pass to the training script.
replica_count: The number of compute instances for training (replica_count = 1 is single node training).
machine_type: The machine type for the compute instances.
accelerator_type: The hardware accelerator type.
accelerator_count: The number of accelerators to attach to a worker replica.
base_output_dir: The Cloud Storage location to write the model artifacts to.
sync: Whether to block until completion of the job.
End of explanation
import tensorflow as tf
local_model = tf.keras.models.load_model(MODEL_DIR)
Explanation: Load the saved model
Your model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction.
To load, you use the TF.Keras model.load_model() method passing it the Cloud Storage path where the model is saved -- specified by MODEL_DIR.
End of explanation
import numpy as np
from tensorflow.keras.datasets import boston_housing
(_, _), (x_test, y_test) = boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
def scale(feature):
max = np.max(feature)
feature = (feature / max).astype(np.float32)
return feature
# Let's save one data item that has not been scaled
x_test_notscaled = x_test[0:1].copy()
for i in range(13):
    # Scale each of the 13 feature columns by its maximum, mirroring the training script.
    x_test[:, i] = scale(x_test[:, i])
x_test = x_test.astype(np.float32)
print(x_test.shape, x_test.dtype, y_test.shape)
print("scaled", x_test[0])
print("unscaled", x_test_notscaled)
Explanation: Evaluate the model
Now let's find out how good the model is.
Load evaluation data
You will load the Boston Housing test (holdout) data from tf.keras.datasets, using the method load_data(). This returns the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the feature data, and the corresponding labels (median value of owner-occupied home).
You don't need the training data, which is why we loaded it as (_, _).
Before you can run the data through evaluation, you need to preprocess it:
x_test:
1. Normalize (rescale) the data in each column by dividing each value by the maximum value of that column. This replaces each single value with a 32-bit floating point number between 0 and 1.
End of explanation
local_model.evaluate(x_test, y_test)
Explanation: Perform the model evaluation
Now evaluate how well the model in the custom job did.
End of explanation
loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
serving_output = list(loaded.signatures["serving_default"].structured_outputs.keys())[0]
print("Serving function output:", serving_output)
input_name = local_model.input.name
print("Model input name:", input_name)
output_name = local_model.output.name
print("Model output name:", output_name)
Explanation: Get the serving function signature
You can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.
When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request.
You also need to know the name of the serving function's input and output layer for constructing the explanation metadata -- which is discussed subsequently.
End of explanation
XAI = "ig" # [ shapley, ig, xrai ]
if XAI == "shapley":
PARAMETERS = {"sampled_shapley_attribution": {"path_count": 10}}
elif XAI == "ig":
PARAMETERS = {"integrated_gradients_attribution": {"step_count": 50}}
elif XAI == "xrai":
PARAMETERS = {"xrai_attribution": {"step_count": 50}}
parameters = aip.explain.ExplanationParameters(PARAMETERS)
Explanation: Explanation Specification
To get explanations when doing a prediction, you must enable the explanation capability and set corresponding settings when you upload your custom model to an Vertex Model resource. These settings are referred to as the explanation metadata, which consists of:
parameters: This is the specification for the explainability algorithm to use for explanations on your model. You can choose between:
Shapley - Note, not recommended for image data -- can be very long running
XRAI
Integrated Gradients
metadata: This is the specification for how the algorithm is applied on your custom model.
Explanation Parameters
Let's first dive deeper into the settings for the explainability algorithm.
Shapley
Assigns credit for the outcome to each feature, and considers different permutations of the features. This method provides a sampling approximation of exact Shapley values.
Use Cases:
- Classification and regression on tabular data.
Parameters:
path_count: This is the number of paths over the features that will be processed by the algorithm. An exact computation of the Shapley values requires M! paths, where M is the number of features. Even for the 13 features of the Boston Housing dataset that is 13! (roughly 6.2 billion) paths, and for image inputs with hundreds of features (e.g. 784 pixels for a 28*28 grayscale image) it is astronomically larger.
For any non-trivial number of features, this is too computationally expensive. You can reduce the number of paths over the features to M * path_count.
Integrated Gradients
A gradients-based method to efficiently compute feature attributions with the same axiomatic properties as the Shapley value.
Use Cases:
- Classification and regression on tabular data.
- Classification on image data.
Parameters:
step_count: This is the number of steps to approximate the remaining sum. The more steps, the more accurate the integral approximation. The general rule of thumb is 50 steps, but as you increase so does the compute time.
XRAI
Based on the integrated gradients method, XRAI assesses overlapping regions of the image to create a saliency map, which highlights relevant regions of the image rather than pixels.
Use Cases:
Classification on image data.
Parameters:
step_count: This is the number of steps to approximate the remaining sum. The more steps, the more accurate the integral approximation. The general rule of thumb is 50 steps, but as you increase so does the compute time.
In the next code cell, set the variable XAI to which explainabilty algorithm you will use on your custom model.
End of explanation
INPUT_METADATA = {
"input_tensor_name": serving_input,
"encoding": "BAG_OF_FEATURES",
"modality": "numeric",
"index_feature_mapping": [
"crim",
"zn",
"indus",
"chas",
"nox",
"rm",
"age",
"dis",
"rad",
"tax",
"ptratio",
"b",
"lstat",
],
}
OUTPUT_METADATA = {"output_tensor_name": serving_output}
input_metadata = aip.explain.ExplanationMetadata.InputMetadata(INPUT_METADATA)
output_metadata = aip.explain.ExplanationMetadata.OutputMetadata(OUTPUT_METADATA)
metadata = aip.explain.ExplanationMetadata(
inputs={"features": input_metadata}, outputs={"medv": output_metadata}
)
Explanation: Explanation Metadata
Let's first dive deeper into the explanation metadata, which consists of:
outputs: A scalar value in the output to attribute -- what to explain. For example, in a probability output [0.1, 0.2, 0.7] for classification, one wants an explanation for 0.7. Consider the following formulae, where the output is y and that is what we want to explain.
y = f(x)
Consider the following formulae, where the outputs are y and z. Since we can only do attribution for one scalar value, we have to pick whether we want to explain the output y or z. Assume in this example the model is object detection and y and z are the bounding box and the object classification. You would want to pick which of the two outputs to explain.
y, z = f(x)
The dictionary format for outputs is:
{ "outputs": { "[your_display_name]":
"output_tensor_name": [layer]
}
}
<blockquote>
- [your_display_name]: A human readable name you assign to the output to explain. A common example is "probability".<br/>
- "output_tensor_name": The key/value field to identify the output layer to explain. <br/>
- [layer]: The output layer to explain. In a single task model, like a tabular regressor, it is the last (topmost) layer in the model.
</blockquote>
inputs: The features for attribution -- how they contributed to the output. Consider the following formulae, where a and b are the features. We have to pick which features to explain how the contributed. Assume that this model is deployed for A/B testing, where a are the data_items for the prediction and b identifies whether the model instance is A or B. You would want to pick a (or some subset of) for the features, and not b since it does not contribute to the prediction.
y = f(a,b)
The minimum dictionary format for inputs is:
{ "inputs": { "[your_display_name]":
"input_tensor_name": [layer]
}
}
<blockquote>
- [your_display_name]: A human readable name you assign to the input to explain. A common example is "features".<br/>
- "input_tensor_name": The key/value field to identify the input layer for the feature attribution. <br/>
- [layer]: The input layer for feature attribution. In a single input tensor model, it is the first (bottom-most) layer in the model.
</blockquote>
Since the inputs to the model are tabular, you can specify the following two additional fields as reporting/visualization aids:
<blockquote>
- "encoding": "BAG_OF_FEATURES" : Indicates that the inputs are set of tabular features.<br/>
- "index_feature_mapping": [ feature-names ] : A list of human readable names for each feature. For this example, we use the feature names specified in the dataset.<br/>
- "modality": "numeric": Indicates the field values are numeric.
</blockquote>
End of explanation
model = aip.Model.upload(
display_name="boston_" + TIMESTAMP,
artifact_uri=MODEL_DIR,
serving_container_image_uri=DEPLOY_IMAGE,
explanation_parameters=parameters,
explanation_metadata=metadata,
sync=False,
)
model.wait()
Explanation: Upload the model
Next, upload your model to a Model resource using Model.upload() method, with the following parameters:
display_name: The human readable name for the Model resource.
artifact: The Cloud Storage location of the trained model artifacts.
serving_container_image_uri: The serving container image.
sync: Whether to execute the upload asynchronously or synchronously.
explanation_parameters: Parameters to configure explaining for Model's predictions.
explanation_metadata: Metadata describing the Model's input and output for explanation.
If the upload() method is run asynchronously, you can subsequently block until completion with the wait() method.
End of explanation
import numpy as np

# Generate synthetic test items: random values for the 13 Boston Housing feature columns.
HEADING = "crim,zn,indus,chas,nox,rm,age,dis,rad,tax,ptratio,b,lstat"
rows = [",".join(str(v) for v in r) for r in np.random.rand(4, 13).round(4)]
with open("batch.csv", "w") as f:
    f.write(HEADING + "\n" + "\n".join(rows) + "\n")
gcs_input_uri = BUCKET_NAME + "/test.csv"
! gsutil cp batch.csv $gcs_input_uri
Explanation: Send a batch prediction request
Send a batch prediction to your deployed model.
Make test items
You will use synthetic data as the test data items. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction.
Make the batch input file
Now make a batch input file, which you will store in your Cloud Storage bucket. Unlike image, video and text, the batch input file for tabular data is only supported as CSV. For a CSV file, you make:
The first line is the heading with the feature (fields) heading names.
Each remaining line is a separate prediction request with the corresponding feature values.
For example:
"feature_1", "feature_2". ...
value_1, value_2, ...
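For instance, a batch input file for this model could look like the following sketch (the column names are assumed to match the index_feature_mapping defined earlier in the explanation metadata, and the values are purely illustrative):
crim,zn,indus,chas,nox,rm,age,dis,rad,tax,ptratio,b,lstat
0.03,12.5,7.9,0.0,0.52,6.4,66.2,3.9,5.0,311.0,15.2,390.5,12.4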
End of explanation
MIN_NODES = 1
MAX_NODES = 1
batch_predict_job = model.batch_predict(
job_display_name="boston_" + TIMESTAMP,
gcs_source=gcs_input_uri,
gcs_destination_prefix=BUCKET_NAME,
instances_format="csv",
predictions_format="jsonl",
machine_type=DEPLOY_COMPUTE,
starting_replica_count=MIN_NODES,
max_replica_count=MAX_NODES,
generate_explanation=True,
sync=False,
)
print(batch_predict_job)
Explanation: Make the batch explanation request
Now that your Model resource is trained, you can make a batch prediction by invoking the batch_predict() method, with the following parameters:
job_display_name: The human readable name for the batch prediction job.
gcs_source: A list of one or more batch request input files.
gcs_destination_prefix: The Cloud Storage location for storing the batch prediction results.
instances_format: The format for the input instances, either 'csv' or 'jsonl'. Defaults to 'jsonl'.
predictions_format: The format for the output predictions, either 'csv' or 'jsonl'. Defaults to 'jsonl'.
generate_explanation: Set to True to generate explanations.
sync: If set to True, the call will block while waiting for the asynchronous batch job to complete.
End of explanation
if not os.getenv("IS_TESTING"):
batch_predict_job.wait()
Explanation: Wait for completion of batch prediction job
Next, wait for the batch job to complete. Alternatively, one can set the parameter sync to True in the batch_predict() method to block until the batch prediction job is completed.
End of explanation
if not os.getenv("IS_TESTING"):
import tensorflow as tf
bp_iter_outputs = batch_predict_job.iter_outputs()
explanation_results = list()
for blob in bp_iter_outputs:
if blob.name.split("/")[-1].startswith("explanation"):
explanation_results.append(blob.name)
tags = list()
for explanation_result in explanation_results:
gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{explanation_result}"
with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile:
for line in gfile.readlines():
print(line)
Explanation: Get the explanations
Next, get the explanation results from the completed batch prediction job.
The results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method iter_outputs() to get a list of each Cloud Storage file generated with the results. Each file contains one or more explanation results, one record per prediction request, written in the predictions format requested for the job (JSONL in this tutorial); each record holds the prediction together with its feature attributions.
End of explanation
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.delete()
except Exception as e:
print(e)
# Delete the AutoML or Pipeline trainig job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
# Delete the custom trainig job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
Explanation: Cleaning up
To clean up all Google Cloud resources used in this project, you can delete the Google Cloud
project you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
Dataset
Pipeline
Model
Endpoint
AutoML Training Job
Batch Job
Custom Job
Hyperparameter Tuning Job
Cloud Storage Bucket
End of explanation |
14,771 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Calculating Seasonal Averages from Timeseries of Monthly Means
Author
Step1: Some calendar information so we can support any netCDF calendar.
Step4: A few calendar functions to determine the number of days in each month
If you were just using the standard calendar, it would be easy to use the calendar.month_range function.
Step5: Open the Dataset
Step6: Now for the heavy lifting | Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import xarray as xr
from netCDF4 import num2date
import matplotlib.pyplot as plt
print("numpy version : ", np.__version__)
print("pandas version : ", pd.__version__)
print("xarray version : ", xr.__version__)
Explanation: Calculating Seasonal Averages from Timeseries of Monthly Means
Author: Joe Hamman
The data for this example can be found in the xray-data repository. This example is also available in an IPython Notebook that is available here.
Suppose we have a netCDF or xray Dataset of monthly mean data and we want to calculate the seasonal average. To do this properly, we need to calculate the weighted average considering that each month has a different number of days.
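Concretely, the weighted seasonal mean we are after is (a sketch of the formula, with $x_m$ the monthly mean and $d_m$ the number of days in month $m$): $\bar{x}_{season} = \frac{\sum_{m \in season} d_m\, x_m}{\sum_{m \in season} d_m}$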
End of explanation
dpm = {'noleap': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
'365_day': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
'standard': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
'gregorian': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
'proleptic_gregorian': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
'all_leap': [0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
'366_day': [0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
'360_day': [0, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30]}
Explanation: Some calendar information so we can support any netCDF calendar.
End of explanation
def leap_year(year, calendar='standard'):
    """Determine if year is a leap year."""
leap = False
if ((calendar in ['standard', 'gregorian',
'proleptic_gregorian', 'julian']) and
(year % 4 == 0)):
leap = True
if ((calendar == 'proleptic_gregorian') and
(year % 100 == 0) and
(year % 400 != 0)):
leap = False
elif ((calendar in ['standard', 'gregorian']) and
(year % 100 == 0) and (year % 400 != 0) and
(year < 1583)):
leap = False
return leap
def get_dpm(time, calendar='standard'):
    """Return an array of days per month corresponding to the months provided in `time`."""
    month_length = np.zeros(len(time), dtype=int)
cal_days = dpm[calendar]
for i, (month, year) in enumerate(zip(time.month, time.year)):
month_length[i] = cal_days[month]
if leap_year(year, calendar=calendar):
month_length[i] += 1
return month_length
Explanation: A few calendar functions to determine the number of days in each month
If you were just using the standard calendar, it would be easy to use the calendar.monthrange function.
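For reference, a quick sketch of that standard-library call (the stdlib function is calendar.monthrange, which returns the weekday of the first day of the month and the number of days in the month):
import calendar
# The second element is the number of days in the month, e.g. 29 for February 2000.
print(calendar.monthrange(2000, 2))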
End of explanation
monthly_mean_file = 'RASM_example_data.nc'
ds = xr.open_dataset(monthly_mean_file, decode_coords=False)
print(ds)
Explanation: Open the Dataset
End of explanation
# Make a DataArray with the number of days in each month, size = len(time)
month_length = xr.DataArray(get_dpm(ds.time.to_index(),
calendar='noleap'),
coords=[ds.time], name='month_length')
# Calculate the weights by grouping by 'time.season'.
seasons = month_length.groupby('time.season')
weights = seasons / seasons.sum()
# Test that the sum of the weights for each season is 1.0
np.testing.assert_allclose(weights.groupby('time.season').sum().values, np.ones(4))
# Calculate the weighted average
ds_weighted = (ds * weights).groupby('time.season').sum(dim='time')
ds_weighted
# only used for comparisons
ds_unweighted = ds.groupby('time.season').mean('time')
ds_diff = ds_weighted - ds_unweighted
# Quick plot to show the results
is_null = np.isnan(ds_unweighted['Tair'][0].values)
fig, axes = plt.subplots(nrows=4, ncols=3, figsize=(14,12))
for i, season in enumerate(('DJF', 'MAM', 'JJA', 'SON')):
plt.sca(axes[i, 0])
plt.pcolormesh(np.ma.masked_where(is_null, ds_weighted['Tair'].sel(season=season).values),
vmin=-30, vmax=30, cmap='Spectral_r')
plt.colorbar(extend='both')
plt.sca(axes[i, 1])
plt.pcolormesh(np.ma.masked_where(is_null, ds_unweighted['Tair'].sel(season=season).values),
vmin=-30, vmax=30, cmap='Spectral_r')
plt.colorbar(extend='both')
plt.sca(axes[i, 2])
plt.pcolormesh(np.ma.masked_where(is_null, ds_diff['Tair'].sel(season=season).values),
vmin=-0.1, vmax=.1, cmap='RdBu_r')
plt.colorbar(extend='both')
for j in range(3):
axes[i, j].axes.get_xaxis().set_ticklabels([])
axes[i, j].axes.get_yaxis().set_ticklabels([])
axes[i, j].axes.axis('tight')
axes[i, 0].set_ylabel(season)
axes[0, 0].set_title('Weighted by DPM')
axes[0, 1].set_title('Equal Weighting')
axes[0, 2].set_title('Difference')
plt.tight_layout()
fig.suptitle('Seasonal Surface Air Temperature', fontsize=16, y=1.02)
Explanation: Now for the heavy lifting:
We first have to come up with the weights:
- calculate the month lengths for each monthly data record
- calculate weights using groupby('time.season')
From Norman: http://xarray.pydata.org/en/stable/time-series.html#datetime-components
Finally, we just need to multiply our weights by the Dataset and sum along the time dimension.
End of explanation |
14,772 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step1: Batch Normalization
One way to make deep networks easier to train is to use more sophisticated optimization procedures such as SGD+momentum, RMSProp, or Adam. Another strategy is to change the architecture of the network to make it easier to train. One idea along these lines is batch normalization which was recently proposed by [3].
The idea is relatively straightforward. Machine learning methods tend to work better when their input data consists of uncorrelated features with zero mean and unit variance. When training a neural network, we can preprocess the data before feeding it to the network to explicitly decorrelate its features; this will ensure that the first layer of the network sees data that follows a nice distribution. However even if we preprocess the input data, the activations at deeper layers of the network will likely no longer be decorrelated and will no longer have zero mean or unit variance since they are output from earlier layers in the network. Even worse, during the training process the distribution of features at each layer of the network will shift as the weights of each layer are updated.
The authors of [3] hypothesize that the shifting distribution of features inside deep neural networks may make training deep networks more difficult. To overcome this problem, [3] proposes to insert batch normalization layers into the network. At training time, a batch normalization layer uses a minibatch of data to estimate the mean and standard deviation of each feature. These estimated means and standard deviations are then used to center and normalize the features of the minibatch. A running average of these means and standard deviations is kept during training, and at test time these running averages are used to center and normalize features.
It is possible that this normalization strategy could reduce the representational power of the network, since it may sometimes be optimal for certain layers to have features that are not zero-mean or unit variance. To this end, the batch normalization layer includes learnable shift and scale parameters for each feature dimension.
[3] Sergey Ioffe and Christian Szegedy, "Batch Normalization
Step2: Batch normalization
Step3: Batch Normalization
Step4: Batch Normalization
Step5: Fully Connected Nets with Batch Normalization
Now that you have a working implementation for batch normalization, go back to your FullyConnectedNet in the file cs231n/classifiers/fc_net.py. Modify your implementation to add batch normalization.
Concretely, when the flag use_batchnorm is True in the constructor, you should insert a batch normalization layer before each ReLU nonlinearity. The outputs from the last layer of the network should not be normalized. Once you are done, run the following to gradient-check your implementation.
HINT
Step6: Batchnorm for deep networks
Run the following to train a six-layer network on a subset of 1000 training examples both with and without batch normalization.
Step7: Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.
Step8: Batch normalization and initialization
We will now run a small experiment to study the interaction of batch normalization and weight initialization.
The first cell will train 8-layer networks both with and without batch normalization using different scales for weight initialization. The second layer will plot training accuracy, validation set accuracy, and training loss as a function of the weight initialization scale. | Python Code:
# As usual, a bit of setup
from __future__ import print_function
import time
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """Returns relative error."""
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.items():
print('%s: ' % k, v.shape)
Explanation: Batch Normalization
One way to make deep networks easier to train is to use more sophisticated optimization procedures such as SGD+momentum, RMSProp, or Adam. Another strategy is to change the architecture of the network to make it easier to train. One idea along these lines is batch normalization which was recently proposed by [3].
The idea is relatively straightforward. Machine learning methods tend to work better when their input data consists of uncorrelated features with zero mean and unit variance. When training a neural network, we can preprocess the data before feeding it to the network to explicitly decorrelate its features; this will ensure that the first layer of the network sees data that follows a nice distribution. However even if we preprocess the input data, the activations at deeper layers of the network will likely no longer be decorrelated and will no longer have zero mean or unit variance since they are output from earlier layers in the network. Even worse, during the training process the distribution of features at each layer of the network will shift as the weights of each layer are updated.
The authors of [3] hypothesize that the shifting distribution of features inside deep neural networks may make training deep networks more difficult. To overcome this problem, [3] proposes to insert batch normalization layers into the network. At training time, a batch normalization layer uses a minibatch of data to estimate the mean and standard deviation of each feature. These estimated means and standard deviations are then used to center and normalize the features of the minibatch. A running average of these means and standard deviations is kept during training, and at test time these running averages are used to center and normalize features.
It is possible that this normalization strategy could reduce the representational power of the network, since it may sometimes be optimal for certain layers to have features that are not zero-mean or unit variance. To this end, the batch normalization layer includes learnable shift and scale parameters for each feature dimension.
[3] Sergey Ioffe and Christian Szegedy, "Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift", ICML 2015.
End of explanation
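# For reference, a minimal numpy sketch of the training-time normalization described
# above. This is illustrative only (the _demo names are not part of the assignment);
# the real batchnorm_forward must also maintain the running averages used at test time
# and return a cache for the backward pass.
x_demo = np.random.randn(8, 4)
gamma_demo, beta_demo, eps_demo = np.ones(4), np.zeros(4), 1e-5
x_hat_demo = (x_demo - x_demo.mean(axis=0)) / np.sqrt(x_demo.var(axis=0) + eps_demo)
out_demo = gamma_demo * x_hat_demo + beta_demo
print('sketch means: ', out_demo.mean(axis=0))  # approximately beta_demo (zeros)
print('sketch stds: ', out_demo.std(axis=0))    # approximately gamma_demo (ones)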
# Check the training-time forward pass by checking means and variances
# of features both before and after batch normalization
# Simulate the forward pass for a two-layer network
np.random.seed(231)
N, D1, D2, D3 = 200, 50, 60, 3
X = np.random.randn(N, D1)
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
a = np.maximum(0, X.dot(W1)).dot(W2)
print('Before batch normalization:')
print(' means: ', a.mean(axis=0))
print(' stds: ', a.std(axis=0))
# Means should be close to zero and stds close to one
print('After batch normalization (gamma=1, beta=0)')
a_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'})
print(' mean: ', a_norm.mean(axis=0))
print(' std: ', a_norm.std(axis=0))
# Now means should be close to beta and stds close to gamma
gamma = np.asarray([1.0, 2.0, 3.0])
beta = np.asarray([11.0, 12.0, 13.0])
a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'})
print('After batch normalization (nontrivial gamma, beta)')
print(' means: ', a_norm.mean(axis=0))
print(' stds: ', a_norm.std(axis=0))
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
np.random.seed(231)
N, D1, D2, D3 = 200, 50, 60, 3
W1 = np.random.randn(D1, D2)
W2 = np.random.randn(D2, D3)
bn_param = {'mode': 'train'}
gamma = np.ones(D3)
beta = np.zeros(D3)
for t in range(50):
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
batchnorm_forward(a, gamma, beta, bn_param)
bn_param['mode'] = 'test'
X = np.random.randn(N, D1)
a = np.maximum(0, X.dot(W1)).dot(W2)
a_norm, _ = batchnorm_forward(a, gamma, beta, bn_param)
# Means should be close to zero and stds close to one, but will be
# noisier than training-time forward passes.
print('After batch normalization (test-time):')
print(' means: ', a_norm.mean(axis=0))
print(' stds: ', a_norm.std(axis=0))
Explanation: Batch normalization: Forward
In the file cs231n/layers.py, implement the batch normalization forward pass in the function batchnorm_forward. Once you have done so, run the following to test your implementation.
End of explanation
# Gradient check batchnorm backward pass
np.random.seed(231)
N, D = 4, 5
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
fx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0]
fg = lambda a: batchnorm_forward(x, a, beta, bn_param)[0]
fb = lambda b: batchnorm_forward(x, gamma, b, bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
da_num = eval_numerical_gradient_array(fg, gamma.copy(), dout)
db_num = eval_numerical_gradient_array(fb, beta.copy(), dout)
_, cache = batchnorm_forward(x, gamma, beta, bn_param)
dx, dgamma, dbeta = batchnorm_backward(dout, cache)
print('dx error: ', rel_error(dx_num, dx))
print('dgamma error: ', rel_error(da_num, dgamma))
print('dbeta error: ', rel_error(db_num, dbeta))
Explanation: Batch Normalization: backward
Now implement the backward pass for batch normalization in the function batchnorm_backward.
To derive the backward pass you should write out the computation graph for batch normalization and backprop through each of the intermediate nodes. Some intermediates may have multiple outgoing branches; make sure to sum gradients across these branches in the backward pass.
Once you have finished, run the following to numerically check your backward pass.
End of explanation
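# A sketch of one standard way to chain-rule through the normalization, evaluated on a
# small random example. It is only a reference for checking your reasoning: your own
# batchnorm_backward should work from whatever cache your forward pass stored, and all
# of the _bp names below are demo-only.
x_bp = 5 * np.random.randn(6, 3) + 12
gamma_bp, dout_bp, eps_bp = np.random.randn(3), np.random.randn(6, 3), 1e-5
N_bp = x_bp.shape[0]
mu_bp, var_bp = x_bp.mean(axis=0), x_bp.var(axis=0)
x_hat_bp = (x_bp - mu_bp) / np.sqrt(var_bp + eps_bp)
dbeta_bp = dout_bp.sum(axis=0)
dgamma_bp = (dout_bp * x_hat_bp).sum(axis=0)
dx_hat_bp = dout_bp * gamma_bp
dvar_bp = np.sum(dx_hat_bp * (x_bp - mu_bp) * -0.5 * (var_bp + eps_bp) ** -1.5, axis=0)
dmu_bp = np.sum(-dx_hat_bp / np.sqrt(var_bp + eps_bp), axis=0) + dvar_bp * np.mean(-2.0 * (x_bp - mu_bp), axis=0)
dx_bp = dx_hat_bp / np.sqrt(var_bp + eps_bp) + dvar_bp * 2.0 * (x_bp - mu_bp) / N_bp + dmu_bp / N_bp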
np.random.seed(231)
N, D = 100, 500
x = 5 * np.random.randn(N, D) + 12
gamma = np.random.randn(D)
beta = np.random.randn(D)
dout = np.random.randn(N, D)
bn_param = {'mode': 'train'}
out, cache = batchnorm_forward(x, gamma, beta, bn_param)
t1 = time.time()
dx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache)
t2 = time.time()
dx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache)
t3 = time.time()
print('dx difference: ', rel_error(dx1, dx2))
print('dgamma difference: ', rel_error(dgamma1, dgamma2))
print('dbeta difference: ', rel_error(dbeta1, dbeta2))
print('speedup: %.2fx' % ((t2 - t1) / (t3 - t2)))
Explanation: Batch Normalization: alternative backward (OPTIONAL, +3 points extra credit)
In class we talked about two different implementations for the sigmoid backward pass. One strategy is to write out a computation graph composed of simple operations and backprop through all intermediate values. Another strategy is to work out the derivatives on paper. For the sigmoid function, it turns out that you can derive a very simple formula for the backward pass by simplifying gradients on paper.
Surprisingly, it turns out that you can also derive a simple expression for the batch normalization backward pass if you work out derivatives on paper and simplify. After doing so, implement the simplified batch normalization backward pass in the function batchnorm_backward_alt and compare the two implementations by running the following. Your two implementations should compute nearly identical results, but the alternative implementation should be a bit faster.
NOTE: This part of the assignment is entirely optional, but we will reward 3 points of extra credit if you can complete it.
End of explanation
np.random.seed(231)
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
print('Running check with reg = ', reg)
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
reg=reg, weight_scale=5e-2, dtype=np.float64,
use_batchnorm=True)
loss, grads = model.loss(X, y)
print('Initial loss: ', loss)
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))
if reg == 0: print()
Explanation: Fully Connected Nets with Batch Normalization
Now that you have a working implementation for batch normalization, go back to your FullyConnectedNet in the file cs231n/classifiers/fc_net.py. Modify your implementation to add batch normalization.
Concretely, when the flag use_batchnorm is True in the constructor, you should insert a batch normalization layer before each ReLU nonlinearity. The outputs from the last layer of the network should not be normalized. Once you are done, run the following to gradient-check your implementation.
HINT: You might find it useful to define an additional helper layer similar to those in the file cs231n/layer_utils.py. If you decide to do so, do it in the file cs231n/classifiers/fc_net.py.
End of explanation
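# One possible shape for the helper layer mentioned in the hint. This is only a sketch:
# the affine_bn_relu_*_sketch names are made up here, and it assumes the affine_forward /
# relu_forward style helpers from cs231n/layers.py together with your batchnorm code.
def affine_bn_relu_forward_sketch(x, w, b, gamma, beta, bn_param):
    a, fc_cache = affine_forward(x, w, b)
    a_bn, bn_cache = batchnorm_forward(a, gamma, beta, bn_param)
    out, relu_cache = relu_forward(a_bn)
    return out, (fc_cache, bn_cache, relu_cache)

def affine_bn_relu_backward_sketch(dout, cache):
    fc_cache, bn_cache, relu_cache = cache
    da_bn = relu_backward(dout, relu_cache)
    da, dgamma, dbeta = batchnorm_backward(da_bn, bn_cache)
    dx, dw, db = affine_backward(da, fc_cache)
    return dx, dw, db, dgamma, dbeta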
np.random.seed(231)
# Try training a very deep net with batchnorm
hidden_dims = [100, 100, 100, 100, 100]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
weight_scale = 2e-2
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
bn_solver.train()
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True, print_every=200)
solver.train()
Explanation: Batchnorm for deep networks
Run the following to train a six-layer network on a subset of 1000 training examples both with and without batch normalization.
End of explanation
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label='baseline')
plt.plot(bn_solver.loss_history, 'o', label='batchnorm')
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label='baseline')
plt.plot(bn_solver.train_acc_history, '-o', label='batchnorm')
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label='baseline')
plt.plot(bn_solver.val_acc_history, '-o', label='batchnorm')
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
Explanation: Run the following to visualize the results from two networks trained above. You should find that using batch normalization helps the network to converge much faster.
End of explanation
np.random.seed(231)
# Try training a very deep net with batchnorm
hidden_dims = [50, 50, 50, 50, 50, 50, 50]
num_train = 1000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
bn_solvers = {}
solvers = {}
weight_scales = np.logspace(-4, 0, num=20)
for i, weight_scale in enumerate(weight_scales):
print('Running weight scale %d / %d' % (i + 1, len(weight_scales)))
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)
bn_solver = Solver(bn_model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
bn_solver.train()
bn_solvers[weight_scale] = bn_solver
solver = Solver(model, small_data,
num_epochs=10, batch_size=50,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=False, print_every=200)
solver.train()
solvers[weight_scale] = solver
# Plot results of weight scale experiment
best_train_accs, bn_best_train_accs = [], []
best_val_accs, bn_best_val_accs = [], []
final_train_loss, bn_final_train_loss = [], []
for ws in weight_scales:
best_train_accs.append(max(solvers[ws].train_acc_history))
bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history))
best_val_accs.append(max(solvers[ws].val_acc_history))
bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history))
final_train_loss.append(np.mean(solvers[ws].loss_history[-100:]))
bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history[-100:]))
plt.subplot(3, 1, 1)
plt.title('Best val accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best val accuracy')
plt.semilogx(weight_scales, best_val_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm')
plt.tight_layout()
plt.legend(ncol=2, loc='lower right')
plt.subplot(3, 1, 2)
plt.title('Best train accuracy vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Best training accuracy')
plt.semilogx(weight_scales, best_train_accs, '-o', label='baseline')
plt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm')
plt.tight_layout()
plt.legend()
plt.subplot(3, 1, 3)
plt.title('Final training loss vs weight initialization scale')
plt.xlabel('Weight initialization scale')
plt.ylabel('Final training loss')
plt.semilogx(weight_scales, final_train_loss, '-o', label='baseline')
plt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm')
plt.tight_layout()
plt.legend()
plt.gca().set_ylim(1.0, 3.5)
plt.gcf().set_size_inches(10, 15)
plt.show()
Explanation: Batch normalization and initialization
We will now run a small experiment to study the interaction of batch normalization and weight initialization.
The first cell will train 8-layer networks both with and without batch normalization using different scales for weight initialization. The second cell will plot training accuracy, validation set accuracy, and training loss as a function of the weight initialization scale.
End of explanation |
14,773 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
The EM algorithm for Hawkes processes
Here we explore the optimisation algorithm for parameter estimation given in
Mohler et al. "Randomized Controlled Field Trials of Predictive Policing". Journal of the American Statistical Association (2015) DOI
Step1: Checking the simulation
Step2: Background rate
Should be a homogeneous Poisson process of rate 0.1
Step3: Aftershocks
Should be exponential, with an "exponential weight" on 0.05, and "intensity" of 1.0. So the conditional intensity function should be $\theta \omega e^{-\omega t}$ with $\theta=1, \omega=0.05$.
Step4: We only sample the process in a finite time interval, so we'll miss aftershocks which occur after the end of our time window.
To correct for this, using the extra (normally hidden) information we have about our simulation, we can discard aftershocks which occur near the end of our time window.
Step5: To get the normalisation correct above, we need to think about what the "intensity" parameter $\theta$ represents.
Each background event gives rise to $n$ aftershock events, where $n$ is distributed as a Poisson random variable with mean $\theta$.
Each of these initial aftershocks gives rise to further aftershocks, again with the same distribution.
And so on.
The total number of events, counting also the initial event, is then
$$ 1 + \theta + \theta^2 + \cdots = \frac{1}{1-\theta} $$
supposing $\theta<1$. This is a standard result in the theory of Branching Processes, see e.g. Lecture notes, page 9. So let $\alpha$ be the count of aftershock events, and $\beta$ the count of background events, so that $\alpha + \beta$ is the total number of events. Then
$$ \frac{\alpha}{\beta} = \frac{1}{1-\theta} - 1 = \frac{\theta}{1-\theta} \implies
\theta = \frac{\alpha / \beta}{1+\alpha/\beta} =\frac{\alpha}{\alpha+\beta} $$
The EM algorithm
Here we implement the "EM" algorithm described by Mohler et al (2015).
Step6: For a largish sample size, this is typical behaviour-- we converge to an estimate which under-estimates $\theta$ and slightly over-estimates $\omega$ and $\mu$.
So we think there are slightly more background events than there really are.
For the aftershocks, we underestimate the mean value of the exponential (so believe aftershocks occur closer in time than they really do) and underestimate the total aftershock intensity.
I cannot rigorously justify this, but I believe this is due to edge effects-- events which occur near the end of our time window are likely to trigger aftershocks which we do not observe (because they fall after the time window) and so we underestimate the intensity of aftershocks, and so compensate by overestimating the background rate.
It must be said that with repeated runs, you can also get the opposite behaviour-- $\theta$ being over-estimated.
For a small sample
Step7: This is a slightly extreme graph, but it can certainly occur.
Using a window of data
If our model is correct, then note that "real-world" will not start from time 0, but will instead be a window onto an on-going process.
Step8: This is a typical graph, though more extreme behaviour can be observed as well!
Step9: Choosing different parameters
The above work is all performed with $\mu=0.1, \omega=0.05, \theta=0.5$. The choice of $\omega$ is problematic, as this means that the average time between event and aftershock is $20$ time units, 2 times longer than the expected time to the next background event.
So let's repeat the work, but with $\omega=1$ say.
Step10: Finally try with $\omega=10$ and a small(er) sample.
Step11: Recreation of Lewis and Mohler
The EM algorithm which we're using originates in
Step12: Except when $\omega^{-1}=10$ these look like the graphs from the paper.
Reading more closely, we see from the paper
Step13: This is definitely an improvement, but despite having tried this a number of times, I cannot reproduce the graphs from Lewis and Mohler.
Computing the likelihood
From (3) Theorem 3 we know that if $\lambda^*(t)$ is the conditional intensity function, then the likelihood of the process is | Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
Explanation: The EM algorithm for Hawkes processes
Here we explore the optimisation algorithm for parameter estimation given in
Mohler et al. "Randomized Controlled Field Trials of Predictive Policing". Journal of the American Statistical Association (2015) DOI:10.1080/01621459.2015.1077710
Lewis, Mohler, "A Nonparametric EM algorithm for Multiscale Hawkes Processes", preprint (2011) see http://math.scu.edu/~gmohler/EM_paper.pdf
Laub et al "Hawkes Processes" arXiv:150702822v1 [math.PR]
End of explanation
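# For reference (not part of the original notebook): the conditional intensity of the
# model simulated below is lambda*(t) = mu + sum_{t_i < t} theta * omega * exp(-omega * (t - t_i)).
# The default parameters match the process constructed in the next cell.
def conditional_intensity(t, past_points, mu=0.1, theta=0.5, omega=0.05):
    dt = t - np.asarray(past_points)
    dt = dt[dt > 0]
    return mu + theta * omega * np.sum(np.exp(-omega * dt))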
import open_cp.sources.sepp as source_sepp
process = source_sepp.SelfExcitingPointProcess(
background_sampler = source_sepp.HomogeneousPoissonSampler(rate=0.1),
trigger_sampler = source_sepp.ExponentialDecaySampler(intensity=0.5, exp_rate=0.05))
Explanation: Checking the simulation
End of explanation
totality = []
trials = 50000
for _ in range(trials):
result = process.sample_with_details(0,100)
totality.extend(result.backgrounds)
bins = np.linspace(0, 100, 21)
counts = np.histogram(totality, bins)[0]
counts = counts / (trials * (bins[1] - bins[0]))
fig, ax = plt.subplots(ncols=2, figsize=(16,5))
ax[0].plot((bins[:-1] + bins[1:])/2, counts)
ax[0].set(xlim=[0,100], ylim=[0,.15])
ax[1].plot((bins[:-1] + bins[1:])/2, counts)
ax[1].set(xlim=[0,100])
for i in range(2):
ax[i].set_xlabel("time")
ax[i].set_ylabel("intensity")
None
Explanation: Background rate
Should be a homogeneous Poisson process of rate 0.1
End of explanation
totality = []
trials = 10000
for _ in range(trials):
result = process.sample_with_details(0,100)
totality.extend(result.trigger_deltas)
fig, ax = plt.subplots(ncols=2, figsize=(16,5))
bins = np.linspace(0,1,11)
xcoords = (bins[:-1] + bins[1:]) / 2
y = np.random.exponential(1 / 0.05, size=100000)
x = 1 - np.exp(-0.05 * np.asarray(y))
c = np.histogram(x, bins)[0]
ax[0].scatter(xcoords, c / 10000)
ax[0].set(xlim=[0,1], ylim=[0,1.1], title="Direct simulation from numpy")
y = np.asarray(totality)
x = 1 - np.exp(-0.05 * np.asarray(y))
c = np.histogram(x, bins)[0]
ax[1].scatter(xcoords, c / trials * 10)
ax[1].set(xlim=[0,1], title="From our process, showing edge effects")
None
Explanation: Aftershocks
Should be exponential, with an "exponential weight" on 0.05, and "intensity" of 1.0. So the conditional intensity function should be $\theta \omega e^{-\omega t}$ with $\theta=1, \omega=0.05$.
End of explanation
totality = []
trials = 1000
total_points = 0
for _ in range(trials):
result = process.sample_with_details(0,1000)
mask = result.trigger_points <= 900
totality.extend( result.trigger_deltas[mask] )
count = np.sum(result.points <= 900)
total_points += count
fig, ax = plt.subplots(ncols=2, figsize=(16,5))
y = np.asarray(totality)
x = 1 - np.exp(-0.05 * np.asarray(y))
c = np.histogram(x, bins)[0]
ax[0].scatter(xcoords, c / trials)
ax[0].set(xlim=[0,1], title="From our process, corrected for edge effects")
scale = 1 / ((bins[1] - bins[0]) * total_points)
ax[1].scatter(xcoords, c * scale)
ax[1].set(xlim=[0,1], title="Normalised to estimate $\\theta$")
None
Explanation: We only sample the process in a finite time interval, so we'll miss aftershocks which occur after the end of our time window.
To correct for this, using the extra (normally hidden) information we have about our simulation, we can discard aftershocks which occur near the end of our time window.
End of explanation
total_time = 10000
result = process.sample_with_details(0, total_time)
points = result.points
points.shape, result.trigger_deltas.shape
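# In the EM algorithm below, p_matrix_col builds one (unnormalised) column of the
# triggering matrix: entry j < col is proportional to the probability that event `col`
# was triggered by event j, and the final entry is proportional to the probability that
# it is a background event. m_step normalises each column and aggregates these
# responsibilities into updated estimates of theta, omega and mu.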
def p_matrix_col(points, col, theta=0.5, omega=0.05, mu=0.1):
p = np.empty(col + 1)
p[col] = mu
dt = points[col] - points[0:col]
p[0:col] = theta * omega * np.exp(-omega * dt)
return p, np.sum(p), dt
def m_step(points, total_time, theta=0.5, omega=0.05, mu=0.1):
omega_1, omega_2, mu_e = 0.0, 0.0, 0.0
for col in range(len(points)):
p, norm, dt = p_matrix_col(points, col, theta, omega, mu)
wp = p[0:col] * dt
omega_1 += np.sum(p[0:col]) / norm
omega_2 += np.sum(wp) / norm
mu_e += p[-1] / norm
return omega_1 / len(points), omega_1 / omega_2, mu_e / total_time
theta, omega, mu = m_step(points, total_time)
theta, omega, mu
def apply_algorithm(points, total_time, steps=200, theta_in=0.5, omega_in=0.05, mu_in=0.1,
convergence_criteria=None, func=m_step):
theta, omega, mu = theta_in, omega_in, mu_in
thetas, omegas, mus = [theta], [omega], [mu]
for _ in range(steps):
theta, omega, mu = func(points, total_time, theta, omega, mu)
diff = max(abs(thetas[-1] - theta), abs(omegas[-1] - omega), abs(mus[-1] - mu))
thetas.append(theta)
omegas.append(omega)
mus.append(mu)
if convergence_criteria is not None and diff <= convergence_criteria:
break
return thetas, omegas, mus
thetas, omegas, mus = apply_algorithm(points, total_time, 100)
thetas[-1], omegas[-1], mus[-1]
def plot_convergence(thetas, omegas, mus, inv_omega=False):
fig, ax = plt.subplots(figsize=(16,5))
x = list(range(len(thetas)))
ax.plot(x, thetas)
legend_txt = ["$\\theta$"]
if inv_omega:
legend_txt.append("$\\omega^{-1}$")
ax.plot(x, 1 / np.asarray(omegas))
else:
legend_txt.append("$\\omega$")
ax.plot(x, omegas)
ax.plot(x, mus)
legend_txt.append("$\\mu$")
ax.legend(legend_txt)
ax.set(xlabel="Iteration")
plot_convergence(thetas, omegas, mus)
Explanation: To get the normalisation correct above, we need to think about what the "intensity" parameter $\theta$ represents.
Each background event gives rise to $n$ aftershock events, where $n$ is distributed as a Poisson random variable with mean $\theta$.
Each of these initial aftershocks gives rise to further aftershocks, again with the same distribution.
And so on.
The total number of events, counting also the initial event, is then
$$ 1 + \theta + \theta^2 + \cdots = \frac{1}{1-\theta} $$
supposing $\theta<1$. This is a standard result in the theory of Branching Processes, see e.g. Lecture notes, page 9. So let $\alpha$ be the count of aftershock events, and $\beta$ the count of background events, so that $\alpha + \beta$ is the total number of events. Then
$$ \frac{\alpha}{\beta} = \frac{1}{1-\theta} - 1 = \frac{\theta}{1-\theta} \implies
\theta = \frac{\alpha / \beta}{1+\alpha/\beta} =\frac{\alpha}{\alpha+\beta} $$
The EM algorithm
Here we implement the "EM" algorithm described by Mohler et al (2015).
End of explanation
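# Quick sanity check of theta = alpha / (alpha + beta) using the (normally hidden)
# details of the sample above, assuming .trigger_deltas and .backgrounds support len().
# Edge effects mean this will come out slightly below the true value of 0.5.
alpha_count = len(result.trigger_deltas)   # aftershock events
beta_count = len(result.backgrounds)       # background events
alpha_count / (alpha_count + beta_count)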
total_time = 1000
result = process.sample_with_details(0, total_time)
points = result.points
points.shape, result.trigger_deltas.shape
plot_convergence(*apply_algorithm(points, total_time, 200))
Explanation: For a largish sample size, this is typical behaviour-- we converge to an estimate which under-estimates $\theta$ and slightly over-estimates $\omega$ and $\mu$.
So we think there are slightly more background events than there really are.
For the aftershocks, we underestimate the mean value of the exponential (so believe aftershocks occur closer in time than they really do) and underestimate the total aftershock intensity.
I cannot rigorously justify this, but I believe this is due to edge effects-- events which occur near the end of our time window are likely to trigger aftershocks which we do not observe (because they fall after the time window) and so we underestimate the intensity of aftershocks, and so compensate by overestimating the background rate.
It must be said that with repeated runs, you can also get the opposite behaviour-- $\theta$ being over-estimated.
For a small sample
End of explanation
total_time = 2000
result = process.sample_with_details(0, total_time)
points = result.points
points = points[points>=1000]
plot_convergence(*apply_algorithm(points, 1000, 200))
Explanation: This is a slightly extreme graph, but it can certainly occur.
Using a window of data
If our model is correct, then note that "real-world" will not start from time 0, but will instead be a window onto an on-going process.
End of explanation
total_time = 11000
result = process.sample_with_details(0, total_time)
points = result.points
points = points[points>=1000]
plot_convergence(*apply_algorithm(points, 10000, 50))
Explanation: This is a typical graph, though more extreme behaviour can be observed as well!
End of explanation
process = source_sepp.SelfExcitingPointProcess(
background_sampler = source_sepp.HomogeneousPoissonSampler(rate=0.1),
trigger_sampler = source_sepp.ExponentialDecaySampler(intensity=0.5, exp_rate=1))
points = process.sample(0, 1000)
fig, ax = plt.subplots(figsize=(16,1))
ax.scatter(points, np.random.random(len(points))*.02, alpha=0.3)
None
total_time = 10000
result = process.sample_with_details(0, total_time)
points = result.points
points.shape, result.trigger_deltas.shape
plot_convergence(*apply_algorithm(points, total_time, 200, theta_in=0.5, omega_in=1, mu_in=0.1), True)
Explanation: Choosing different parameters
The above work is all performed with $\mu=0.1, \omega=0.05, \theta=0.5$. The choice of $\omega$ is problematic, as this means that the average time between event and aftershock is $20$ time units, 2 times longer than the expected time to the next background event.
So let's repeat the work, but with $\omega=1$ say.
End of explanation
process = source_sepp.SelfExcitingPointProcess(
background_sampler = source_sepp.HomogeneousPoissonSampler(rate=0.1),
trigger_sampler = source_sepp.ExponentialDecaySampler(intensity=0.5, exp_rate=10))
points = process.sample(0, 1000)
fig, ax = plt.subplots(figsize=(16,1))
ax.scatter(points, np.random.random(len(points))*.02, alpha=0.3)
None
total_time = 1000
result = process.sample_with_details(0, total_time)
points = result.points
points.shape, result.trigger_deltas.shape
plot_convergence(*apply_algorithm(points, total_time, 200, theta_in=0.5, omega_in=10, mu_in=0.1), True)
Explanation: Finally try with $\omega=10$ and a small(er) sample.
End of explanation
all_results_dict = {}
for omega_inv in [0.01, 0.1, 1, 10]:
results = []
for _ in range(10):
process = source_sepp.SelfExcitingPointProcess(
background_sampler = source_sepp.HomogeneousPoissonSampler(rate=1),
trigger_sampler = source_sepp.ExponentialDecaySampler(intensity=0.5, exp_rate=1/omega_inv))
points = process.sample(0, 2000)
convergents = apply_algorithm(points, 2000, 2000, theta_in=0.5, omega_in=1/omega_inv, mu_in=1,
convergence_criteria = 10**(-5), func=m_step)
convergents = np.asarray(convergents)
# (theta, omega, mu), iterations
results.append((convergents[:,-1], convergents.shape[-1]))
all_results_dict[omega_inv] = results
thetas = {key: [result[0][0] for result in all_results_dict[key]] for key in all_results_dict}
mus = {key: [result[0][2] for result in all_results_dict[key]] for key in all_results_dict}
fig, ax = plt.subplots(ncols=2, figsize=(16,6))
def plot(ax, data, true_value):
x = list(data.keys())
y = np.asarray([ np.mean(data[k]) for k in x ])
ax.scatter(x, y, color="black")
yy = np.asarray([ np.std(data[k], ddof=1) for k in x ])
for x, y, dy in zip(x,y,yy):
ax.plot([x]*2, [y-dy,y+dy], color="black")
ax.plot([0.001,100], [true_value]*2, color="black", linestyle="--", linewidth=1)
ax.set(xscale="log", xlim=[0.001,100])
plot(ax[0], thetas, 0.5)
plot(ax[1], mus, 1)
ax[0].set(xlabel="$\\omega^{-1}$", ylabel="$\\theta$")
ax[1].set(xlabel="$\\omega^{-1}$", ylabel="$\\mu$")
None
Explanation: Recreation of Lewis and Mohler
The EM algorithm which we're using originates in:
Lewis, Mohler, "A Nonparametric EM algorithm for Multiscale Hawkes Processes", preprint 2010, see http://math.scu.edu/~gmohler/EM_paper.pdf
In Figure 1 of that paper, they carry out simulations (10 times) with $\mu=1, \theta=0.5$ time in $[0,2000]$ and with $\omega^{-1} = 0.01, 0.1, 1$ and $10$.
By comparison, if we rescale to $\mu=0.1$ as used above, the paper considers time in $[0,20000]$ and $\omega^{-1} = 0.001, 0.01, 0.1$ and $1$ which are more extreme values than we considered before!
As in the paper, here we run the algorithm until the difference (in $L^\infty$ norm) between iterations is less than $10^{-5}$. We are only varying $\omega$ which controls the "scale" of the aftershocks. As $\omega^{-1}$ increases, aftershocks becomes more spread-out in time, and thus, at least intuitively, it becomes harder to tell background events apart from aftershock events.
End of explanation
def corrected_m_step(points, total_time, theta=0.5, omega=0.05, mu=0.1):
omega_1, omega_2, mu_e = 0.0, 0.0, 0.0
for col in range(len(points)):
p, norm, dt = p_matrix_col(points, col, theta, omega, mu)
wp = p[0:col] * dt
omega_1 += np.sum(p[0:col]) / norm
omega_2 += np.sum(wp) / norm
mu_e += p[-1] / norm
from_end = total_time - points
exp_from_end = np.exp(-omega * from_end)
corrected_n = len(points) - np.sum(exp_from_end)
corrected_omega_2 = omega_2 + theta * np.sum(from_end * exp_from_end)
return omega_1 / corrected_n, omega_1 / corrected_omega_2, mu_e / total_time
all_results_dict = {}
for omega_inv in [0.01, 0.1, 1, 10]:
results = []
for _ in range(10):
process = source_sepp.SelfExcitingPointProcess(
background_sampler = source_sepp.HomogeneousPoissonSampler(rate=1),
trigger_sampler = source_sepp.ExponentialDecaySampler(intensity=0.5, exp_rate=1/omega_inv))
points = process.sample(0, 2000)
convergents = apply_algorithm(points, 2000, 2000, theta_in=0.5, omega_in=1/omega_inv, mu_in=1,
convergence_criteria = 10**(-5), func=corrected_m_step)
convergents = np.asarray(convergents)
# (theta, omega, mu), iterations
results.append((convergents[:,-1], convergents.shape[-1]))
all_results_dict[omega_inv] = results
thetas = {key: [result[0][0] for result in all_results_dict[key]] for key in all_results_dict}
mus = {key: [result[0][2] for result in all_results_dict[key]] for key in all_results_dict}
fig, ax = plt.subplots(ncols=2, figsize=(16,6))
plot(ax[0], thetas, 0.5)
plot(ax[1], mus, 1)
ax[0].set(xlabel="$\\omega^{-1}$", ylabel="$\\theta$")
ax[1].set(xlabel="$\\omega^{-1}$", ylabel="$\\mu$")
None
Explanation: Except when $\omega^{-1}=10$ these look like the graphs from the paper.
Reading more closely, we see from the paper:
To investigate the convergence of (14) we simulate realizations ... run the EM algorithm (with boundary correction)
This is misleading, as (14) refers to the uncorrected algorithm. By "boundary correction" we mean computing the likelihood without applying certain approximations. This leads to a variant on the "M step":
$$ \theta = \frac{\sum_{i<j} p_{ij}}{n - \sum_{i=1}^n e^{-\omega (T-t_i)}}, \qquad
\omega = \frac{\sum_{i<j} p_{ij}}{\sum_{i<j}(t_j-t_i)p_{ij} + \theta \sum_{i=1}^n(T-t_i)e^{-\omega(T-t_i)}} $$
End of explanation
def likelihood(points, time_range, theta, omega, mu):
n = len(points)
first_sum = np.empty(n)
first_sum[0] = mu
for i in range(1, n):
dt = points[i] - points[:i]
first_sum[i] = mu + theta * omega * np.sum(np.exp(-omega * dt))
second_sum = np.sum(np.exp(-omega * (time_range - points)))
return np.sum(np.log(first_sum)) - (mu * time_range + n * theta + theta - theta * second_sum)
omega_inv = 10
process = source_sepp.SelfExcitingPointProcess(
background_sampler = source_sepp.HomogeneousPoissonSampler(rate=1),
trigger_sampler = source_sepp.ExponentialDecaySampler(intensity=0.5, exp_rate=1/omega_inv))
points = process.sample(0, 2000)
convergents = apply_algorithm(points, 2000, 2000, theta_in=0.5, omega_in=1/omega_inv, mu_in=1,
convergence_criteria = 10**(-5), func=corrected_m_step)
convergents = np.asarray(convergents)
theta, omega, mu = convergents[:,-1]
theta, omega, mu
likelihood(points, 2000, theta, omega, mu)
likelihood(points, 2000, 0.5, 0.1, 1)
Explanation: This is definitely an improvement, but despite having tried this a number of times, I cannot reproduce the graphs from Lewis and Mohler.
Computing the likelihood
From (3) Theorem 3 we know that if $\lambda^*(t)$ is the conditional intensity function, then the likelihood of the process is:
$$ L = \Big( \prod_{i=1}^n \lambda^*(t_i) \Big) \exp\Big( -\int_0^T \lambda^*(s) \ ds \Big) $$
where $(t_1,\cdots,t_n)$ is a sample of the process in the time window $[0,T]$. Taking logs, this expands to
$$ l = \log L = \sum_{i=1}^n \log\Big( \mu + \sum_{j=1}^{i-1} \theta\omega e^{-\omega (t_i-t_j)} \Big)
- \int_0^T \lambda^*(s) \ ds. $$
The integral can be split into parts:
\begin{align} \int_0^T \lambda^*(s) \ ds &= \mu T + \sum_{i=1}^{n-1} \int_{t_i}^{t_{i+1}} \sum_{j=1}^i \theta\omega e^{-\omega (t-t_j)} \ dt + \int_{t_n}^T \sum_{j=1}^n \theta\omega e^{-\omega (t-t_j)} \ dt \
&= \mu T + \theta \sum_{i=1}^{n-1} \sum_{j=1}^i \Big( e^{-\omega (t_i-t_j)} - e^{-\omega (t_{i+1}-t_j)} \Big)
+ \theta \sum_{j=1}^n \Big( e^{-\omega (t_n-t_j)} - e^{-\omega (T-t_j)} \Big)
\end{align}
Many terms in the double sum cancel out, leaving
\begin{align}
& \mu T + \theta \sum_{i=1}^{n-1} e^{-\omega (t_i-t_i)} - \theta \sum_{j=1}^{n-1} e^{-\omega (t_n-t_j)}
+ \theta \sum_{j=1}^n \Big( e^{-\omega (t_n-t_j)} - e^{-\omega (T-t_j)} \Big) \
&= \mu T + n \theta + \theta - \theta \sum_{j=1}^n e^{-\omega (T-t_j)}
\end{align}
End of explanation |
14,774 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<a href="https
Step1: Neural style transfer
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https
Step2: Download images and choose a style image and a content image
Step3: Visualize the input
Define a function to load an image and limit its maximum dimension to 1024 pixels.
Step4: Create a simple function to display an image
Step5: Fast Style Transfer using TF-Hub
This tutorial demonstrates the original style-transfer algorithm, which optimizes the image content to a particular style. Before getting into the details let's see how the TensorFlow Hub module does
Step6: Define content and style representations
Use the intermediate layers of the model to get the content and style representations of the image. Starting from the network's input layer, the first few layer activations represent low-level features like edges and textures. As you step through the network, the final few layers represent higher-level features—object parts like wheels or eyes. In this case, you are using the VGG19 network architecture, a pretrained image classification network. These intermediate layers are necessary to define the representation of content and style from the images. For an input image, try to match the corresponding style and content target representations at these intermediate layers.
Load a VGG19 and test run it on our image to ensure it's used correctly
Step7: Now load a VGG19 without the classification head, and list the layer names
Step8: Choose intermediate layers from the network to represent the style and content of the image
Step10: Intermediate layers for style and content
So why do these intermediate outputs within our pretrained image classification network allow us to define style and content representations?
At a high level, in order for a network to perform image classification (which this network has been trained to do), it must understand the image. This requires taking the raw image as input pixels and building an internal representation that converts the raw image pixels into a complex understanding of the features present within the image.
This is also a reason why convolutional neural networks are able to generalize well
Step11: And to create the model
Step12: Calculate style
The content of an image is represented by the values of the intermediate feature maps.
It turns out, the style of an image can be described by the means and correlations across the different feature maps. Calculate a Gram matrix that includes this information by taking the outer product of the feature vector with itself at each location, and averaging that outer product over all locations. This Gram matrix can be calculated for a particular layer as
Step13: Extract style and content
Build a model that returns the style and content tensors.
Step14: When called on an image, this model returns the gram matrix (style) of the style_layers and content of the content_layers
Step15: Run gradient descent
With this style and content extractor, you can now implement the style transfer algorithm. Do this by calculating the mean square error for your image's output relative to each target, then take the weighted sum of these losses.
Set your style and content target values
Step16: Define a tf.Variable to contain the image to optimize. To make this quick, initialize it with the content image (the tf.Variable must be the same shape as the content image)
Step17: Since this is a float image, define a function to keep the pixel values between 0 and 1
Step18: Create an optimizer. The paper recommends LBFGS, but Adam works okay, too
Step19: To optimize this, use a weighted combination of the two losses to get the total loss
Step20: Use tf.GradientTape to update the image.
Step21: Now run a few steps to test
Step22: Since it's working, perform a longer optimization
Step23: Total variation loss
One downside to this basic implementation is that it produces a lot of high frequency artifacts. Decrease these using an explicit regularization term on the high frequency components of the image. In style transfer, this is often called the total variation loss
Step24: This shows how the high frequency components have increased.
Also, this high frequency component is basically an edge-detector. You can get similar output from the Sobel edge detector, for example
Step25: The regularization loss associated with this is the sum of the absolute values
Step26: That demonstrated what it does. But there's no need to implement it yourself, TensorFlow includes a standard implementation
Step27: Re-run the optimization
Choose a weight for the total_variation_loss
Step28: Now include it in the train_step function
Step29: Reinitialize the optimization variable
Step30: And run the optimization
Step31: Finally, save the result | Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Explanation: <a href="https://colab.research.google.com/github/Mayreck/Lawl/blob/master/Mayreck_style_transfer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Copyright 2018 The TensorFlow Authors.
End of explanation
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
import IPython.display as display
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (12,12)
mpl.rcParams['axes.grid'] = False
import numpy as np
import PIL.Image
import time
import functools
def tensor_to_image(tensor):
tensor = tensor*255
tensor = np.array(tensor, dtype=np.uint8)
if np.ndim(tensor)>3:
assert tensor.shape[0] == 1
tensor = tensor[0]
return PIL.Image.fromarray(tensor)
Explanation: Neural style transfer
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/generative/style_transfer"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/style_transfer.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/style_transfer.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/generative/style_transfer.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This tutorial uses deep learning to compose one image in the style of another image (ever wish you could paint like Picasso or Van Gogh?). This is known as neural style transfer and the technique is outlined in <a href="https://arxiv.org/abs/1508.06576" class="external">A Neural Algorithm of Artistic Style</a> (Gatys et al.).
Note: This tutorial demonstrates the original style-transfer algorithm. It optimizes the image content to a particular style. Modern approaches train a model to generate the stylized image directly (similar to cyclegan). This approach is much faster (up to 1000x). A pretrained Arbitrary Image Stylization module is available in TensorFlow Hub, and for TensorFlow Lite.
Neural style transfer is an optimization technique used to take two images—a content image and a style reference image (such as an artwork by a famous painter)—and blend them together so the output image looks like the content image, but “painted” in the style of the style reference image.
This is implemented by optimizing the output image to match the content statistics of the content image and the style statistics of the style reference image. These statistics are extracted from the images using a convolutional network.
For example, let’s take an image of this dog and Wassily Kandinsky's Composition 7:
<img src="https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg" width="500px"/>
Yellow Labrador Looking, from Wikimedia Commons
<img src="https://storage.googleapis.com/download.tensorflow.org/example_images/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg" width="500px"/>
Now how would it look like if Kandinsky decided to paint the picture of this Dog exclusively with this style? Something like this?
<img src="https://tensorflow.org/tutorials/generative/images/stylized-image.png" style="width: 500px;"/>
Setup
Import and configure modules
End of explanation
content_path = tf.keras.utils.get_file('YellowLabwsssissllradorLooking_new.jpg', 'https://scontent-ort2-2.xx.fbcdn.net/v/t1.0-9/p720x720/59937930_10215881059314139_5688558061837352960_o.jpg?_nc_cat=111&_nc_ohc=qP8P8DkPwuoAQm9f3GOD9rV4LhXzlhkap9qvd9HyB1oufY8FE9ouKHa-Q&_nc_ht=scontent-ort2-2.xx&oh=0034336b015ee08955e96201912ce326&oe=5E745E7C')
# https://commons.wikimedia.org/wiki/File:Vassily_Kandinsky,_1913_-_Composition_7.jpg
style_path = tf.keras.utils.get_file('kanssssdisnssssskys5.jpg','https://previews.123rf.com/images/zagory/zagory1706/zagory170600171/81008217-abstract-stained-glass-background-the-colored-elements-arranged-in-rainbow-spectrum.jpg')
Explanation: Download images and choose a style image and a content image:
End of explanation
def load_img(path_to_img):
max_dim = 1024
img = tf.io.read_file(path_to_img)
img = tf.image.decode_image(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
shape = tf.cast(tf.shape(img)[:-1], tf.float32)
long_dim = max(shape)
scale = max_dim / long_dim
new_shape = tf.cast(shape * scale, tf.int32)
img = tf.image.resize(img, new_shape)
img = img[tf.newaxis, :]
return img
Explanation: Visualize the input
Define a function to load an image and limit its maximum dimension to 1024 pixels.
End of explanation
def imshow(image, title=None):
if len(image.shape) > 3:
image = tf.squeeze(image, axis=0)
plt.imshow(image)
if title:
plt.title(title)
content_image = load_img(content_path)
style_image = load_img(style_path)
plt.subplot(1, 2, 1)
imshow(content_image, 'Content Image')
plt.subplot(1, 2, 2)
imshow(style_image, 'Style Image')
Explanation: Create a simple function to display an image:
End of explanation
import tensorflow_hub as hub
hub_module = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/1')
stylized_image = hub_module(tf.constant(content_image), tf.constant(style_image))[0]
tensor_to_image(stylized_image)
Explanation: Fast Style Transfer using TF-Hub
This tutorial demonstrates the original style-transfer algorithm, which optimizes the image content to a particular style. Before getting into the details let's see how the TensorFlow Hub module does:
End of explanation
x = tf.keras.applications.vgg19.preprocess_input(content_image*255)
x = tf.image.resize(x, (224, 224))
vgg = tf.keras.applications.VGG19(include_top=True, weights='imagenet')
prediction_probabilities = vgg(x)
prediction_probabilities.shape
predicted_top_5 = tf.keras.applications.vgg19.decode_predictions(prediction_probabilities.numpy())[0]
[(class_name, prob) for (number, class_name, prob) in predicted_top_5]
Explanation: Define content and style representations
Use the intermediate layers of the model to get the content and style representations of the image. Starting from the network's input layer, the first few layer activations represent low-level features like edges and textures. As you step through the network, the final few layers represent higher-level features—object parts like wheels or eyes. In this case, you are using the VGG19 network architecture, a pretrained image classification network. These intermediate layers are necessary to define the representation of content and style from the images. For an input image, try to match the corresponding style and content target representations at these intermediate layers.
Load a VGG19 and test run it on our image to ensure it's used correctly:
End of explanation
vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
print()
for layer in vgg.layers:
print(layer.name)
Explanation: Now load a VGG19 without the classification head, and list the layer names
End of explanation
# Content layer where will pull our feature maps
content_layers = ['block5_conv2']
# Style layer of interest
style_layers = ['block1_conv1',
'block2_conv1',
'block3_conv1',
'block4_conv1',
'block5_conv1']
num_content_layers = len(content_layers)
num_style_layers = len(style_layers)
Explanation: Choose intermediate layers from the network to represent the style and content of the image:
End of explanation
def vgg_layers(layer_names):
  """Creates a vgg model that returns a list of intermediate output values."""
# Load our model. Load pretrained VGG, trained on imagenet data
vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
vgg.trainable = False
outputs = [vgg.get_layer(name).output for name in layer_names]
model = tf.keras.Model([vgg.input], outputs)
return model
Explanation: Intermediate layers for style and content
So why do these intermediate outputs within our pretrained image classification network allow us to define style and content representations?
At a high level, in order for a network to perform image classification (which this network has been trained to do), it must understand the image. This requires taking the raw image as input pixels and building an internal representation that converts the raw image pixels into a complex understanding of the features present within the image.
This is also a reason why convolutional neural networks are able to generalize well: they’re able to capture the invariances and defining features within classes (e.g. cats vs. dogs) that are agnostic to background noise and other nuisances. Thus, somewhere between where the raw image is fed into the model and the output classification label, the model serves as a complex feature extractor. By accessing intermediate layers of the model, you're able to describe the content and style of input images.
Build the model
The networks in tf.keras.applications are designed so you can easily extract the intermediate layer values using the Keras functional API.
To define a model using the functional API, specify the inputs and outputs:
model = Model(inputs, outputs)
This following function builds a VGG19 model that returns a list of intermediate layer outputs:
End of explanation
style_extractor = vgg_layers(style_layers)
style_outputs = style_extractor(style_image*255)
#Look at the statistics of each layer's output
for name, output in zip(style_layers, style_outputs):
print(name)
print(" shape: ", output.numpy().shape)
print(" min: ", output.numpy().min())
print(" max: ", output.numpy().max())
print(" mean: ", output.numpy().mean())
print()
Explanation: And to create the model:
End of explanation
def gram_matrix(input_tensor):
result = tf.linalg.einsum('bijc,bijd->bcd', input_tensor, input_tensor)
input_shape = tf.shape(input_tensor)
num_locations = tf.cast(input_shape[1]*input_shape[2], tf.float32)
return result/(num_locations)
Explanation: Calculate style
The content of an image is represented by the values of the intermediate feature maps.
It turns out, the style of an image can be described by the means and correlations across the different feature maps. Calculate a Gram matrix that includes this information by taking the outer product of the feature vector with itself at each location, and averaging that outer product over all locations. This Gram matrix can be calculated for a particular layer as:
$$G^l_{cd} = \frac{\sum_{ij} F^l_{ijc}(x)F^l_{ijd}(x)}{IJ}$$
This can be implemented concisely using the tf.linalg.einsum function:
End of explanation
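# Sanity-check sketch (demo-only names): for a single image the einsum above is just
# F^T F / (I*J), where F flattens the spatial dimensions of the feature map.
feats_demo = tf.random.normal((1, 7, 5, 3))
F_demo = tf.reshape(feats_demo[0], (-1, 3))
gram_demo = tf.matmul(F_demo, F_demo, transpose_a=True) / tf.cast(tf.shape(F_demo)[0], tf.float32)
print(tf.reduce_max(tf.abs(gram_demo - gram_matrix(feats_demo)[0])).numpy())  # ~ 0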
class StyleContentModel(tf.keras.models.Model):
def __init__(self, style_layers, content_layers):
super(StyleContentModel, self).__init__()
self.vgg = vgg_layers(style_layers + content_layers)
self.style_layers = style_layers
self.content_layers = content_layers
self.num_style_layers = len(style_layers)
self.vgg.trainable = False
def call(self, inputs):
"Expects float input in [0,1]"
inputs = inputs*255.0
preprocessed_input = tf.keras.applications.vgg19.preprocess_input(inputs)
outputs = self.vgg(preprocessed_input)
style_outputs, content_outputs = (outputs[:self.num_style_layers],
outputs[self.num_style_layers:])
style_outputs = [gram_matrix(style_output)
for style_output in style_outputs]
content_dict = {content_name:value
for content_name, value
in zip(self.content_layers, content_outputs)}
style_dict = {style_name:value
for style_name, value
in zip(self.style_layers, style_outputs)}
return {'content':content_dict, 'style':style_dict}
Explanation: Extract style and content
Build a model that returns the style and content tensors.
End of explanation
extractor = StyleContentModel(style_layers, content_layers)
results = extractor(tf.constant(content_image))
style_results = results['style']
print('Styles:')
for name, output in sorted(results['style'].items()):
print(" ", name)
print(" shape: ", output.numpy().shape)
print(" min: ", output.numpy().min())
print(" max: ", output.numpy().max())
print(" mean: ", output.numpy().mean())
print()
print("Contents:")
for name, output in sorted(results['content'].items()):
print(" ", name)
print(" shape: ", output.numpy().shape)
print(" min: ", output.numpy().min())
print(" max: ", output.numpy().max())
print(" mean: ", output.numpy().mean())
Explanation: When called on an image, this model returns the gram matrix (style) of the style_layers and content of the content_layers:
End of explanation
style_targets = extractor(style_image)['style']
content_targets = extractor(content_image)['content']
Explanation: Run gradient descent
With this style and content extractor, you can now implement the style transfer algorithm. Do this by calculating the mean square error for your image's output relative to each target, then take the weighted sum of these losses.
Set your style and content target values:
End of explanation
image = tf.Variable(content_image)
Explanation: Define a tf.Variable to contain the image to optimize. To make this quick, initialize it with the content image (the tf.Variable must be the same shape as the content image):
End of explanation
def clip_0_1(image):
return tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
Explanation: Since this is a float image, define a function to keep the pixel values between 0 and 1:
End of explanation
opt = tf.optimizers.Adam(learning_rate=0.02, beta_1=0.99, epsilon=1e-1)
Explanation: Create an optimizer. The paper recommends LBFGS, but Adam works okay, too:
End of explanation
style_weight=1e-2
content_weight=1e4
def style_content_loss(outputs):
style_outputs = outputs['style']
content_outputs = outputs['content']
style_loss = tf.add_n([tf.reduce_mean((style_outputs[name]-style_targets[name])**2)
for name in style_outputs.keys()])
style_loss *= style_weight / num_style_layers
content_loss = tf.add_n([tf.reduce_mean((content_outputs[name]-content_targets[name])**2)
for name in content_outputs.keys()])
content_loss *= content_weight / num_content_layers
loss = style_loss + content_loss
return loss
Explanation: To optimize this, use a weighted combination of the two losses to get the total loss:
End of explanation
@tf.function()
def train_step(image):
with tf.GradientTape() as tape:
outputs = extractor(image)
loss = style_content_loss(outputs)
grad = tape.gradient(loss, image)
opt.apply_gradients([(grad, image)])
image.assign(clip_0_1(image))
Explanation: Use tf.GradientTape to update the image.
End of explanation
train_step(image)
train_step(image)
train_step(image)
tensor_to_image(image)
Explanation: Now run a few steps to test:
End of explanation
import time
start = time.time()
epochs = 10
steps_per_epoch = 100
step = 0
for n in range(epochs):
for m in range(steps_per_epoch):
step += 1
train_step(image)
print(".", end='')
display.clear_output(wait=True)
display.display(tensor_to_image(image))
print("Train step: {}".format(step))
end = time.time()
print("Total time: {:.1f}".format(end-start))
Explanation: Since it's working, perform a longer optimization:
End of explanation
def high_pass_x_y(image):
x_var = image[:,:,1:,:] - image[:,:,:-1,:]
y_var = image[:,1:,:,:] - image[:,:-1,:,:]
return x_var, y_var
x_deltas, y_deltas = high_pass_x_y(content_image)
plt.figure(figsize=(14,10))
plt.subplot(2,2,1)
imshow(clip_0_1(2*y_deltas+0.5), "Horizontal Deltas: Original")
plt.subplot(2,2,2)
imshow(clip_0_1(2*x_deltas+0.5), "Vertical Deltas: Original")
x_deltas, y_deltas = high_pass_x_y(image)
plt.subplot(2,2,3)
imshow(clip_0_1(2*y_deltas+0.5), "Horizontal Deltas: Styled")
plt.subplot(2,2,4)
imshow(clip_0_1(2*x_deltas+0.5), "Vertical Deltas: Styled")
Explanation: Total variation loss
One downside to this basic implementation is that it produces a lot of high frequency artifacts. Decrease these using an explicit regularization term on the high frequency components of the image. In style transfer, this is often called the total variation loss:
End of explanation
plt.figure(figsize=(14,10))
sobel = tf.image.sobel_edges(content_image)
plt.subplot(1,2,1)
imshow(clip_0_1(sobel[...,0]/4+0.5), "Horizontal Sobel-edges")
plt.subplot(1,2,2)
imshow(clip_0_1(sobel[...,1]/4+0.5), "Vertical Sobel-edges")
Explanation: This shows how the high frequency components have increased.
Also, this high frequency component is basically an edge-detector. You can get similar output from the Sobel edge detector, for example:
End of explanation
def total_variation_loss(image):
x_deltas, y_deltas = high_pass_x_y(image)
return tf.reduce_sum(tf.abs(x_deltas)) + tf.reduce_sum(tf.abs(y_deltas))
total_variation_loss(image).numpy()
Explanation: The regularization loss associated with this is the sum of the squares of the values:
End of explanation
tf.image.total_variation(image).numpy()
Explanation: That demonstrated what it does. But there's no need to implement it yourself, TensorFlow includes a standard implementation:
End of explanation
total_variation_weight=30
Explanation: Re-run the optimization
Choose a weight for the total_variation_loss:
End of explanation
@tf.function()
def train_step(image):
with tf.GradientTape() as tape:
outputs = extractor(image)
loss = style_content_loss(outputs)
loss += total_variation_weight*tf.image.total_variation(image)
grad = tape.gradient(loss, image)
opt.apply_gradients([(grad, image)])
image.assign(clip_0_1(image))
Explanation: Now include it in the train_step function:
End of explanation
image = tf.Variable(content_image)
Explanation: Reinitialize the optimization variable:
End of explanation
import time
start = time.time()
epochs = 10
steps_per_epoch = 100
step = 0
for n in range(epochs):
for m in range(steps_per_epoch):
step += 1
train_step(image)
print(".", end='')
display.clear_output(wait=True)
display.display(tensor_to_image(image))
print("Train step: {}".format(step))
end = time.time()
print("Total time: {:.1f}".format(end-start))
Explanation: And run the optimization:
End of explanation
file_name = 'stylized-image.png'
tensor_to_image(image).save(file_name)
try:
from google.colab import files
except ImportError:
pass
else:
files.download(file_name)
Explanation: Finally, save the result:
End of explanation |
14,775 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Introduction" data-toc-modified-id="Introduction-1">Introduction</a></span><ul class="toc-item"><li><span><a href="#Setup" data-toc-modified-id="Setup-1.1">Setup</a></span></li></ul></li><li><span><a href="#Imagined-use-case" data-toc-modified-id="Imagined-use-case-2">Imagined use case</a></span><ul class="toc-item"><li><span><a href="#PathManager" data-toc-modified-id="PathManager-2.1">PathManager</a></span></li><li><span><a href="#Import-into-ISIS" data-toc-modified-id="Import-into-ISIS-2.2">Import into ISIS</a></span></li></ul></li></ul></div>
Introduction
This package can
download,
(re-)calibrate, and
manage the storage
of Cassini ISS data.
Furthermore, it can be used to show ISS data of Saturn rings, with (hopefully) correct ring radius and azimuth as plot axes.
As a special add-on, it uses a data-file of Inner Lindblad Resonances to display them on top of the images.
Note
Step1: PathManager
The io module offers a PathManager, which knows where the data is being stored and offers object attributes with sub-paths to objects inside the respective folder
Step2: The self-representation of an object in Jupyter notebooks is already useful in this case
Step3: The downloader module offers some helpers to get data
Step4: Now look at the self-representation of the PathManager object again
Step5: Import into ISIS | Python Code:
id_ = 'N1467344745'
Explanation: <h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Introduction" data-toc-modified-id="Introduction-1">Introduction</a></span><ul class="toc-item"><li><span><a href="#Setup" data-toc-modified-id="Setup-1.1">Setup</a></span></li></ul></li><li><span><a href="#Imagined-use-case" data-toc-modified-id="Imagined-use-case-2">Imagined use case</a></span><ul class="toc-item"><li><span><a href="#PathManager" data-toc-modified-id="PathManager-2.1">PathManager</a></span></li><li><span><a href="#Import-into-ISIS" data-toc-modified-id="Import-into-ISIS-2.2">Import into ISIS</a></span></li></ul></li></ul></div>
Introduction
This package can
download,
(re-)calibrate, and
manage the storage
of Cassini ISS data.
Furthermore, it can be used to show ISS data of Saturn rings, with (hopefully) correct ring radius and azimuth as plot axes.
As a special add-on, it uses a data-file of Inner Lindblad Resonances to display them on top of the images.
Note: For the (re-)calibration the user has to have an ISIS environment installed: https://isis.astrogeology.usgs.gov/documents/InstallGuide/index.html
The pysis module which is used by pyciss will automatically activate it, if it is findable from the shell where the Python/IPython interpreter was launched.
Setup
Upon the first time, the pyciss.io module would be imported, an error will appear saying that
No configuration file [...] found.
Please run `pyciss.io.set_database_path()` and provide the path where
you want to keep your automatically downloaded images.
`pyciss` will store this path in [...], where you can easily change it later."
When calling the aforementioned function with a path, that path will be used to store the downloaded and managed ISS data.
managed refers here to the fact that all later derived data products will automatically be stored in the folder of the respective ISS image_id, under the path that the user provided at this initial setup step.
Imagined use case
The user might read a paper that uses Cassini ISS data.
A data ID is given, which is:
End of explanation
from pyciss import io
pm = io.PathManager(id_)
Explanation: PathManager
The io module offers a PathManager, which knows where the data is being stored and offers object attributes with sub-paths to objects inside the respective folder:
End of explanation
pm
Explanation: The self-representation of an object in Jupyter notebooks is already useful in this case:
End of explanation
from pyciss import downloader
downloader.download_file_id(id_)
Explanation: The downloader module offers some helpers to get data:
End of explanation
pm
Explanation: Now look at the self-representation of the PathManager object again:
End of explanation
downloader.download_and_calibrate(id_)
from osgeo import gdal
ds = gdal.Open(str(pm.raw_label))
%matplotlib nbagg
data = ds.ReadAsArray()
vmin,vmax = np.percentile(data, (1, 99))
plt.imshow(data, vmin=vmin, vmax=vmax, cmap='gray')
from pyciss.ringcube import RingCube
cube = RingCube(pm.cubepath)
cube.imshow()
Explanation: Import into ISIS
End of explanation |
14,776 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
PySAL Change Log Statistics
This notebook generates the summary statistics for a package.
It assumes you are running this under the tools directory at the toplevel of the package
Change the values only in the next cell
Step1: This notebook will generate a file in the current directory with the name "changelog_VERSION.md". You can edit and append this on front of the CHANGELOG file for the package release.
Step2: Total commits by subpackage
Step3: List Contributors
Some of our contributors have many aliases for the same identity. So, we've added a mapping to make sure that individuals are listed once (and only once).
Step12: Disaggregate by PR, Issue | Python Code:
package_name = 'spint'
release_date = '2020-09-08'
start_date = '2019-07-22'
Explanation: PySAL Change Log Statistics
This notebook generates the summary statistics for a package.
It assumes you are running this under the tools directory at the toplevel of the package
Change the values only in the next cell
End of explanation
from __future__ import print_function
import os
import json
import re
import sys
import pandas
from datetime import datetime, timedelta
from time import sleep
from subprocess import check_output
try:
from urllib import urlopen
except:
from urllib.request import urlopen
import ssl
import yaml
context = ssl._create_unverified_context()
CWD = os.path.abspath(os.path.curdir)
CWD
since_date = '--since="{start}"'.format(start=start_date)
since_date
since = datetime.strptime(start_date+" 0:0:0", "%Y-%m-%d %H:%M:%S")
since
# get __version__
f = "../{package}/__init__.py".format(package=package_name)
with open(f, 'r') as initfile:
exec(initfile.readline())
Explanation: This notebook will generate a file in the current directory with the name "changelog_VERSION.md". You can edit and append this on front of the CHANGELOG file for the package release.
End of explanation
cmd = ['git', 'log', '--oneline', since_date]
ncommits = len(check_output(cmd).splitlines())
ncommits
Explanation: Total commits by subpackage
End of explanation
identities = {'Levi John Wolf': ('ljwolf', 'Levi John Wolf'),
'Serge Rey': ('Serge Rey', 'Sergio Rey', 'sjsrey', 'serge'),
'Wei Kang': ('Wei Kang', 'weikang9009'),
'Dani Arribas-Bel': ('Dani Arribas-Bel', 'darribas')
}
def regularize_identity(string):
string = string.decode()
for name, aliases in identities.items():
for alias in aliases:
if alias in string:
string = string.replace(alias, name)
if len(string.split(' '))>1:
string = string.title()
return string.lstrip('* ')
author_cmd = ['git', 'log', '--format=* %aN', since_date]
from collections import Counter
ncommits = len(check_output(cmd).splitlines())
all_authors = check_output(author_cmd).splitlines()
counter = Counter([regularize_identity(author) for author in all_authors])
# global_counter += counter
# counters.update({'.'.join((package,subpackage)): counter})
unique_authors = sorted(set(all_authors))
unique_authors = counter.keys()
unique_authors
Explanation: List Contributors
Some of our contributors have many aliases for the same identity. So, we've added a mapping to make sure that individuals are listed once (and only once).
End of explanation
from datetime import datetime, timedelta
ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
PER_PAGE = 100
element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
def parse_link_header(headers):
link_s = headers.get('link', '')
urls = element_pat.findall(link_s)
rels = rel_pat.findall(link_s)
d = {}
for rel,url in zip(rels, urls):
d[rel] = url
return d
def get_paged_request(url):
get a full list, handling APIv3's paging
results = []
while url:
#print("fetching %s" % url, file=sys.stderr)
f = urlopen(url)
results.extend(json.load(f))
links = parse_link_header(f.headers)
url = links.get('next')
return results
def get_issues(project="pysal/pysal", state="closed", pulls=False):
Get a list of the issues from the Github API.
which = 'pulls' if pulls else 'issues'
url = "https://api.github.com/repos/%s/%s?state=%s&per_page=%i" % (project, which, state, PER_PAGE)
return get_paged_request(url)
def _parse_datetime(s):
Parse dates in the format returned by the Github API.
if s:
return datetime.strptime(s, ISO8601)
else:
return datetime.fromtimestamp(0)
def issues2dict(issues):
Convert a list of issues to a dict, keyed by issue number.
idict = {}
for i in issues:
idict[i['number']] = i
return idict
def is_pull_request(issue):
Return True if the given issue is a pull request.
return 'pull_request_url' in issue
def issues_closed_since(period=timedelta(days=365), project="pysal/pysal", pulls=False):
Get all issues closed since a particular point in time. period
can either be a datetime object, or a timedelta object. In the
latter case, it is used as a time before the present.
which = 'pulls' if pulls else 'issues'
if isinstance(period, timedelta):
period = datetime.now() - period
url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, period.strftime(ISO8601), PER_PAGE)
allclosed = get_paged_request(url)
# allclosed = get_issues(project=project, state='closed', pulls=pulls, since=period)
filtered = [i for i in allclosed if _parse_datetime(i['closed_at']) > period]
# exclude rejected PRs
if pulls:
filtered = [ pr for pr in filtered if pr['merged_at'] ]
return filtered
def sorted_by_field(issues, field='closed_at', reverse=False):
Return a list of issues sorted by closing date date.
return sorted(issues, key = lambda i:i[field], reverse=reverse)
def report(issues, show_urls=False):
Summary report about a list of issues, printing number and title.
# titles may have unicode in them, so we must encode everything below
if show_urls:
for i in issues:
role = 'ghpull' if 'merged_at' in i else 'ghissue'
print('* :%s:`%d`: %s' % (role, i['number'],
i['title'].encode('utf-8')))
else:
for i in issues:
print('* %d: %s' % (i['number'], i['title'].encode('utf-8')))
all_issues = {}
all_pulls = {}
total_commits = 0
#prj='pysal/libpysal'
prj = 'pysal/{package}'.format(package=package_name)
issues = issues_closed_since(since, project=prj,pulls=False)
pulls = issues_closed_since(since, project=prj,pulls=True)
issues = sorted_by_field(issues, reverse=True)
pulls = sorted_by_field(pulls, reverse=True)
n_issues, n_pulls = map(len, (issues, pulls))
n_total = n_issues + n_pulls
issue_listing = []
for issue in issues:
entry = "{title} (#{number})".format(title=issue['title'],number=issue['number'])
issue_listing.append(entry)
pull_listing = []
for pull in pulls:
entry = "{title} (#{number})".format(title=pull['title'],number=pull['number'])
pull_listing.append(entry)
pull_listing
message = "We closed a total of {total} issues (enhancements and bug fixes) through {pr} pull requests".format(total=n_total, pr=n_pulls)
message = "{msg}, since our last release on {previous}.".format(msg=message, previous=str(start_date))
message
message += "\n\n## Issues Closed\n"
print(message)
issues = "\n".join([" - "+issue for issue in issue_listing])
message += issues
message += "\n\n## Pull Requests\n"
pulls = "\n".join([" - "+pull for pull in pull_listing])
message += pulls
print(message)
people = "\n".join([" - "+person for person in unique_authors])
print(people)
message +="\n\nThe following individuals contributed to this release:\n\n{people}".format(people=people)
print(message)
head = "# Changes\n\nVersion {version} ({release_date})\n\n".format(version=__version__, release_date=release_date)
print(head+message)
outfile = 'changelog_{version}.md'.format(version=__version__)
with open(outfile, 'w') as of:
of.write(head+message)
Explanation: Disaggregate by PR, Issue
End of explanation |
14,777 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Lecture
Step1: When to use python? -- 50xp, Status
Step3: Python as a calculator -- 100xp, Status
Step5: Lecture
Step7: 2. Calculations with variables
Remember how you calculated the money you ended up with after 7 years of investing $100? You did something like this
Step9: 3. Other variable types
In the previous exercise, you worked with two Python data types
Step10: 4. Guess the type
To find out the type of a value or a variable that refers to that value, you can use the type() function. Suppose you've defined a variable a, but you forgot the type of this variable. To determine the type of `a, simply execute
Step12: 5. Operations with other types
Different types behave differently in python,
Sum of two or more strings will be a "concatenation" ( pasting ) of two or more string together.
Sum of two or more int is an int
Sum of two or more floats is a float, except some special situations.
Sum of two or more bool is a bool.
Step14: 6. Type conversion
Using the + operator to paste together two strings can be very useful in building custom messages.
Suppose for example that you've calculated the return of your investment, and want to summarize the results in a string. Assuming the floats savings and result are defined, you can try something like this
Step15: 7. Can Python handle everything?
Following are some code snipptes, try to find which one is correct | Python Code:
# working with print function
print(5 / 8)
# Add another print function on new line
print(7 + 10)
Explanation: Lecture : Hello Python!
[RQ-1] : Which of the following statements is correct?
Ans: The Ipython Shell is typically used to work with Python interactively.
[RQ-2] : Which file extension is used for Python script files?**
Ans: .py
[RQ-3] : You need to print the result of adding 3 and 4 inside a script. Which line of code should you write in the script?
Ans: print(int x + int y)
Lab : Hello Python!
Objective :
How to work with Ipython shell.
Writing python scripts.
The Python Interface -- 100xp, Status : Earned
End of explanation
# Just testing division
print(5 / 8)
# Additon works too ( added comment here )
print(7 + 10)
Explanation: When to use python? -- 50xp, Status : Earned
Python is a pretty versatile language. For what applications can you use Python?
Ans: All of the above
Any comments? -- 100xp, Satatus : Earned
We can add comments to python scripts.
Comments are short snippets of plain english, to help you and others understand what the code is about.
To add a comment, use '#'tag, insert it at the front of the text.
Comments have idle state, i.e. they don't affect the code results.
Comments are ignored by the python interpretor.
End of explanation
Suppose you have $100, which you can invest with a 10% return each year. After one year, it's
100 x 1.1 = 110 dollars, and after two years it's 100 x 1.1 x 1.1 = 121.
Add code to calculate how much money you end up with after 7 years
print(5 + 5)
print(5 - 5)
# Multiplication and division
print(3 * 5)
print(10 / 2)
# Exponentiation
print(4 ** 2)
# Modulo
print(18 % 7)
# How much is your $100 worth after 7 years?
# first try was unsuccesful, so used the only two things * and ** operators.
print ( 100 * ( 1.1 ** 7 ) )
Explanation: Python as a calculator -- 100xp, Status : Earned
Python is perfectly suited to do basic calculations. Apart from addition, subtraction, multiplication and division, there is also support for more advanced operations such as:
Exponentiation:. This operator raises the number to its left to the power of the number to its right: for example 42 will give 16.
Modulo: %. It returns the remainder of the division of the number to the left by the number on its right, for example 18 % 7 equals 4.
End of explanation
Instructions :
* Create a variable savings with the value 100.
* Check out this variable by typing 'print( savings )` in the script.
# Create a variable savings
savings = 100
# Print out savings
print("savings: ", savings )
Explanation: Lecture : Variables and Types
[RQ1] : Which line of code creates a variable x with the value '15'?
Ans: x = 15 ( inline code used ).
[RQ2] : What is the value of the variable z after executing these commands?
x = 15
y = 7
z = x + y + 1
Ans: 13
[RQ3] : You execute the following two lines of Python code:
x = "test"
y = False
Ans: x is a string, and y is a boolean.
Lab : Variables and Types
Objective:
Creating Variables.
Performing Calculations on them.
Understand different data types that python offers.
Variable Assignment, earned credit : 100xp
Calculations with variables, earned credit : 100xp
Other variable types, earned credit : 100xp
Guess the type, earned credit : 50xp
Operations with the other types, earned credit : 100xp
Type conversion, earned credit : 100xp
Can Python handle everything?, earned credit : 50xp
1. Variable Assignment
In Python, a variable allows you to refer to a value with a name. To create a variable use =, like this example:
x = 5
You can now use the name of this varibale x, instead of the actual value, 5.
End of explanation
Instructions :
* Create a variable factor, equal to 1.10.
* Use savings and factor to calculate the amount of money,
you end up with after 7 years. Store the result in a new variable,
result.
# Create a variable savings
savings = 100
# Create a variable factor
factor = 1.10
# Calculate result
result = savings * ( factor ** 7 )
# Print out result
print( result )
Explanation: 2. Calculations with variables
Remember how you calculated the money you ended up with after 7 years of investing $100? You did something like this:
100 * 1.10 ** 7
Instead of calculating with the actual values, you can use variables instead. The savings variable you've created in the previous exercise represents the $100 you started with. It's up to you to create a new variable to represent 1.10 and then redo the calculations!
End of explanation
Instructions :
* Create a new string, 'desc', with the value "compound interest".
* Create a new boolean, 'profitable', with the value 'True'.
# Create a variable desc
desc = "compound interest"
# Create a variable profitable
profitable = True
Explanation: 3. Other variable types
In the previous exercise, you worked with two Python data types:
int, or integer: a number without a fractional part.
savings, with the value 100, is an example of an integer.
float, or floating point: a number that has both an integer and fractional part, separated by a point. factor, with the value 1.10, is an example of a float.
Next to numerical data types, there are two other very common data types:
str, or string: a type to represent text. You can use single or double quotes to build a string.
bool, or boolean: a type to represent logical values. Can only be True or False.
End of explanation
a = 10.21
b = "Python's fun!"
c = False
# Check there types:
type(a)
type(b)
type(c)
Explanation: 4. Guess the type
To find out the type of a value or a variable that refers to that value, you can use the type() function. Suppose you've defined a variable a, but you forgot the type of this variable. To determine the type of `a, simply execute:
type(a)
End of explanation
Instructions :
+ Calculate the product of 'savings' and 'factor'.
- Store the result in 'yearl'.
+ What do you think the resulting type will be?
- Find out by printing out the type of 'yearl'
+ Calculate the sum of 'desc' and 'desc'.
- Store the result in new variable 'doubledesc'.
+ Print out 'doubledesc'.
- Did you expect this?
# Several variables to experiment with
savings = 100
factor = 1.1
desc = "compound interest"
# Assign product of factor and savings to year1
year1 = savings * factor
# Print the type of year1
print( type( yearl ) )
# Assign sum of desc and desc to doubledesc
doubledesc = desc + desc
# Print out doubledesc
print( doubledesc )
Explanation: 5. Operations with other types
Different types behave differently in python,
Sum of two or more strings will be a "concatenation" ( pasting ) of two or more string together.
Sum of two or more int is an int
Sum of two or more floats is a float, except some special situations.
Sum of two or more bool is a bool.
End of explanation
Instructions:
+ First run the code, and identify the errors.
+ Next, fix those errors by appropritate type conversion functions.
+ Convert variable 'pi_string' --> 'float', as a new variable 'pi_float'.
# Definition of savings and result
savings = 100
result = 100 * 1.10 ** 7
# Fix the printout
print("I started with $" + savings + " and now have $" + result + ". Awesome!")
# Definition of pi_string
pi_string = "3.1415926"
# Convert pi_string into float: pi_float
# Fixed code
# Definition of savings and result
savings = 100
result = 100 * 1.10 ** 7
# Fix the printout
print("I started with $" + str( savings ) + " and now have $" + str( result ) + ". Awesome!")
# Definition of pi_string
pi_string = "3.1415926"
# Convert pi_string into float: pi_float
pi_float = float(pi_string)
Explanation: 6. Type conversion
Using the + operator to paste together two strings can be very useful in building custom messages.
Suppose for example that you've calculated the return of your investment, and want to summarize the results in a string. Assuming the floats savings and result are defined, you can try something like this:
print("I started with $" + savings + " and now have $" + result + ". Awesome!")
note: But we cannot sum strings and floats, so above code will return an error.
To fix the error:
Need to explicitly convert the types of your variables.
Use a str() to convert a value into a string.
Similerly, use int(), float and bool() for desired conversion types.
End of explanation
print( "I can add integers, like " + str(5) + " to strings." )
print( "I said " + ("Hey " * 2) + "Hey!" )
# Error
("The correct answer to this multiple choice exercise is answer number " + 2)
print( True + False )
Explanation: 7. Can Python handle everything?
Following are some code snipptes, try to find which one is correct:
"I can add integers, like" + str(5) + "to strings."
"I said " + ("Hey " * 2) + "Hey!"
"The correct answer to this multiple choice exercise is answer number " + 2
True + False
End of explanation |
14,778 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Object Model
bqplot is based on Grammar of Graphics paradigm. The Object Model in bqplot gives the user the full flexibility to build custom plots. This means the API is verbose but fully customizable.
The following are the steps to build a Figure in bqplot using the Object Model
Step1: For creating other marks (like scatter, pie, bars, etc.), only step 3 needs to be changed. Lets look a simple example to create a bar chart
Step2: Mutiple marks can be rendered in a figure. It's as easy as passing a list of marks when constructing the Figure object | Python Code:
from bqplot import (LinearScale, Axis, Figure, OrdinalScale,
LinearScale, Bars, Lines, Scatter)
# first, let's create two vectors x and y to plot using a Lines mark
import numpy as np
x = np.linspace(-10, 10, 100)
y = np.sin(x)
# 1. Create the scales
xs = LinearScale()
ys = LinearScale()
# 2. Create the axes for x and y
xax = Axis(scale=xs, label='X')
yax = Axis(scale=ys, orientation='vertical', label='Y')
# 3. Create a Lines mark by passing in the scales
# note that Lines object is stored in `line` which can be used later to update the plot
line = Lines(x=x, y=y, scales={'x': xs, 'y': ys})
# 4. Create a Figure object by assembling marks and axes
fig = Figure(marks=[line], axes=[xax, yax], title='Simple Line Chart')
# 5. Render the figure using display or just as is
fig
Explanation: Object Model
bqplot is based on Grammar of Graphics paradigm. The Object Model in bqplot gives the user the full flexibility to build custom plots. This means the API is verbose but fully customizable.
The following are the steps to build a Figure in bqplot using the Object Model:
Build the scales for x and y quantities using the Scale classes (Scales map the data into pixels in the figure)
Build the marks using the Mark classes. Marks represent the core plotting objects (lines, scatter, bars, pies etc.). Marks take the scale objects created in step 1 as arguments
Build the axes for x and y scales
Finally create a figure using Figure class. Figure takes marks and axes as inputs. Figure object is a widget (it inherits from DOMWidget) and can be rendered like any other jupyter widget
Let's look a simple example to understand these concepts:
End of explanation
# first, let's create two vectors x and y to plot a bar chart
x = list('ABCDE')
y = np.random.rand(5)
# 1. Create the scales
xs = OrdinalScale() # note the use of ordinal scale to represent categorical data
ys = LinearScale()
# 2. Create the axes for x and y
xax = Axis(scale=xs, label='X', grid_lines='none') # no grid lines needed for x
yax = Axis(scale=ys, orientation='vertical', label='Y', tick_format='.0%') # note the use of tick_format to format ticks
# 3. Create a Bars mark by passing in the scales
# note that Bars object is stored in `bar` object which can be used later to update the plot
bar = Bars(x=x, y=y, scales={'x': xs, 'y': ys}, padding=.2)
# 4. Create a Figure object by assembling marks and axes
Figure(marks=[bar], axes=[xax, yax], title='Simple Bar Chart')
Explanation: For creating other marks (like scatter, pie, bars, etc.), only step 3 needs to be changed. Lets look a simple example to create a bar chart:
End of explanation
# first, let's create two vectors x and y
import numpy as np
x = np.linspace(-10, 10, 25)
y = 3 * x + 5
y_noise = y + 10 * np.random.randn(25) # add some random noise to y
# 1. Create the scales
xs = LinearScale()
ys = LinearScale()
# 2. Create the axes for x and y
xax = Axis(scale=xs, label='X')
yax = Axis(scale=ys, orientation='vertical', label='Y')
# 3. Create a Lines and Scatter marks by passing in the scales
# additional attributes (stroke_width, colors etc.) can be passed as attributes to the mark objects as needed
line = Lines(x=x, y=y, scales={'x': xs, 'y': ys}, colors=['green'], stroke_width=3)
scatter = Scatter(x=x, y=y_noise, scales={'x': xs, 'y': ys}, colors=['red'], stroke='black')
# 4. Create a Figure object by assembling marks and axes
# pass both the marks (line and scatter) as a list to the marks attribute
Figure(marks=[line, scatter], axes=[xax, yax], title='Scatter and Line')
Explanation: Mutiple marks can be rendered in a figure. It's as easy as passing a list of marks when constructing the Figure object
End of explanation |
14,779 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Character level language model - Dinosaurus land
Welcome to Dinosaurus Island! 65 million years ago, dinosaurs existed, and in this assignment they are back. You are in charge of a special task. Leading biology researchers are creating new breeds of dinosaurs and bringing them to life on earth, and your job is to give names to these dinosaurs. If a dinosaur does not like its name, it might go beserk, so choose wisely!
<table>
<td>
<img src="images/dino.jpg" style="width
Step1: 1 - Problem Statement
1.1 - Dataset and Preprocessing
Run the following cell to read the dataset of dinosaur names, create a list of unique characters (such as a-z), and compute the dataset and vocabulary size.
Step2: The characters are a-z (26 characters) plus the "\n" (or newline character), which in this assignment plays a role similar to the <EOS> (or "End of sentence") token we had discussed in lecture, only here it indicates the end of the dinosaur name rather than the end of a sentence. In the cell below, we create a python dictionary (i.e., a hash table) to map each character to an index from 0-26. We also create a second python dictionary that maps each index back to the corresponding character character. This will help you figure out what index corresponds to what character in the probability distribution output of the softmax layer. Below, char_to_ix and ix_to_char are the python dictionaries.
Step3: 1.2 - Overview of the model
Your model will have the following structure
Step5: Expected output
Step7: Expected output
Step12: Expected output
Step13: Run the following cell, you should observe your model outputting random-looking characters at the first iteration. After a few thousand iterations, your model should learn to generate reasonable-looking names.
Step14: Conclusion
You can see that your algorithm has started to generate plausible dinosaur names towards the end of the training. At first, it was generating random characters, but towards the end you could see dinosaur names with cool endings. Feel free to run the algorithm even longer and play with hyperparameters to see if you can get even better results. Our implemetation generated some really cool names like maconucon, marloralus and macingsersaurus. Your model hopefully also learned that dinosaur names tend to end in saurus, don, aura, tor, etc.
If your model generates some non-cool names, don't blame the model entirely--not all actual dinosaur names sound cool. (For example, dromaeosauroides is an actual dinosaur name and is in the training set.) But this model should give you a set of candidates from which you can pick the coolest!
This assignment had used a relatively small dataset, so that you could train an RNN quickly on a CPU. Training a model of the english language requires a much bigger dataset, and usually needs much more computation, and could run for many hours on GPUs. We ran our dinosaur name for quite some time, and so far our favoriate name is the great, undefeatable, and fierce
Step15: To save you some time, we have already trained a model for ~1000 epochs on a collection of Shakespearian poems called "The Sonnets".
Let's train the model for one more epoch. When it finishes training for an epoch---this will also take a few minutes---you can run generate_output, which will prompt asking you for an input (<40 characters). The poem will start with your sentence, and our RNN-Shakespeare will complete the rest of the poem for you! For example, try "Forsooth this maketh no sense " (don't enter the quotation marks). Depending on whether you include the space at the end, your results might also differ--try it both ways, and try other inputs as well. | Python Code:
import numpy as np
from utils import *
import random
Explanation: Character level language model - Dinosaurus land
Welcome to Dinosaurus Island! 65 million years ago, dinosaurs existed, and in this assignment they are back. You are in charge of a special task. Leading biology researchers are creating new breeds of dinosaurs and bringing them to life on earth, and your job is to give names to these dinosaurs. If a dinosaur does not like its name, it might go beserk, so choose wisely!
<table>
<td>
<img src="images/dino.jpg" style="width:250;height:300px;">
</td>
</table>
Luckily you have learned some deep learning and you will use it to save the day. Your assistant has collected a list of all the dinosaur names they could find, and compiled them into this dataset. (Feel free to take a look by clicking the previous link.) To create new dinosaur names, you will build a character level language model to generate new names. Your algorithm will learn the different name patterns, and randomly generate new names. Hopefully this algorithm will keep you and your team safe from the dinosaurs' wrath!
By completing this assignment you will learn:
How to store text data for processing using an RNN
How to synthesize data, by sampling predictions at each time step and passing it to the next RNN-cell unit
How to build a character-level text generation recurrent neural network
Why clipping the gradients is important
We will begin by loading in some functions that we have provided for you in rnn_utils. Specifically, you have access to functions such as rnn_forward and rnn_backward which are equivalent to those you've implemented in the previous assignment.
End of explanation
data = open('dinos.txt', 'r').read()
data= data.lower()
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print('There are %d total characters and %d unique characters in your data.' % (data_size, vocab_size))
Explanation: 1 - Problem Statement
1.1 - Dataset and Preprocessing
Run the following cell to read the dataset of dinosaur names, create a list of unique characters (such as a-z), and compute the dataset and vocabulary size.
End of explanation
char_to_ix = { ch:i for i,ch in enumerate(sorted(chars)) }
ix_to_char = { i:ch for i,ch in enumerate(sorted(chars)) }
print(ix_to_char)
Explanation: The characters are a-z (26 characters) plus the "\n" (or newline character), which in this assignment plays a role similar to the <EOS> (or "End of sentence") token we had discussed in lecture, only here it indicates the end of the dinosaur name rather than the end of a sentence. In the cell below, we create a python dictionary (i.e., a hash table) to map each character to an index from 0-26. We also create a second python dictionary that maps each index back to the corresponding character character. This will help you figure out what index corresponds to what character in the probability distribution output of the softmax layer. Below, char_to_ix and ix_to_char are the python dictionaries.
End of explanation
### GRADED FUNCTION: clip
def clip(gradients, maxValue):
'''
Clips the gradients' values between minimum and maximum.
Arguments:
gradients -- a dictionary containing the gradients "dWaa", "dWax", "dWya", "db", "dby"
maxValue -- everything above this number is set to this number, and everything less than -maxValue is set to -maxValue
Returns:
gradients -- a dictionary with the clipped gradients.
'''
dWaa, dWax, dWya, db, dby = gradients['dWaa'], gradients['dWax'], gradients['dWya'], gradients['db'], gradients['dby']
### START CODE HERE ###
# clip to mitigate exploding gradients, loop over [dWax, dWaa, dWya, db, dby]. (≈2 lines)
for gradient in [dWax, dWaa, dWya, db, dby]:
np.clip(gradient, -maxValue, maxValue, out=gradient)
### END CODE HERE ###
gradients = {"dWaa": dWaa, "dWax": dWax, "dWya": dWya, "db": db, "dby": dby}
return gradients
np.random.seed(3)
dWax = np.random.randn(5,3)*10
dWaa = np.random.randn(5,5)*10
dWya = np.random.randn(2,5)*10
db = np.random.randn(5,1)*10
dby = np.random.randn(2,1)*10
gradients = {"dWax": dWax, "dWaa": dWaa, "dWya": dWya, "db": db, "dby": dby}
gradients = clip(gradients, 10)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("gradients[\"dWax\"][3][1] =", gradients["dWax"][3][1])
print("gradients[\"dWya\"][1][2] =", gradients["dWya"][1][2])
print("gradients[\"db\"][4] =", gradients["db"][4])
print("gradients[\"dby\"][1] =", gradients["dby"][1])
Explanation: 1.2 - Overview of the model
Your model will have the following structure:
Initialize parameters
Run the optimization loop
Forward propagation to compute the loss function
Backward propagation to compute the gradients with respect to the loss function
Clip the gradients to avoid exploding gradients
Using the gradients, update your parameter with the gradient descent update rule.
Return the learned parameters
<img src="images/rnn.png" style="width:450;height:300px;">
<caption><center> Figure 1: Recurrent Neural Network, similar to what you had built in the previous notebook "Building a RNN - Step by Step". </center></caption>
At each time-step, the RNN tries to predict what is the next character given the previous characters. The dataset $X = (x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, ..., x^{\langle T_x \rangle})$ is a list of characters in the training set, while $Y = (y^{\langle 1 \rangle}, y^{\langle 2 \rangle}, ..., y^{\langle T_x \rangle})$ is such that at every time-step $t$, we have $y^{\langle t \rangle} = x^{\langle t+1 \rangle}$.
2 - Building blocks of the model
In this part, you will build two important blocks of the overall model:
- Gradient clipping: to avoid exploding gradients
- Sampling: a technique used to generate characters
You will then apply these two functions to build the model.
2.1 - Clipping the gradients in the optimization loop
In this section you will implement the clip function that you will call inside of your optimization loop. Recall that your overall loop structure usually consists of a forward pass, a cost computation, a backward pass, and a parameter update. Before updating the parameters, you will perform gradient clipping when needed to make sure that your gradients are not "exploding," meaning taking on overly large values.
In the exercise below, you will implement a function clip that takes in a dictionary of gradients and returns a clipped version of gradients if needed. There are different ways to clip gradients; we will use a simple element-wise clipping procedure, in which every element of the gradient vector is clipped to lie between some range [-N, N]. More generally, you will provide a maxValue (say 10). In this example, if any component of the gradient vector is greater than 10, it would be set to 10; and if any component of the gradient vector is less than -10, it would be set to -10. If it is between -10 and 10, it is left alone.
<img src="images/clip.png" style="width:400;height:150px;">
<caption><center> Figure 2: Visualization of gradient descent with and without gradient clipping, in a case where the network is running into slight "exploding gradient" problems. </center></caption>
Exercise: Implement the function below to return the clipped gradients of your dictionary gradients. Your function takes in a maximum threshold and returns the clipped versions of your gradients. You can check out this hint for examples of how to clip in numpy. You will need to use the argument out = ....
End of explanation
# GRADED FUNCTION: sample
def sample(parameters, char_to_ix, seed):
Sample a sequence of characters according to a sequence of probability distributions output of the RNN
Arguments:
parameters -- python dictionary containing the parameters Waa, Wax, Wya, by, and b.
char_to_ix -- python dictionary mapping each character to an index.
seed -- used for grading purposes. Do not worry about it.
Returns:
indices -- a list of length n containing the indices of the sampled characters.
# Retrieve parameters and relevant shapes from "parameters" dictionary
Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b']
vocab_size = by.shape[0]
n_a = Waa.shape[1]
### START CODE HERE ###
# Step 1: Create the one-hot vector x for the first character (initializing the sequence generation). (≈1 line)
x = np.zeros([vocab_size,1])
# Step 1': Initialize a_prev as zeros (≈1 line)
a_prev = np.zeros([n_a,1])
# Create an empty list of indices, this is the list which will contain the list of indices of the characters to generate (≈1 line)
indices = []
# Idx is a flag to detect a newline character, we initialize it to -1
idx = -1
# Loop over time-steps t. At each time-step, sample a character from a probability distribution and append
# its index to "indices". We'll stop if we reach 50 characters (which should be very unlikely with a well
# trained model), which helps debugging and prevents entering an infinite loop.
counter = 0
newline_character = char_to_ix['\n']
while (idx != newline_character and counter != 50):
# Step 2: Forward propagate x using the equations (1), (2) and (3)
a = np.tanh(np.dot(Wax, x) + np.dot(Waa, a_prev) + b)
z = np.dot(Wya, a) + by
y = softmax(z)
# for grading purposes
np.random.seed(counter+seed)
# Step 3: Sample the index of a character within the vocabulary from the probability distribution y
idx = np.random.choice([x for x in range(vocab_size)], p=y.ravel())
# Append the index to "indices"
indices.append(idx)
# Step 4: Overwrite the input character as the one corresponding to the sampled index.
x = np.zeros([vocab_size,1])
x[idx] = 1
# Update "a_prev" to be "a"
a_prev = a
# for grading purposes
seed += 1
counter +=1
### END CODE HERE ###
if (counter == 50):
indices.append(char_to_ix['\n'])
return indices
np.random.seed(2)
_, n_a = 20, 100
Wax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)
b, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}
indices = sample(parameters, char_to_ix, 0)
print("Sampling:")
print("list of sampled indices:", indices)
print("list of sampled characters:", [ix_to_char[i] for i in indices])
Explanation: Expected output:
<table>
<tr>
<td>
**gradients["dWaa"][1][2] **
</td>
<td>
10.0
</td>
</tr>
<tr>
<td>
**gradients["dWax"][3][1]**
</td>
<td>
-10.0
</td>
</td>
</tr>
<tr>
<td>
**gradients["dWya"][1][2]**
</td>
<td>
0.29713815361
</td>
</tr>
<tr>
<td>
**gradients["db"][4]**
</td>
<td>
[ 10.]
</td>
</tr>
<tr>
<td>
**gradients["dby"][1]**
</td>
<td>
[ 8.45833407]
</td>
</tr>
</table>
2.2 - Sampling
Now assume that your model is trained. You would like to generate new text (characters). The process of generation is explained in the picture below:
<img src="images/dinos3.png" style="width:500;height:300px;">
<caption><center> Figure 3: In this picture, we assume the model is already trained. We pass in $x^{\langle 1\rangle} = \vec{0}$ at the first time step, and have the network then sample one character at a time. </center></caption>
Exercise: Implement the sample function below to sample characters. You need to carry out 4 steps:
Step 1: Pass the network the first "dummy" input $x^{\langle 1 \rangle} = \vec{0}$ (the vector of zeros). This is the default input before we've generated any characters. We also set $a^{\langle 0 \rangle} = \vec{0}$
Step 2: Run one step of forward propagation to get $a^{\langle 1 \rangle}$ and $\hat{y}^{\langle 1 \rangle}$. Here are the equations:
$$ a^{\langle t+1 \rangle} = \tanh(W_{ax} x^{\langle t \rangle } + W_{aa} a^{\langle t \rangle } + b)\tag{1}$$
$$ z^{\langle t + 1 \rangle } = W_{ya} a^{\langle t + 1 \rangle } + b_y \tag{2}$$
$$ \hat{y}^{\langle t+1 \rangle } = softmax(z^{\langle t + 1 \rangle })\tag{3}$$
Note that $\hat{y}^{\langle t+1 \rangle }$ is a (softmax) probability vector (its entries are between 0 and 1 and sum to 1). $\hat{y}^{\langle t+1 \rangle}_i$ represents the probability that the character indexed by "i" is the next character. We have provided a softmax() function that you can use.
Step 3: Carry out sampling: Pick the next character's index according to the probability distribution specified by $\hat{y}^{\langle t+1 \rangle }$. This means that if $\hat{y}^{\langle t+1 \rangle }_i = 0.16$, you will pick the index "i" with 16% probability. To implement it, you can use np.random.choice.
Here is an example of how to use np.random.choice():
python
np.random.seed(0)
p = np.array([0.1, 0.0, 0.7, 0.2])
index = np.random.choice([0, 1, 2, 3], p = p.ravel())
This means that you will pick the index according to the distribution:
$P(index = 0) = 0.1, P(index = 1) = 0.0, P(index = 2) = 0.7, P(index = 3) = 0.2$.
Step 4: The last step to implement in sample() is to overwrite the variable x, which currently stores $x^{\langle t \rangle }$, with the value of $x^{\langle t + 1 \rangle }$. You will represent $x^{\langle t + 1 \rangle }$ by creating a one-hot vector corresponding to the character you've chosen as your prediction. You will then forward propagate $x^{\langle t + 1 \rangle }$ in Step 1 and keep repeating the process until you get a "\n" character, indicating you've reached the end of the dinosaur name.
End of explanation
# GRADED FUNCTION: optimize
def optimize(X, Y, a_prev, parameters, learning_rate = 0.01):
Execute one step of the optimization to train the model.
Arguments:
X -- list of integers, where each integer is a number that maps to a character in the vocabulary.
Y -- list of integers, exactly the same as X but shifted one index to the left.
a_prev -- previous hidden state.
parameters -- python dictionary containing:
Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
b -- Bias, numpy array of shape (n_a, 1)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
learning_rate -- learning rate for the model.
Returns:
loss -- value of the loss function (cross-entropy)
gradients -- python dictionary containing:
dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x)
dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a)
dWya -- Gradients of hidden-to-output weights, of shape (n_y, n_a)
db -- Gradients of bias vector, of shape (n_a, 1)
dby -- Gradients of output bias vector, of shape (n_y, 1)
a[len(X)-1] -- the last hidden state, of shape (n_a, 1)
### START CODE HERE ###
# Forward propagate through time (≈1 line)
loss, cache = rnn_forward(X, Y, a_prev, parameters)
# Backpropagate through time (≈1 line)
gradients, a = rnn_backward(X, Y, parameters, cache)
# Clip your gradients between -5 (min) and 5 (max) (≈1 line)
gradients = clip(gradients=gradients,maxValue=5)
# Update parameters (≈1 line)
parameters = update_parameters(parameters, gradients, learning_rate)
### END CODE HERE ###
return loss, gradients, a[len(X)-1]
np.random.seed(1)
vocab_size, n_a = 27, 100
a_prev = np.random.randn(n_a, 1)
Wax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a)
b, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by}
X = [12,3,5,11,22,3]
Y = [4,14,11,22,25, 26]
loss, gradients, a_last = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)
print("Loss =", loss)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("np.argmax(gradients[\"dWax\"]) =", np.argmax(gradients["dWax"]))
print("gradients[\"dWya\"][1][2] =", gradients["dWya"][1][2])
print("gradients[\"db\"][4] =", gradients["db"][4])
print("gradients[\"dby\"][1] =", gradients["dby"][1])
print("a_last[4] =", a_last[4])
Explanation: Expected output:
<table>
<tr>
<td>
**list of sampled indices:**
</td>
<td>
[12, 17, 24, 14, 13, 9, 10, 22, 24, 6, 13, 11, 12, 6, 21, 15, 21, 14, 3, 2, 1, 21, 18, 24, <br>
7, 25, 6, 25, 18, 10, 16, 2, 3, 8, 15, 12, 11, 7, 1, 12, 10, 2, 7, 7, 11, 5, 6, 12, 25, 0, 0]
</td>
</tr><tr>
<td>
**list of sampled characters:**
</td>
<td>
['l', 'q', 'x', 'n', 'm', 'i', 'j', 'v', 'x', 'f', 'm', 'k', 'l', 'f', 'u', 'o', <br>
'u', 'n', 'c', 'b', 'a', 'u', 'r', 'x', 'g', 'y', 'f', 'y', 'r', 'j', 'p', 'b', 'c', 'h', 'o', <br>
'l', 'k', 'g', 'a', 'l', 'j', 'b', 'g', 'g', 'k', 'e', 'f', 'l', 'y', '\n', '\n']
</td>
</tr>
</table>
3 - Building the language model
It is time to build the character-level language model for text generation.
3.1 - Gradient descent
In this section you will implement a function performing one step of stochastic gradient descent (with clipped gradients). You will go through the training examples one at a time, so the optimization algorithm will be stochastic gradient descent. As a reminder, here are the steps of a common optimization loop for an RNN:
Forward propagate through the RNN to compute the loss
Backward propagate through time to compute the gradients of the loss with respect to the parameters
Clip the gradients if necessary
Update your parameters using gradient descent
Exercise: Implement this optimization process (one step of stochastic gradient descent).
We provide you with the following functions:
```python
def rnn_forward(X, Y, a_prev, parameters):
Performs the forward propagation through the RNN and computes the cross-entropy loss.
It returns the loss' value as well as a "cache" storing values to be used in the backpropagation.
....
return loss, cache
def rnn_backward(X, Y, parameters, cache):
Performs the backward propagation through time to compute the gradients of the loss with respect
to the parameters. It returns also all the hidden states.
...
return gradients, a
def update_parameters(parameters, gradients, learning_rate):
Updates parameters using the Gradient Descent Update Rule.
...
return parameters
```
End of explanation
# GRADED FUNCTION: model
def model(data, ix_to_char, char_to_ix, num_iterations = 35000, n_a = 50, dino_names = 7, vocab_size = 27):
Trains the model and generates dinosaur names.
Arguments:
data -- text corpus
ix_to_char -- dictionary that maps the index to a character
char_to_ix -- dictionary that maps a character to an index
num_iterations -- number of iterations to train the model for
n_a -- number of units of the RNN cell
dino_names -- number of dinosaur names you want to sample at each iteration.
vocab_size -- number of unique characters found in the text, size of the vocabulary
Returns:
parameters -- learned parameters
# Retrieve n_x and n_y from vocab_size
n_x, n_y = vocab_size, vocab_size
# Initialize parameters
parameters = initialize_parameters(n_a, n_x, n_y)
# Initialize loss (this is required because we want to smooth our loss, don't worry about it)
loss = get_initial_loss(vocab_size, dino_names)
# Build list of all dinosaur names (training examples).
with open("dinos.txt") as f:
examples = f.readlines()
examples = [x.lower().strip() for x in examples]
# Shuffle list of all dinosaur names
np.random.seed(0)
np.random.shuffle(examples)
# Initialize the hidden state of your LSTM
a_prev = np.zeros((n_a, 1))
# Optimization loop
for j in range(1,num_iterations+1):
### START CODE HERE ###
# Use the hint above to define one training example (X,Y) (≈ 2 lines)
index = j % len(examples)
X = [None] + [char_to_ix[ch] for ch in examples[index]]
Y = X[1:] + [char_to_ix["\n"]]
# Perform one optimization step: Forward-prop -> Backward-prop -> Clip -> Update parameters
# Choose a learning rate of 0.01
curr_loss, gradients, a_prev = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)
### END CODE HERE ###
# Use a latency trick to keep the loss smooth. It happens here to accelerate the training.
loss = smooth(loss, curr_loss)
# Every 2000 Iteration, generate "n" characters thanks to sample() to check if the model is learning properly
if j % 2000 == 0:
print('Iteration: %d, Loss: %f' % (j, loss) + '\n')
# The number of dinosaur names to print
seed = 0
for name in range(dino_names):
# Sample indices and print them
sampled_indices = sample(parameters, char_to_ix, seed)
print_sample(sampled_indices, ix_to_char)
seed += 1 # To get the same result for grading purposed, increment the seed by one.
print('\n')
return parameters
Explanation: Expected output:
<table>
<tr>
<td>
**Loss **
</td>
<td>
126.503975722
</td>
</tr>
<tr>
<td>
**gradients["dWaa"][1][2]**
</td>
<td>
0.194709315347
</td>
<tr>
<td>
**np.argmax(gradients["dWax"])**
</td>
<td> 93
</td>
</tr>
<tr>
<td>
**gradients["dWya"][1][2]**
</td>
<td> -0.007773876032
</td>
</tr>
<tr>
<td>
**gradients["db"][4]**
</td>
<td> [-0.06809825]
</td>
</tr>
<tr>
<td>
**gradients["dby"][1]**
</td>
<td>[ 0.01538192]
</td>
</tr>
<tr>
<td>
**a_last[4]**
</td>
<td> [-1.]
</td>
</tr>
</table>
3.2 - Training the model
Given the dataset of dinosaur names, we use each line of the dataset (one name) as one training example. Every 100 steps of stochastic gradient descent, you will sample 10 randomly chosen names to see how the algorithm is doing. Remember to shuffle the dataset, so that stochastic gradient descent visits the examples in random order.
Exercise: Follow the instructions and implement model(). When examples[index] contains one dinosaur name (string), to create an example (X, Y), you can use this:
python
index = j % len(examples)
X = [None] + [char_to_ix[ch] for ch in examples[index]]
Y = X[1:] + [char_to_ix["\n"]]
Note that we use: index= j % len(examples), where j = 1....num_iterations, to make sure that examples[index] is always a valid statement (index is smaller than len(examples)).
The first entry of X being None will be interpreted by rnn_forward() as setting $x^{\langle 0 \rangle} = \vec{0}$. Further, this ensures that Y is equal to X but shifted one step to the left, and with an additional "\n" appended to signify the end of the dinosaur name.
End of explanation
parameters = model(data, ix_to_char, char_to_ix)
Explanation: Run the following cell, you should observe your model outputting random-looking characters at the first iteration. After a few thousand iterations, your model should learn to generate reasonable-looking names.
End of explanation
from __future__ import print_function
from keras.callbacks import LambdaCallback
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking
from keras.layers import LSTM
from keras.utils.data_utils import get_file
from keras.preprocessing.sequence import pad_sequences
from shakespeare_utils import *
import sys
import io
Explanation: Conclusion
You can see that your algorithm has started to generate plausible dinosaur names towards the end of the training. At first, it was generating random characters, but towards the end you could see dinosaur names with cool endings. Feel free to run the algorithm even longer and play with hyperparameters to see if you can get even better results. Our implemetation generated some really cool names like maconucon, marloralus and macingsersaurus. Your model hopefully also learned that dinosaur names tend to end in saurus, don, aura, tor, etc.
If your model generates some non-cool names, don't blame the model entirely--not all actual dinosaur names sound cool. (For example, dromaeosauroides is an actual dinosaur name and is in the training set.) But this model should give you a set of candidates from which you can pick the coolest!
This assignment had used a relatively small dataset, so that you could train an RNN quickly on a CPU. Training a model of the english language requires a much bigger dataset, and usually needs much more computation, and could run for many hours on GPUs. We ran our dinosaur name for quite some time, and so far our favoriate name is the great, undefeatable, and fierce: Mangosaurus!
<img src="images/mangosaurus.jpeg" style="width:250;height:300px;">
4 - Writing like Shakespeare
The rest of this notebook is optional and is not graded, but we hope you'll do it anyway since it's quite fun and informative.
A similar (but more complicated) task is to generate Shakespeare poems. Instead of learning from a dataset of Dinosaur names you can use a collection of Shakespearian poems. Using LSTM cells, you can learn longer term dependencies that span many characters in the text--e.g., where a character appearing somewhere a sequence can influence what should be a different character much much later in ths sequence. These long term dependencies were less important with dinosaur names, since the names were quite short.
<img src="images/shakespeare.jpg" style="width:500;height:400px;">
<caption><center> Let's become poets! </center></caption>
We have implemented a Shakespeare poem generator with Keras. Run the following cell to load the required packages and models. This may take a few minutes.
End of explanation
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
model.fit(x, y, batch_size=128, epochs=1, callbacks=[print_callback])
# Run this cell to try with different inputs without having to re-train the model
generate_output()
Explanation: To save you some time, we have already trained a model for ~1000 epochs on a collection of Shakespearian poems called "The Sonnets".
Let's train the model for one more epoch. When it finishes training for an epoch---this will also take a few minutes---you can run generate_output, which will prompt you for an input (<40 characters). The poem will start with your sentence, and our RNN-Shakespeare will complete the rest of the poem for you! For example, try "Forsooth this maketh no sense " (don't enter the quotation marks). Depending on whether you include the space at the end, your results might also differ--try it both ways, and try other inputs as well.
End of explanation |
14,780 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Pandas
Step1: The data that will form the basis of the dataframe is divided by source into internal and external forms
Step2: Next, we assign a list of column names to the email_list_lst_cln variable.
Step3: Finally, we pass the email_list_lst and email_list_lst_cln values to DataFrame's "from_records" function, build a table from the email_list_lst values with the email_list_lst_cln columns, and then display the table.
Step4: List
Step5: Dictionary
Step6: As you can see here, although the data has made it into the DataFrame, the columns are sorted alphabetically rather than in the order we want. To fix this, we must either specify the column names and their order up front via the columns parameter, as when creating the DataFrame above, or change the column order afterwards with the command shown below.
Step7: Dictionary
Step8: Let's create the table and rearrange the columns
Step9: Sources external to Python
Step10: CSV
As with the function above, first the path to the .csv file is given, and then the character that separates values within a row must be passed to the delimiter parameter. If it is not given, a comma is assumed by default.
Step11: JSON
To read data from a JSON file, a URL or a path to the file in the file system is required. An example JSON file is noted below.
If you look carefully, you will see for yourself that the JSON file is no different from the value definition we used in the Dictionary
Step12: SQL
And finally, let's query data from an SQLite file database and place it into a dataframe. First, let's import the modules required for our work.
Step13: Let's create an engine for querying and point to the path of the database file.
Step14: Let's create a connection and query all rows from the emails table in the database.
Step15: After the data has been queried, let's "read" the rows via the fetchall function, assign them to the data variable, and finally close the DB connection.
Step16: Does the structure of the retrieved data look familiar? If you look carefully you will recognize the List | Python Code:
import pandas as pd
Explanation: Pandas: different ways of creating a DataFrame
Let's start by creating a DataFrame, which in pandas is the means and the form of storing data in a 2D shape (in plain language, a "table"). This lesson is entirely about the ways of turning data obtained from different sources and in different forms into a DataFrame.
To work with pandas, let's first import the module:
End of explanation
email_list_lst=[('Omar','Bayramov','[email protected]',1),
('Ali','Aliyev','[email protected]',0),
('Dmitry','Vladimirov','[email protected]',1),
('Donald','Trump','[email protected]',1),
('Rashid','Maniyev','[email protected]',1),
]
Explanation: The data that will form the basis of the dataframe is divided by source into internal and external forms:
Python internal sources:
List: row-based:
The first example is entering values into the DataFrame one after another, row by row, inside a list.
For readers familiar with SQL this may recall the INSERT command of that language.
For example, the same query could be executed via SQL in the form noted below.
First, we create a list of tuples and assign it to a variable named email_list_lst.
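The SQL analogy is only mentioned, not shown, in the original; a rough sketch of what such a row-by-row insert could look like, using an assumed emails table in a throwaway in-memory SQLite database:
import sqlite3
conn = sqlite3.connect(":memory:")  # disposable database, purely for illustration
conn.execute("CREATE TABLE emails (f_name TEXT, l_name TEXT, email_adrs TEXT, a_status INTEGER)")
conn.execute("INSERT INTO emails (f_name, l_name, email_adrs, a_status) "
             "VALUES ('Omar', 'Bayramov', '[email protected]', 1)")
conn.commit()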
End of explanation
email_list_lst_cln=['f_name','l_name','email_adrs','a_status',]
Explanation: Next, we assign a list of column names to the email_list_lst_cln variable.
End of explanation
df=pd.DataFrame.from_records(email_list_lst, columns=email_list_lst_cln)
df
Explanation: Finally, we pass the email_list_lst and email_list_lst_cln values to DataFrame's "from_records" function, build a table from the email_list_lst values with the email_list_lst_cln columns, and then display the table.
End of explanation
email_list_lst=[('f_name', ['Omar', 'Ali', 'Dmitry', 'Donald', 'Rashid',]),
('l_name', ['Bayramov', 'Aliyev', 'Vladimirov', 'Trump', 'Maniyev',]),
('email_adrs', ['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]',]),
('a_status', [1, 0, 1, 1, 1,]),
]
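# note: DataFrame.from_items was deprecated and later removed in newer pandas (1.0+);
# on modern versions pd.DataFrame(dict(email_list_lst)) builds the same table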
df = pd.DataFrame.from_items(email_list_lst)
df
Explanation: List: column-based:
Unlike the previous example, this time we use an approach that takes the data column by column and passes it to the table. For this, a list of tuples is used, where each tuple consists of a string giving the column name and a list of the values located in that column.
End of explanation
email_list=[{
'f_name' : 'Omar',
'l_name': 'Bayramov',
'email_adrs' : '[email protected]',
'a_status' : 1
},
{'f_name' : 'Ali', 'l_name': 'Aliyev', 'email_adrs':'[email protected]', 'a_status' : 0},
{'f_name': 'Dmitry', 'l_name': 'Vladimirov', 'email_adrs':'[email protected]', 'a_status':1},
{'f_name': 'Donald', 'l_name': 'Trump', 'email_adrs':'[email protected]', 'a_status':1},
{'f_name': 'Rashid', 'l_name': 'Maniyev', 'email_adrs':'[email protected]', 'a_status':1},
]
df=pd.DataFrame(email_list,)
df
Explanation: Dictionary: row-based
With the next example we move on to the method I prefer the most (of course, data is often not in the form we would like, and what we prefer does not interest it at all). The reason I prefer this method is very simple: when using the approaches described before and after this one, some values can be lost during data acquisition or cleaning without receiving a "NaN" value, which can lead to column or row shifting; the analysis then either becomes partly harder or loses its meaning altogether because the data gets mixed up. (To get acquainted with the problem I am talking about, you can look at the data acquisition and analysis work I did in 2017/08.) But this time the column each value belongs to is stated explicitly, and when it is not stated it is automatically recorded as "NaN". As a result there is no need for extra routine cleaning work, and missing values can either be dropped without further investigation or filled in with data by other methods.
Let's take a closer look at the example in question:
End of explanation
df=df[['f_name','l_name','email_adrs','a_status',]]
df
Explanation: As you can see here, although the data has made it into the DataFrame, the columns are sorted alphabetically rather than in the order we want. To fix this, we must either specify the column names and their order up front via the columns parameter, as when creating the DataFrame above, or change the column order afterwards with the command shown below.
End of explanation
email_list_dct={'f_name': ['Omar', 'Ali', 'Dmitry', 'Donald', 'Rashid',],
'l_name': ['Bayramov', 'Aliyev', 'Vladimirov', 'Trump', 'Maniyev',],
'email_adrs': ['[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]',],
'a_status': [1, 0, 1, 1, 1,],
}
Explanation: Dictionary: column-based
This example is very similar to the "List: column-based" case we went through above. The difference is that this time the values are recorded as lists under dictionary keys.
End of explanation
df = pd.DataFrame.from_dict(email_list_dct)
df=df[['f_name','l_name','email_adrs','a_status',]]
df
Explanation: Let's create the table and rearrange the columns:
End of explanation
df = pd.read_excel('https://raw.githubusercontent.com/limpapud/datasets/master/Tutorial_datasets/excel_to_dataframe.xlsx',
sheet_name='data_for_ttrl')
df
Explanation: Sources external to Python:
Besides Python's standard internal data structures, Pandas makes it possible to obtain data from the file system, databases and other sources and build a table from it.
Excel file
To create the table it is enough to give pandas' read_excel function a file-system path pointing to the Excel file, or a URL when the file is located on the internet. If the file contains several sheets, or you specifically need the data located on a particular sheet, the data can be turned into a table by passing the sheet name to the sheet_name parameter.
End of explanation
df = pd.read_csv('https://raw.githubusercontent.com/limpapud/datasets/master/Tutorial_datasets/csv_to_dataframe.csv',
delimiter=',')
df
Explanation: CSV
As with the function above, first the path to the .csv file is given, and then the character that separates values within a row must be passed to the delimiter parameter. If it is not given, a comma is assumed by default.
End of explanation
df = pd.read_json('https://raw.githubusercontent.com/limpapud/datasets/master/Tutorial_datasets/json_to_dataframe.json')
df = df[['f_name','l_name','email_adrs','a_status',]]
df
Explanation: JSON
To read data from a JSON file, a URL or a path to the file in the file system is required. An example JSON file is noted below.
If you look carefully, you will see for yourself that the JSON file is no different from the value definition we used in the Dictionary: row-based dataframe creation method.
End of explanation
import sqlalchemy
from sqlalchemy import create_engine
import sqlite3
Explanation: SQL
And finally, let's query data from an SQLite file database and place it into a dataframe. First, let's import the modules required for our work.
End of explanation
engine = create_engine('sqlite:///C:/Users/omarbayramov/Documents/GitHub/datasets/Tutorial_datasets/sql_to_dataframe.db')
Explanation: Let's create an engine for querying and point to the path of the database file.
End of explanation
con=engine.connect()
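# note: on SQLAlchemy 2.x a raw SQL string must be wrapped, e.g. con.execute(sqlalchemy.text('SELECT * FROM emails'))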
a=con.execute('SELECT * FROM emails')
Explanation: Let's create a connection and query all rows from the emails table located in the database.
End of explanation
data=a.fetchall()
a.close()
data
Explanation: After the data has been queried, let's "read" the rows via the fetchall function, assign them to the data variable, and finally close the DB connection.
End of explanation
df=pd.DataFrame(data, columns=['f_name','l_name','email_adrs','a_status',])
df
Explanation: Does the structure of the retrieved data look familiar? If you look carefully you will recognize the List: row-based data structure we got acquainted with first. All that remains is to build the table by running the procedure we already know:
End of explanation |
14,781 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
ES-DOC CMIP6 Model Properties - Ocnbgchem
MIP Era
Step1: Document Authors
Set document authors
Step2: Document Contributors
Specify document contributors
Step3: Document Publication
Specify document publication status
Step4: Document Table of Contents
1. Key Properties
2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
4. Key Properties --> Transport Scheme
5. Key Properties --> Boundary Forcing
6. Key Properties --> Gas Exchange
7. Key Properties --> Carbon Chemistry
8. Tracers
9. Tracers --> Ecosystem
10. Tracers --> Ecosystem --> Phytoplankton
11. Tracers --> Ecosystem --> Zooplankton
12. Tracers --> Disolved Organic Matter
13. Tracers --> Particules
14. Tracers --> Dic Alkalinity
1. Key Properties
Ocean Biogeochemistry key properties
1.1. Model Overview
Is Required
Step5: 1.2. Model Name
Is Required
Step6: 1.3. Model Type
Is Required
Step7: 1.4. Elemental Stoichiometry
Is Required
Step8: 1.5. Elemental Stoichiometry Details
Is Required
Step9: 1.6. Prognostic Variables
Is Required
Step10: 1.7. Diagnostic Variables
Is Required
Step11: 1.8. Damping
Is Required
Step12: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Time stepping method for passive tracers transport in ocean biogeochemistry
2.1. Method
Is Required
Step13: 2.2. Timestep If Not From Ocean
Is Required
Step14: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Time stepping framework for biology sources and sinks in ocean biogeochemistry
3.1. Method
Is Required
Step15: 3.2. Timestep If Not From Ocean
Is Required
Step16: 4. Key Properties --> Transport Scheme
Transport scheme in ocean biogeochemistry
4.1. Type
Is Required
Step17: 4.2. Scheme
Is Required
Step18: 4.3. Use Different Scheme
Is Required
Step19: 5. Key Properties --> Boundary Forcing
Properties of biogeochemistry boundary forcing
5.1. Atmospheric Deposition
Is Required
Step20: 5.2. River Input
Is Required
Step21: 5.3. Sediments From Boundary Conditions
Is Required
Step22: 5.4. Sediments From Explicit Model
Is Required
Step23: 6. Key Properties --> Gas Exchange
*Properties of gas exchange in ocean biogeochemistry *
6.1. CO2 Exchange Present
Is Required
Step24: 6.2. CO2 Exchange Type
Is Required
Step25: 6.3. O2 Exchange Present
Is Required
Step26: 6.4. O2 Exchange Type
Is Required
Step27: 6.5. DMS Exchange Present
Is Required
Step28: 6.6. DMS Exchange Type
Is Required
Step29: 6.7. N2 Exchange Present
Is Required
Step30: 6.8. N2 Exchange Type
Is Required
Step31: 6.9. N2O Exchange Present
Is Required
Step32: 6.10. N2O Exchange Type
Is Required
Step33: 6.11. CFC11 Exchange Present
Is Required
Step34: 6.12. CFC11 Exchange Type
Is Required
Step35: 6.13. CFC12 Exchange Present
Is Required
Step36: 6.14. CFC12 Exchange Type
Is Required
Step37: 6.15. SF6 Exchange Present
Is Required
Step38: 6.16. SF6 Exchange Type
Is Required
Step39: 6.17. 13CO2 Exchange Present
Is Required
Step40: 6.18. 13CO2 Exchange Type
Is Required
Step41: 6.19. 14CO2 Exchange Present
Is Required
Step42: 6.20. 14CO2 Exchange Type
Is Required
Step43: 6.21. Other Gases
Is Required
Step44: 7. Key Properties --> Carbon Chemistry
Properties of carbon chemistry biogeochemistry
7.1. Type
Is Required
Step45: 7.2. PH Scale
Is Required
Step46: 7.3. Constants If Not OMIP
Is Required
Step47: 8. Tracers
Ocean biogeochemistry tracers
8.1. Overview
Is Required
Step48: 8.2. Sulfur Cycle Present
Is Required
Step49: 8.3. Nutrients Present
Is Required
Step50: 8.4. Nitrous Species If N
Is Required
Step51: 8.5. Nitrous Processes If N
Is Required
Step52: 9. Tracers --> Ecosystem
Ecosystem properties in ocean biogeochemistry
9.1. Upper Trophic Levels Definition
Is Required
Step53: 9.2. Upper Trophic Levels Treatment
Is Required
Step54: 10. Tracers --> Ecosystem --> Phytoplankton
Phytoplankton properties in ocean biogeochemistry
10.1. Type
Is Required
Step55: 10.2. Pft
Is Required
Step56: 10.3. Size Classes
Is Required
Step57: 11. Tracers --> Ecosystem --> Zooplankton
Zooplankton properties in ocean biogeochemistry
11.1. Type
Is Required
Step58: 11.2. Size Classes
Is Required
Step59: 12. Tracers --> Disolved Organic Matter
Disolved organic matter properties in ocean biogeochemistry
12.1. Bacteria Present
Is Required
Step60: 12.2. Lability
Is Required
Step61: 13. Tracers --> Particules
Particulate carbon properties in ocean biogeochemistry
13.1. Method
Is Required
Step62: 13.2. Types If Prognostic
Is Required
Step63: 13.3. Size If Prognostic
Is Required
Step64: 13.4. Size If Discrete
Is Required
Step65: 13.5. Sinking Speed If Prognostic
Is Required
Step66: 14. Tracers --> Dic Alkalinity
DIC and alkalinity properties in ocean biogeochemistry
14.1. Carbon Isotopes
Is Required
Step67: 14.2. Abiotic Carbon
Is Required
Step68: 14.3. Alkalinity
Is Required | Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nerc', 'sandbox-3', 'ocnbgchem')
Explanation: ES-DOC CMIP6 Model Properties - Ocnbgchem
MIP Era: CMIP6
Institute: NERC
Source ID: SANDBOX-3
Topic: Ocnbgchem
Sub-Topics: Tracers.
Properties: 65 (37 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:27
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
Explanation: Document Authors
Set document authors
End of explanation
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
Explanation: Document Contributors
Specify document contributors
End of explanation
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
Explanation: Document Publication
Specify document publication status
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
4. Key Properties --> Transport Scheme
5. Key Properties --> Boundary Forcing
6. Key Properties --> Gas Exchange
7. Key Properties --> Carbon Chemistry
8. Tracers
9. Tracers --> Ecosystem
10. Tracers --> Ecosystem --> Phytoplankton
11. Tracers --> Ecosystem --> Zooplankton
12. Tracers --> Disolved Organic Matter
13. Tracers --> Particules
14. Tracers --> Dic Alkalinity
1. Key Properties
Ocean Biogeochemistry key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of ocean biogeochemistry model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean biogeochemistry model code (PISCES 2.0,...)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Geochemical"
# "NPZD"
# "PFT"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 1.3. Model Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of ocean biogeochemistry model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Fixed"
# "Variable"
# "Mix of both"
# TODO - please enter value(s)
Explanation: 1.4. Elemental Stoichiometry
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe elemental stoichiometry (fixed, variable, mix of the two)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.5. Elemental Stoichiometry Details
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe which elements have fixed/variable stoichiometry
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
List of all prognostic tracer variables in the ocean biogeochemistry component
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.7. Diagnostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
List of all diagnotic tracer variables in the ocean biogeochemistry component
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.damping')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 1.8. Damping
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any tracer damping used (such as artificial correction or relaxation to climatology,...)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
Explanation: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Time stepping method for passive tracers transport in ocean biogeochemistry
2.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time stepping framework for passive tracers
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 2.2. Timestep If Not From Ocean
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Time step for passive tracers (if different from ocean)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
Explanation: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Time stepping framework for biology sources and sinks in ocean biogeochemistry
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time stepping framework for biology sources and sinks
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
Explanation: 3.2. Timestep If Not From Ocean
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Time step for biology sources and sinks (if different from ocean)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline"
# "Online"
# TODO - please enter value(s)
Explanation: 4. Key Properties --> Transport Scheme
Transport scheme in ocean biogeochemistry
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of transport scheme
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Use that of ocean model"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 4.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Transport scheme used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 4.3. Use Different Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe transport scheme if different from that of ocean model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Atmospheric Chemistry model"
# TODO - please enter value(s)
Explanation: 5. Key Properties --> Boundary Forcing
Properties of biogeochemistry boundary forcing
5.1. Atmospheric Deposition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how atmospheric deposition is modeled
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Land Surface model"
# TODO - please enter value(s)
Explanation: 5.2. River Input
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how river input is modeled
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.3. Sediments From Boundary Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
List which sediments are specified from boundary conditions
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 5.4. Sediments From Explicit Model
Is Required: FALSE Type: STRING Cardinality: 0.1
List which sediments are specified from explicit sediment model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 6. Key Properties --> Gas Exchange
*Properties of gas exchange in ocean biogeochemistry *
6.1. CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CO2 gas exchange modeled ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 6.2. CO2 Exchange Type
Is Required: FALSE Type: ENUM Cardinality: 0.1
Describe CO2 gas exchange
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 6.3. O2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is O2 gas exchange modeled ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 6.4. O2 Exchange Type
Is Required: FALSE Type: ENUM Cardinality: 0.1
Describe O2 gas exchange
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 6.5. DMS Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is DMS gas exchange modeled ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6.6. DMS Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify DMS gas exchange scheme type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 6.7. N2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is N2 gas exchange modeled ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6.8. N2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify N2 gas exchange scheme type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 6.9. N2O Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is N2O gas exchange modeled ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6.10. N2O Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify N2O gas exchange scheme type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 6.11. CFC11 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CFC11 gas exchange modeled ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6.12. CFC11 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify CFC11 gas exchange scheme type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 6.13. CFC12 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CFC12 gas exchange modeled ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6.14. CFC12 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify CFC12 gas exchange scheme type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 6.15. SF6 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is SF6 gas exchange modeled ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6.16. SF6 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify SF6 gas exchange scheme type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 6.17. 13CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is 13CO2 gas exchange modeled ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6.18. 13CO2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify 13CO2 gas exchange scheme type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 6.19. 14CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is 14CO2 gas exchange modeled ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6.20. 14CO2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify 14CO2 gas exchange scheme type
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 6.21. Other Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any other gas exchange
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other protocol"
# TODO - please enter value(s)
Explanation: 7. Key Properties --> Carbon Chemistry
Properties of carbon chemistry biogeochemistry
7.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how carbon chemistry is modeled
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea water"
# "Free"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 7.2. PH Scale
Is Required: FALSE Type: ENUM Cardinality: 0.1
If NOT OMIP protocol, describe pH scale.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 7.3. Constants If Not OMIP
Is Required: FALSE Type: STRING Cardinality: 0.1
If NOT OMIP protocol, list carbon chemistry constants.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 8. Tracers
Ocean biogeochemistry tracers
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of tracers in ocean biogeochemistry
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 8.2. Sulfur Cycle Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sulfur cycle modeled ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrogen (N)"
# "Phosphorous (P)"
# "Silicium (S)"
# "Iron (Fe)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 8.3. Nutrients Present
Is Required: TRUE Type: ENUM Cardinality: 1.N
List nutrient species present in ocean biogeochemistry model
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrates (NO3)"
# "Amonium (NH4)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 8.4. Nitrous Species If N
Is Required: FALSE Type: ENUM Cardinality: 0.N
If nitrogen present, list nitrous species.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dentrification"
# "N fixation"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 8.5. Nitrous Processes If N
Is Required: FALSE Type: ENUM Cardinality: 0.N
If nitrogen present, list nitrous processes.
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9. Tracers --> Ecosystem
Ecosystem properties in ocean biogeochemistry
9.1. Upper Trophic Levels Definition
Is Required: TRUE Type: STRING Cardinality: 1.1
Definition of upper trophic level (e.g. based on size) ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 9.2. Upper Trophic Levels Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Define how upper trophic level are treated
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "PFT including size based (specify both below)"
# "Size based only (specify below)"
# "PFT only (specify below)"
# TODO - please enter value(s)
Explanation: 10. Tracers --> Ecosystem --> Phytoplankton
Phytoplankton properties in ocean biogeochemistry
10.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of phytoplankton
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diatoms"
# "Nfixers"
# "Calcifiers"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 10.2. Pft
Is Required: FALSE Type: ENUM Cardinality: 0.N
Phytoplankton functional types (PFT) (if applicable)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microphytoplankton"
# "Nanophytoplankton"
# "Picophytoplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 10.3. Size Classes
Is Required: FALSE Type: ENUM Cardinality: 0.N
Phytoplankton size classes (if applicable)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "Size based (specify below)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 11. Tracers --> Ecosystem --> Zooplankton
Zooplankton properties in ocean biogeochemistry
11.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of zooplankton
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microzooplankton"
# "Mesozooplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 11.2. Size Classes
Is Required: FALSE Type: ENUM Cardinality: 0.N
Zooplankton size classes (if applicable)
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 12. Tracers --> Disolved Organic Matter
Disolved organic matter properties in ocean biogeochemistry
12.1. Bacteria Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there bacteria representation ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Labile"
# "Semi-labile"
# "Refractory"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 12.2. Lability
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe treatment of lability in dissolved organic matter
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diagnostic"
# "Diagnostic (Martin profile)"
# "Diagnostic (Balast)"
# "Prognostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13. Tracers --> Particules
Particulate carbon properties in ocean biogeochemistry
13.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is particulate carbon represented in ocean biogeochemistry?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "POC"
# "PIC (calcite)"
# "PIC (aragonite"
# "BSi"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.2. Types If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, type(s) of particulate matter taken into account
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No size spectrum used"
# "Full size spectrum"
# "Discrete size classes (specify which below)"
# TODO - please enter value(s)
Explanation: 13.3. Size If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe if a particle size spectrum is used to represent the distribution of particles in the water volume
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
Explanation: 13.4. Size If Discrete
Is Required: FALSE Type: STRING Cardinality: 0.1
If prognostic and discrete size, describe which size classes are used
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Function of particule size"
# "Function of particule type (balast)"
# "Other: [Please specify]"
# TODO - please enter value(s)
Explanation: 13.5. Sinking Speed If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, method for calculation of sinking speed of particles
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "C13"
# "C14)"
# TODO - please enter value(s)
Explanation: 14. Tracers --> Dic Alkalinity
DIC and alkalinity properties in ocean biogeochemistry
14.1. Carbon Isotopes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which carbon isotopes are modelled (C13, C14)?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
Explanation: 14.2. Abiotic Carbon
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is abiotic carbon modelled ?
End of explanation
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Prognostic"
# "Diagnostic)"
# TODO - please enter value(s)
Explanation: 14.3. Alkalinity
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is alkalinity modelled ?
End of explanation |
14,782 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Intro" data-toc-modified-id="Intro-1"><span class="toc-item-num">1 </span>Intro</a></span></li><li><span><a href="#Colours-Arranged-by-Chance" data-toc-modified-id="Colours-Arranged-by-Chance-2"><span class="toc-item-num">2 </span>Colours Arranged by Chance</a></span></li><li><span><a href="#Fractals" data-toc-modified-id="Fractals-3"><span class="toc-item-num">3 </span>Fractals</a></span><ul class="toc-item"><li><span><a href="#Sierpinski-Triangle" data-toc-modified-id="Sierpinski-Triangle-3.1"><span class="toc-item-num">3.1 </span><a href="https
Step1: Colours Arranged by Chance
Mimicry of Gerhard Richter’s 4900 Colours artwork.
Description
Step2: Fractals
Mathematical chaotic systems that have the property of self-similarity (expanding/evolving symmetry).
3Blue1Brown on Fractals, and the fact that they are typically not self-similar
Related concepts
Step3: L-System
L-system or Lindenmayer system is a parallel rewriting system. Parallel because "as many rules as possible are applied simultaneously, per iteration". This differs from a formal grammar, which instead applies one rule per iteration.
An L-system consists of an alphabet (variables + constants), a collection of production rules and an initial axiom. Optionally, for graphic representation, a translation mechanism is used to translate a string into a geometry.
L-Systems can be used to generate self-similar fractals.
Disclaimer
Step4: Koch Curve
Step5: Fractal Plant
Step6: Dragon Curve
Step7: Spirograph
A spirograph is a drawing tool based on mathematical roulette curves.
Step8: Hypotrochoid
Roulette curve defined by
$$ x(\theta )=(R-r)\cos \theta +d\cos \left({R-r \over r}\theta \right)$$
$$y(\theta )=(R-r)\sin \theta -d\sin \left({R-r \over r}\theta \right)$$ | Python Code:
# Basic libraries import
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import animation
from PIL import Image, ImageDraw
import os
import sys
import itertools
import collections
from math import cos, sin, pi
# Plotting
%matplotlib notebook
sns.set_context("paper")
sns.set_style("dark")
# util method to plot multiple version of same generative artwork
def plot_artworks(artwork_gen_fun, nb_plots_side):
# Create a grid of random colours arrangement pieces
fig, axarr = plt.subplots(nb_plots_side, nb_plots_side)
for row in range(nb_plots_side):
for col in range(nb_plots_side):
axarr[row, col].imshow(artwork_gen_fun(row, col))
axarr[row, col].set_title('')
axarr[row, col].axis('off')
plt.show()
Explanation: <h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Intro" data-toc-modified-id="Intro-1"><span class="toc-item-num">1 </span>Intro</a></span></li><li><span><a href="#Colours-Arranged-by-Chance" data-toc-modified-id="Colours-Arranged-by-Chance-2"><span class="toc-item-num">2 </span>Colours Arranged by Chance</a></span></li><li><span><a href="#Fractals" data-toc-modified-id="Fractals-3"><span class="toc-item-num">3 </span>Fractals</a></span><ul class="toc-item"><li><span><a href="#Sierpinski-Triangle" data-toc-modified-id="Sierpinski-Triangle-3.1"><span class="toc-item-num">3.1 </span><a href="https://en.wikipedia.org/wiki/Sierpinski_triangle" target="_blank">Sierpinski Triangle</a></a></span></li></ul></li><li><span><a href="#L-System" data-toc-modified-id="L-System-4"><span class="toc-item-num">4 </span>L-System</a></span><ul class="toc-item"><li><span><a href="#Algae" data-toc-modified-id="Algae-4.1"><span class="toc-item-num">4.1 </span>Algae</a></span></li><li><span><a href="#Koch-Curve" data-toc-modified-id="Koch-Curve-4.2"><span class="toc-item-num">4.2 </span>Koch Curve</a></span></li><li><span><a href="#Fractal-Plant" data-toc-modified-id="Fractal-Plant-4.3"><span class="toc-item-num">4.3 </span>Fractal Plant</a></span></li><li><span><a href="#Dragon-Curve" data-toc-modified-id="Dragon-Curve-4.4"><span class="toc-item-num">4.4 </span>Dragon Curve</a></span></li></ul></li><li><span><a href="#Spirograph" data-toc-modified-id="Spirograph-5"><span class="toc-item-num">5 </span>Spirograph</a></span><ul class="toc-item"><li><span><a href="#Hypotrochoid" data-toc-modified-id="Hypotrochoid-5.1"><span class="toc-item-num">5.1 </span>Hypotrochoid</a></span></li></ul></li></ul></div>
Intro
This notebook explores introductory concepts and examples of generative art and computational creativity, especially guided/driven by the related Kadenze Online course.
Generative Art: art generated via automated/autonomous procedures/processes
Computational Creativity: study of autonomous/computational process/systems for the resolution of creative tasks
End of explanation
def generate_colours_arranged(nb_squares_side: int, square_side: int):
img_side = nb_squares_side * square_side
img = Image.new('RGB', (img_side, img_side), (255, 255, 255))
draw = ImageDraw.Draw(img)
for x in range(nb_squares_side):
for y in range(nb_squares_side):
cur_x_pos = x*square_side
cur_y_pos = y*square_side
rand_color = np.random.randint(256, size=3)
draw.rectangle([cur_x_pos, cur_y_pos,
cur_x_pos+square_side, cur_y_pos+square_side],
fill=tuple(rand_color))
return img
nb_squares_side = 5
square_side = 10
plot_artworks(lambda row, col: generate_colours_arranged(nb_squares_side, square_side),
nb_plots_side=4)
# test to generate something similar to Mondrian work
# here just a very rough/minimal example
def generate_mondrian(width: int, height: int):
img_side = nb_squares_side * square_side
img = Image.new('RGB', (width, height), (255, 255, 255))
g_const = 5
num_rects = g_const*g_const
rect_min_side = width/g_const
rect_max_side = rect_min_side * 2
draw = ImageDraw.Draw(img)
prev_x_pos = 0
prev_y_pos = 0
for i in range(num_rects):
rect_width = np.random.randint(rect_min_side, rect_max_side)
rect_height = np.random.randint(rect_min_side, rect_max_side)
rand_color = np.random.randint(256, size=3)
draw.rectangle([prev_x_pos, prev_y_pos,
prev_x_pos+rect_width, prev_y_pos+rect_height],
fill=tuple(rand_color),
outline=(0, 0, 0))
prev_x_pos += rect_width
if prev_x_pos > width:
prev_x_pos = 0
prev_y_pos += rect_height
return img
plot_artworks(lambda row, col: generate_mondrian(width=300, height=400),
nb_plots_side=2)
Explanation: Colours Arranged by Chance
Mimicry of Gerhard Richter’s 4900 Colours artwork.
Description: generate NxM grid of squares each assigned with a random color.
Possible add-ons:
* actual logic behind choice of color schema or more specific/meaningful seeding for the stochastic process
* Mondrian imitation
End of explanation
# draw the emergent central triangle (white) in a recursive way
# to simulate the sierpinski triangle
def rec_shrink_step(draw, triangle: list, depth: int=0, max_depth=1):
# stop condition
if depth >max_depth:
return
# for now just draw the emergent central hole
hole = [((triangle[1][0]-triangle[0][0])/2+triangle[0][0], (triangle[0][1]-triangle[1][1])/2+triangle[1][1]),
((triangle[2][0]-triangle[1][0])/2+triangle[1][0], (triangle[2][1]-triangle[1][1])/2+triangle[1][1]),
((triangle[1][0], triangle[2][1]))]
draw.polygon(hole, fill=(255, 255, 255))
t1 = [triangle[0], hole[0], hole[2]]
t2 = [hole[0], triangle[1], hole[1]]
t3 = [hole[2], hole[1], triangle[2]]
rec_shrink_step(draw, t1, depth+1, max_depth)
rec_shrink_step(draw, t2, depth+1, max_depth)
rec_shrink_step(draw, t3, depth+1, max_depth)
# main method to draw a sierpinski triangle
def sierpinski_triangle(img_side: int, max_depth: int):
img = Image.new('RGB', (img_side, img_side), (255, 255, 255))
draw = ImageDraw.Draw(img)
triangle = [(0, img_side), (img_side/2, 0), (img_side, img_side)]
triangle_color = (0, 0, 0)
draw.polygon(triangle, fill=triangle_color)
rec_shrink_step(draw, triangle, max_depth=max_depth)
return img
sierpinski_triangle(500, 4)
plot_artworks(lambda row, col: sierpinski_triangle(1000, row+col),
nb_plots_side=4)
Explanation: Fractals
Mathematical chaotic systems that have the property of self-similarity (expanding/evolving symmetry).
3Blue1Brown on Fractals, and the fact that they are typically not self-similar
Related concepts:
* chaos theory
* attractors
Sierpinski Triangle
Possible add-ons:
* generalize by shape
* 3D version (use Blender)
End of explanation
# L-system definition
variables = ['B', 'A']
axiom = ['A']
def rules(var):
# verify that given var is in the system alphabet
if var not in variables:
raise Exception("{} not in the alphabet".format(var))
if var == 'A':
return ['A', 'B']
elif var == 'B':
return ['A']
NB_ITERATIONS = 10
res = axiom
for i in range(1, NB_ITERATIONS):
res = list(itertools.chain(*[rules(x) for x in res]))
print("n = {} : {}".format(i, res))
Explanation: L-System
L-system or Lindenmayer system is a parallel rewriting system. Parallel because "as many rules as possible are applied simultaneously, per iteration". This differs from a formal grammar, which instead applies one rule per iteration.
An L-system consists of an alphabet (variables + constants), a collection of production rules and an initial axiom. Optionally, for graphic representation, a translation mechanism is used to translate a string into a geometry.
L-Systems can be used to generate self-similar fractals.
Disclaimer: in some of the following examples I obtain visual results that are similar but actually wrong as I am applying the drawing rule to values before they are expanded and then proceed with the recursion.
A more correct approach would be to get the results from the final iteration and then proceed to apply the drawing rule. Turtle is probably the most suited Python library for the drawing task.
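A minimal sketch of that expand-first approach (my own illustration, not code from the original notebook): rewrite the axiom in parallel for n iterations, then walk the final string once with whatever drawing backend is preferred.
def expand(axiom, rules, iterations):
    # apply every production rule in parallel, once per iteration
    symbols = list(axiom)
    for _ in range(iterations):
        symbols = [out for sym in symbols for out in rules.get(sym, [sym])]
    return symbols

# hypothetical usage with Koch-curve style rules; constants ('+', '-') map to themselves
koch_rules = {'F': list('F+F-F-F+F')}
final_string = expand(['F'], koch_rules, 3)
# only now interpret final_string (e.g. with turtle or ImageDraw), drawing on 'F' and turning on '+'/'-'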
Algae
"Lindenmayer's original L-system for modelling the growth of algae."
End of explanation
# L-system definition
variables = ['F']
constants = ['-', '+']
axiom = ['F']
def rules(val):
# verify that given val is in the system alphabet
if val not in variables and val not in constants:
raise Exception("{} not in the alphabet".format(val))
if val in constants:
return []
elif val == 'F':
return list('F+F-F-F+F')
def rec_draw_koch_curve(draw, vals, pos: tuple, angle=0, depth=0, max_depth=3):
LINE_LENGTH = 10
ANGLE_ADD =90
if depth >= max_depth:
return angle, pos
for val in vals:
if val == '+':
angle += ANGLE_ADD
elif val == '-':
angle -= ANGLE_ADD
elif val == 'F':
new_pos = (pos[0] + LINE_LENGTH*cos(angle*(pi/180)),pos[1] + LINE_LENGTH*sin(angle*(pi/180)))
draw.line([pos, new_pos], fill=(0, 0, 255))
pos = new_pos
angle, pos = rec_draw_koch_curve(draw, rules(val), pos, angle, depth=depth+1, max_depth=max_depth)
return angle, pos
img = Image.new('RGB', (500, 500), (255, 255, 255))
draw = ImageDraw.Draw(img)
rec_draw_koch_curve(draw, axiom, (199, 50), 90, 0, max_depth=5)
img
Explanation: Koch Curve
End of explanation
# L-system definition
variables = ['X', 'F']
constants = ['-', '+', '[', ']']
axiom = ['X']
def rules(val):
# verify that given var is in the system alphabet
if val not in variables and val not in constants:
raise Exception("{} not in the alphabet".format(val))
if val in constants:
return [val]
elif val == 'X':
return list('F+[[X]-X]-F[-FX]+X')
elif val == 'F':
return ['F', 'F']
NB_ITERATIONS = 3
res = axiom
for i in range(1, NB_ITERATIONS):
res = list(itertools.chain(*[rules(x) for x in res]))
print("n = {} : {}".format(i, res))
def draw_fractal_plant(draw, plant, pos: tuple, angle=0):
LINE_LENGTH = 5
ANGLE_ADD = 25
skip = 0
count = 0
for i, val in enumerate(plant):
#print(skip)
count += 1
if skip > 0:
skip -= 1
continue
elif val not in variables and val not in constants:
raise Exception("{} not in the alphabet".format(val))
elif val in constants:
if val == '+':
angle += ANGLE_ADD
elif val == '-':
angle -= ANGLE_ADD
elif val == '[':
skip = draw_fractal_plant(draw, plant[i+1:], (pos[0], pos[1]), angle)
elif val == ']':
return count
elif val == 'X':
continue
elif val == 'F':
new_pos = (pos[0] + LINE_LENGTH*cos(angle*(pi/180)),pos[1] + LINE_LENGTH*sin(angle*(pi/180)))
draw.line([pos, new_pos], fill=(0, 0, 0))
#print(new_pos)
pos = new_pos
NB_ITERATIONS = 6
res = axiom
for i in range(1, NB_ITERATIONS):
res = list(itertools.chain(*[rules(x) for x in res]))
img = Image.new('RGB', (500, 500), (255, 255, 255))
draw = ImageDraw.Draw(img)
draw_fractal_plant(draw, res, (0, 250), 25)
img
Explanation: Fractal Plant
End of explanation
# L-system definition
variables = ['X', 'Y']
constants = ['-', '+', 'F']
axiom = ['F', 'X']
def rules(val):
# verify that given var is in the system alphabet
if val not in variables and val not in constants:
raise Exception("{} not in the alphabet".format(val))
if val in constants:
return []
elif val == 'X':
return list('X+YF+')
elif val == 'Y':
return list('-FX-Y')
def rec_draw_dragon_curve(draw, vals, pos: tuple, angle=0, depth=0, max_depth=3):
LINE_LENGTH = 10
ANGLE_ADD =90
if depth >= max_depth:
return angle, pos
for val in vals:
if val == '+':
angle += ANGLE_ADD
elif val == '-':
angle -= ANGLE_ADD
elif val == 'F':
new_pos = (pos[0] + LINE_LENGTH*cos(angle*(pi/180)),pos[1] + LINE_LENGTH*sin(angle*(pi/180)))
draw.line([pos, new_pos], fill=(0, 0, 255))
pos = new_pos
angle, pos = rec_draw_dragon_curve(draw, rules(val), pos, angle, depth=depth+1, max_depth=max_depth)
return angle, pos
img = Image.new('RGB', (700, 500), (255, 255, 255))
draw = ImageDraw.Draw(img)
rec_draw_dragon_curve(draw, axiom, (450, 150), 90, 0, max_depth=11)
img
Explanation: Dragon Curve
End of explanation
class Spirograph:
def __init__(self, origin, R, r, d, angle, theta):
self.origin = origin
self.R = R
self.r = r
self.d = d
self.angle = angle
self.theta = theta
def update(self):
self.angle += self.theta
Explanation: Spirograph
A spirograph is a drawing tool based on mathematical roulette curves.
End of explanation
%matplotlib notebook
fig, ax = plt.subplots(dpi=120, figsize=(5, 5))
img_size = 500
img = Image.new('RGB', (img_size, img_size), 'white')
origin = np.array([img_size//2, img_size//2])
spirograph = Spirograph(origin=origin, R=125, r=75, d=125, angle=0, theta=0.2)
im = ax.imshow(img)
plt.axis('off')
def animate(i, img, im, spirograph):
#img = Image.new('RGB', (img_size, img_size), 'white')
draw = ImageDraw.Draw(img)
origin = spirograph.origin
R = spirograph.R
r = spirograph.r
d = spirograph.d
angle = spirograph.angle
# draw main circle
#draw.ellipse([tuple(origin-R), tuple(origin+R)], outline=(0, 0, 255))
# draw inside circle
circle_2_pos = origin + (R - r) * np.array([np.cos(angle), np.sin(angle)])
#draw.ellipse([tuple(circle_2_pos-r), tuple(circle_2_pos+r)], outline=(255, 0, 0))
# draw hypotrochoid
point_x = circle_2_pos[0] + d * np.cos(((R-r)/r)*angle)
point_y = circle_2_pos[1] - d * np.sin(((R-r)/r)*angle)
point = np.array([point_x, point_y])
draw.ellipse([tuple(point-2), tuple(point+2)], fill='black')
#draw.line([tuple(circle_2_pos), tuple(point)], fill='black')
im.set_data(img)
spirograph.update()
ani = animation.FuncAnimation(fig, animate, frames=500, interval=50,
fargs=[img, im, spirograph])
Explanation: Hypotrochoid
Roulette curve defined by
$$ x(\theta )=(R-r)\cos \theta +d\cos \left({R-r \over r}\theta \right)$$
$$y(\theta )=(R-r)\sin \theta -d\sin \left({R-r \over r}\theta \right)$$
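A quick non-animated sketch (my addition, reusing the same R, r, d values as the animation above) that traces the whole curve directly from these formulas:
import numpy as np
from PIL import Image, ImageDraw
R, r, d = 125, 75, 125
origin = np.array([250.0, 250.0])
thetas = np.arange(0, 2 * np.pi * r / np.gcd(R, r), 0.01)
xs = origin[0] + (R - r) * np.cos(thetas) + d * np.cos((R - r) / r * thetas)
ys = origin[1] + (R - r) * np.sin(thetas) - d * np.sin((R - r) / r * thetas)
curve_img = Image.new('RGB', (500, 500), 'white')
curve_draw = ImageDraw.Draw(curve_img)
curve_draw.line([(float(x), float(y)) for x, y in zip(xs, ys)], fill='black')
curve_img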
End of explanation |
14,783 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Learning avalanche problems by meteorological factors
Step1: Split into test and training data to run a prediction
We use the avalanche forecasts from Nordvestlandet including the forecasting regions Trollheimen, Romsdalen and Sunnmøre. We keep only the parameters provided by the mountain weather forecast. Besides the weather data for the current day we add the precipitation from the previous day as an additional parameter.
We use 75% of the data for training the model and the remaining 25% to test the model afterwards.
Step2: We can now compare the prediction by the model to the given target values in the test dataset.
Step3: Investigating the metrics of the model | Python Code:
import pandas as pd
import numpy as np
import json
import graphviz
import matplotlib.pyplot as plt
from sklearn import tree
from sklearn.preprocessing import LabelEncoder
from pprint import pprint
pd.set_option("display.max_rows",6)
%matplotlib inline
Explanation: Learning avalanche problems by meteorological factors
End of explanation
df_numdata = pd.read_csv('varsel_nordvestlandet_17_18.csv', index_col=0)
### Remove the "2|" in column Rainfall_Average
df_numdata = df_numdata[df_numdata['Rainfall_Average'] != '2|']
### create new data columns with previous days weather data
df_numdata['Rainfall_Most_exposed_area_-1day'] = 0 # precip on the day before - be aware that missing index/day will set previous day to zero
for index, row in df_numdata.iterrows():
try:
df_numdata.loc[index, 'Rainfall_Most_exposed_area_-1day'] = df_numdata.loc[index-1, 'Rainfall_Most_exposed_area']
except KeyError:
print(index-1)
### Randomly shuffle the index of nba.
random_indices = np.random.permutation(df_numdata.index)
### Set a cutoff for how many items we want in the test set (in this case 1/4 of the items)
test_cutoff = np.int(np.floor(len(df_numdata)/4))
print(test_cutoff)
### Generate the test set by taking the first 1/4 (25%) of the randomly shuffled indices.
df_test = df_numdata.loc[random_indices[:test_cutoff]]
### Generate the train set with the rest of the data.
df_train = df_numdata.loc[random_indices[test_cutoff:]]
### Keep only the columns containing weather data...
df_train_target = df_train.filter(['AvalancheProblems_0_Class_AvalancheProblemTypeId'], axis=1)
df_train_input = df_train.filter(['Rainfall_Most_exposed_area',
'Rainfall_Average',
'Wind_Speed_Num',
'Wind_Direction_Num',
'Temperature_Min',
'Temperature_Max',
'Temperature_masl',
'Freezing_Level_masl',
'Rainfall_Most_exposed_area_-1day'], axis=1)
### ...and split between input and target
df_test_target = df_test.filter(['AvalancheProblems_0_Class_AvalancheProblemTypeId'], axis=1)
df_test_input = df_test.filter(['Rainfall_Most_exposed_area',
'Rainfall_Average',
'Wind_Speed_Num',
'Wind_Direction_Num',
'Temperature_Min',
'Temperature_Max',
'Temperature_masl',
'Freezing_Level_masl',
'Rainfall_Most_exposed_area_-1day'], axis=1)
### get the correct target labels
with open(r'../config/snoskred_keys.json') as jdata:
snoskred_keys = json.load(jdata)
enc = LabelEncoder()
label_encoder = enc.fit(df_train_target['AvalancheProblems_0_Class_AvalancheProblemTypeId'])
print ("Categorical classes:", label_encoder.classes_)
class_names2 = []
for l in label_encoder.classes_:
class_names2.append(snoskred_keys['Class_AvalancheProblemTypeName'][str(l)])
print(class_names2)
###
train_input = np.array(df_train_input.values, dtype=float)
train_target = np.array(df_train_target.values, dtype=float)
clf2 = tree.DecisionTreeClassifier(min_samples_leaf=8)
clf2 = clf2.fit(train_input, train_target)
### could also use
#clf2 = clf2.fit(df_train_input.values, df_train_target.values)
dot_data2 = tree.export_graphviz(clf2, out_file=None,
feature_names = df_train_input.columns.values,
class_names = class_names2,
#proportion = True, # show precentages instead of members
label = "root",
filled=True, rounded=True, special_characters=True
)
graph2 = graphviz.Source(dot_data2)
graph2.render("avalanche_problem_meteo_train")
Explanation: Split into test and training data to run a prediction
We use the avalanche forecasts from Nordvestlandet including the forecasting regions Trollheimen, Romsdalen and Sunnmøre. We keep only the parameters provided by the mountain weather forecast. Besides the weather data for the current day we add the precipitation from the previous day as an additional parameter.
We use 75% of the data for training the model and the remaining 25% to test the model afterwards.
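For reference, the same 75/25 split could also be sketched with scikit-learn's train_test_split (this notebook does the split manually with a random permutation instead; the column names below are the ones used later in this notebook):
from sklearn.model_selection import train_test_split
feature_cols = ['Rainfall_Most_exposed_area', 'Rainfall_Average', 'Wind_Speed_Num',
                'Wind_Direction_Num', 'Temperature_Min', 'Temperature_Max',
                'Temperature_masl', 'Freezing_Level_masl', 'Rainfall_Most_exposed_area_-1day']
target_col = 'AvalancheProblems_0_Class_AvalancheProblemTypeId'
X_tr, X_te, y_tr, y_te = train_test_split(df_numdata[feature_cols], df_numdata[target_col], test_size=0.25)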
End of explanation
test_input = np.array(df_test_input.values, dtype=float)
test_target = np.array(df_test_target.values, dtype=float)
y = clf2.predict(test_input)
s = clf2.score(test_input, test_target)
i = np.arange(len(y))
fig = plt.figure(figsize=(15,10))
ax = fig.gca()
ax.scatter(i, np.squeeze(test_target), label='Truth')
ax.scatter(i, y, label='Prediction')
plt.xlabel('Index')
#ax = fig.gca()
#index_labels = ax.get_yticklabels()
#named_labels = [snoskred_keys['Class_AvalancheProblemTypeName'][l] for l in index_labels]
#print(list(index_labels))#, named_labels)
named_labels = ["Loose dry", "Loose wet", "Glide avalanche", "Wet slab", "Storm slab", "Wind slab", "Persistent slab",]
ax.set_yticklabels(named_labels)
plt.title('Trained on {2} cases\nTesting {0} cases\nClassification score = {1:0.2f}'.format(len(test_target), s, len(train_target)))
plt.legend()
plt.savefig('nordvestlandet_prediction.pdf')
Explanation: We can now compare the prediction by the model to the given target values in the test dataset.
End of explanation
from sklearn import metrics
def measure_performance(X,y,clf, show_accuracy=True, show_classification_report=True, show_confusion_matrix=True):
y_pred=clf.predict(X)
if show_accuracy:
print ("Accuracy:{0:.3f}".format(metrics.accuracy_score(y,y_pred)),"\n")
if show_classification_report:
print ("Classification report")
print (metrics.classification_report(y,y_pred),"\n")
if show_confusion_matrix:
print ("Confusion matrix")
print (metrics.confusion_matrix(y,y_pred),"\n")
measure_performance(test_input, test_target,clf2)#, show_classification_report=False, show_confusion_matrix=False)
Explanation: Investigating the metrics of the model
End of explanation |
14,784 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Introduction to numerical simulations
Step1: Next, we will need parameters for the simulation. These are known as initial conditions. For a 2 body gravitation problem, we'll need to know the masses of the two objects, the starting positions of the two objects, and the starting velocities of the two objects.
Below, I've included the initial conditions for the earth (a) and the Sun (b) at the average distance from the sun and the average velocity around the sun. We also need a starting time, and ending time for the simulation, and a "time-step" for the system. Feel free to adjust all of these as you see fit once you have built the system!
<br>
<br>
<br>
<br>
a note on dt
Step2: It will be nice to create a function for the force between Ma and Mb. Below is the physics for the force of Ma on Mb. How the physics works here is not important for the moment. Right now, I want to make sure you can translate the math shown into a python function. (I'll show a picture of the physics behind this math for those interested.)
$$\vec{F_g}=\frac{-GM_aM_b}{r^3}\vec{r}$$
and
$$\vec{r}=(x_b-x_a)\hat{x}+ (y_b-y_a)\hat{y}$$
$$r^3=((x_b-x_a)^2+(y_b-y_a)^2)^{3/2}$$
If we break Fg into the x and y components we get
Step3: Now that we have our force function, we will make a new function which does the whole simulation for a set of initial conditions. We call this function 'simulate' and it will take all the initial conditions as inputs. It will loop over each time step and call the force function to find the new positions for the asteroids at each time step.
The first part of our simulate function will be to initialize the loop and choose a loop type, for or while. Below is the general outline for how each type of loop can go.
<br>
<br>
<br>
For loop
Step4: Now we will call our simulate function with the initial conditions we defined earlier! We will take the output of simulate and store the x and y positions of the two particles.
Step5: Now for the fun part (or not so fun part if your simulation has an issue), plot your results! This is something well covered in previous lectures. Show me a plot of (xa,ya) and (xb,yb). Does it look sort of familiar? Hopefully you get something like the below image (in units of AU).
Step6: Challenge #1
Step7: We now wish to draw a random sample of asteroid masses from this distribution (Hint
Step8: Now let's loop over our random asteroid sample, run simulate and plot the results, for each one!
Step9: Going further
Step10: Additionally, publications won't always be printed in color, and not all readers have the ability to distinguish colors or text size in the same way, so differences in style improve accessibility as well.
Luckily, Matplotlib can do all of this and more! Let's experiment with some variations in how we can make our plots. We can use the 'marker =' argument in plt.plot to choose a marker for every datapoint. We can use the 'linestyle = ' argument to have a dotted line instead of a solid line. Try experimenting with the extra arguments in the below plotting code to make it look good to you! | Python Code:
#Physical Constants (SI units)
G=6.67e-11 #Universal Gravitational constant in m^3 per kg per s^2
AU=1.5e11 #Astronomical Unit in meters = Distance between sun and earth
daysec=24.0*60*60 #seconds in a day
Explanation: Introduction to numerical simulations: The 2 Body Problem
Many problems in statistical physics and astrophysics involve systems of many particles at once (sometimes on the order of thousands or more!). These can't be solved by the traditional pen and paper techniques you would encounter in a physics class. Instead, we must implement numerical solutions to these problems.
Today, you will create your own numerical simulation for a simple problem that is already solvable by pen and paper: the 2 body problem in 2D. In this problem, we will describe the motion of two particles that share a force between them (such as gravity). We'll design the simulation from an astronomer's mindset with astronomical units in mind. This simulation will be used to confirm the general motion of the earth around the Sun, and later will be used to predict the motion between two stars within relatively close range.
<br>
<br>
<br>
We will guide you through the physics and math required to create this simulation.
First, a brief review of the kinematic equations (remembering Order of Operations or PEMDAS, and that values can be positive or negative depending on the reference frame):
new time = old time + time change ($t = t_0 + \Delta t$)
new position = old position + velocity x time change ($x = x_0 + v \times \Delta t$)
new velocity = old velocity + acceleration x time change ($v = v_0 + a \times \Delta t$)
The problem here is designed to use the knowledge of scientific python you have been developing this week.
Like any code in Python, the first thing we need to do is import the libraries we need. Go ahead and import Numpy and Pyplot below as np and plt respectively. Don't forget to put matplotlib inline to get everything within the notebook.
Now we will define the physical constants of our system, which will also establish the unit system we have chosen. We'll use SI units here. Below, I've already created the constants. Make sure you understand what they are before moving on.
End of explanation
#####run specific constants. Change as needed#####
#Masses in kg
Ma=6.0e24 #always set as smaller mass
Mb=2.0e30 #always set as larger mass
#Time settings
t=0.0 #Starting time
dt=.01*daysec #Time set for simulation
tend=300*daysec #Time where simulation ends
#Initial conditions (position [m] and velocities [m/s] in x,y,z coordinates)
#For Ma
xa=1.0*AU
ya=0.0
vxa=0.0
vya=30000.0
#For Mb
xb=0.0
yb=0.0
vxb=0.0
vyb=0.0
Explanation: Next, we will need parameters for the simulation. These are known as initial conditions. For a 2 body gravitation problem, we'll need to know the masses of the two objects, the starting positions of the two objects, and the starting velocities of the two objects.
Below, I've included the initial conditions for the earth (a) and the Sun (b) at the average distance from the sun and the average velocity around the sun. We also need a starting time, and ending time for the simulation, and a "time-step" for the system. Feel free to adjust all of these as you see fit once you have built the system!
<br>
<br>
<br>
<br>
a note on dt:
As already stated, numeric simulations are approximations. In our case, we are approximating how time flows. We know it flows continuously, but the computer cannot work with this. So instead, we break up our time into equal chunks called "dt". The smaller the chunks, the more accurate you will become, but at the cost of computer time.
End of explanation
#Function to compute the force between the two objects
def Fg(Ma,Mb,G,xa,xb,ya,yb):
#Compute rx and ry between Ma and Mb
rx=xb-xa
ry=#Write it in
#compute r^3
r3=#Write in r^3 using the equation above. Make use of np.sqrt()
#Compute the force in Newtons. Use the equations above as a Guide!
fx=-#Write it in
fy=-#Write it in
return #What do we return?
Explanation: It will be nice to create a function for the force between Ma and Mb. Below is the physics for the force of Ma on Mb. How the physics works here is not important for the moment. Right now, I want to make sure you can translate the math shown into a python function. (I'll show a picture of the physics behind this math for those interested.)
$$\vec{F_g}=\frac{-GM_aM_b}{r^3}\vec{r}$$
and
$$\vec{r}=(x_b-x_a)\hat{x}+ (y_b-y_a)\hat{y}$$
$$r^3=((x_b-x_a)^2+(y_b-y_a)^2)^{3/2}$$
If we break Fg into the x and y components we get:
$$F_x=\frac{-GM_aM_b}{r^3}r_x$$
$$F_y=\frac{-GM_aM_b}{r^3}r_y$$
<br><br>So, $Fg$ will only need to be a function of xa, xb, ya, and yb. The velocities of the bodies will not be needed. Create a function that calculates the force between the bodies given the positions of the bodies. My recommendation here will be to feed the inputs as separate components and also return the force in terms of components (say, fx and fy). This will make your code easier to write and easier to read.
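For reference, one possible way the completed function can look (this is just one valid answer to the exercise blanks in the Fg skeleton, assuming numpy has been imported as np):
def Fg_example(Ma, Mb, G, xa, xb, ya, yb):
    # separation vector components and r^3, as in the equations above
    rx = xb - xa
    ry = yb - ya
    r3 = np.sqrt(rx**2 + ry**2)**3
    # force components
    fx = -G*Ma*Mb*rx/r3
    fy = -G*Ma*Mb*ry/r3
    return fx, fy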
End of explanation
def simulate(Ma,Mb,G,xa,ya,vxa,vya,xb,yb,vxb,vyb):
t=0
    #Run a loop for the simulation. Keep track of Ma and Mb positions and velocities
#Initialize vectors (otherwise there is nothing to append to!)
xaAr=np.array([])
yaAr=np.array([])
vxaAr=np.array([])
vyaAr=np.array([])
xbAr=#Write it in for Particle B
ybAr=#Write it in for Particle B
vxbAr=np.array([])
vybAr=np.array([])
#using while loop method with appending. Can also be done with for loops
while #Write the end condition here.
        #Compute current force on Ma and Mb. Ma receives the opposite force of Mb
fx,fy=Fg(Ma,Mb,G,xa,xb,ya,yb)
#Update the velocities and positions of the particles
vxa=vxa-fx*dt/Ma
vya=#Write it in for y
vxb=#Write it in for x
vyb=vyb+fy*dt/Mb
xa=xa+vxa*dt
ya=#Write it in for y
xb=#Write it in for x
yb=yb+vyb*dt
#Save data to lists
xaAr=np.append(xaAr,xa)
yaAr=np.append(yaAr,ya)
xbAr=#How will we append it here?
ybAr=np.append(ybAr,yb)
#update the time by one time step, dt
t=t+dt
return(xaAr,yaAr,xbAr,ybAr)
Explanation: Now that we have our force function, we will make a new function which does the whole simulation for a set of initial conditions. We call this function 'simulate' and it will take all the initial conditions as inputs. It will loop over each time step and call the force function to find the new positions for the asteroids at each time step.
The first part of our simulate function will be to initialize the loop and choose a loop type, for or while. Below is the general outline for how each type of loop can go.
<br>
<br>
<br>
For loop:
initialize position and velocity arrays with np.zeros or np.linspace for the amount of steps needed to go through the simulation (which is numSteps=(tend-t)/dt the way we have set up the problem). The for loop condition is based on time and should read roughly like: for i in range(numSteps)
<br>
<br>
<br>
While loop:
initialize position and velocity arrays with np.array([]) and use np.append() to tack on new values at each step like so, xaArray=np.append(xaArray,NEWVALUE). The while condition should read, while t<tend
My preference here is while since it keeps my calculations and appending separate. But, feel free to use whichever feels best for you!
Now for the actual simulation. This is the hardest part to code in. The general idea behind our loop is that as we step through time, we calculate the force, then calculate the new velocity, then the new position for each particle. At the end, we must update our arrays to reflect the new changes and update the time of the system. The time is super important! If we don't change the time (say in a while loop), the simulation would never end and we would never get our result. :(
Outline for the loop (order matters here)
Calculate the force with the last known positions (use your function!)
Calculate the new velocities using the approximation: vb = vb + dt*fg/Mb and va= va - dt*fg/Ma Note the minus sign here, and the need to do this for the x and y directions!
Calculate the new positions using the approximation: xb = xb + dt*Vb (same for a and for y's. No minus problem here)
Update the arrays to reflect our new values
Update the time using t=t+dt
<br>
<br>
<br>
<br>
Now when the loop closes back in, the cycle repeats in a logical way. Go one step at a time when creating this loop and use comments to help guide yourself. Ask for help if it gets tricky!
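For reference, one valid way the finished loop body can look (a sketch only; it assumes the Fg function has been completed, and your own version may differ):
while t < tend:
    fx, fy = Fg(Ma, Mb, G, xa, xb, ya, yb)
    # update velocities (note the opposite signs for the two bodies)
    vxa, vya = vxa - fx*dt/Ma, vya - fy*dt/Ma
    vxb, vyb = vxb + fx*dt/Mb, vyb + fy*dt/Mb
    # update positions with the new velocities
    xa, ya = xa + vxa*dt, ya + vya*dt
    xb, yb = xb + vxb*dt, yb + vyb*dt
    # append to the arrays here, then advance the clock
    t = t + dt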
End of explanation
#Do simulation with these parameters
xaAr,yaAr,xbAr,ybAr = simulate(Ma,Mb,G,xa,ya,vxa,vya,xb,yb,vxb,#Insert the variable for the y velocity of the B particle (vyb))
Explanation: Now we will call our simulate function with the initial conditions we defined earlier! We will take the output of simulate and store the x and y positions of the two particles.
End of explanation
from IPython.display import Image
Image("Earth-Sun-averageResult.jpg")
plt.figure()
plt.plot(xaAr/AU,yaAr/AU)
plt.plot(#Add positions for B particle)
plt.show()
Explanation: Now for the fun part (or not so fun part if your simulation has an issue), plot your results! This is something well covered in previous lectures. Show me a plot of (xa,ya) and (xb,yb). Does it look sort of familiar? Hopefully you get something like the below image (in units of AU).
End of explanation
#Mass distribution parameters
Mave=7.0e24 #The average asteroid mass
Msigma=1.0e24 #The standard deviation of asteroid masses
Size=3 #The number of asteroids we wish to simulate
Explanation: Challenge #1: Random Sampling of Initial Simulation Conditions
Now let's try to plot a few different asteroids with different initial conditions at once! Let's first produce the orbits of three asteroids with different masses. Suppose the masses of all asteroids in the main asteroid belt follow a Gaussian distribution. The parameters of the distribution of asteroid masses are defined below.
End of explanation
#Draw 3 masses from normally distributed asteroid mass distribution
MassAr = # Add your normal a.k.a. Gaussian distribution function,
# noting that the input to your numpy random number generator
# function will be: (Size)
Explanation: We now wish to draw a random sample of asteroid masses from this distribution (Hint: Look back at Lecture #3).
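One possible answer for the blank above (a hint, not the only way to write it):
MassAr = np.random.normal(Mave, Msigma, Size)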
End of explanation
plt.figure()
for mass in #What array should we loop over?:
    xaAr,yaAr,xbAr,ybAr=simulate(mass,Mb,G,xa,ya,vxa,vya,xb,yb,vxb,vyb)
plt.plot(xaAr/AU,yaAr/AU,label='Mass = %.2e'%mass) #Provide labels for each asteroid mass so we can generate a legend.
#Pro tip: The percent sign replaces '%.2e' in the string with the variable formatted the way we want!
plt.legend()
plt.show()
Explanation: Now let's loop over our random asteroid sample, run simulate and plot the results, for each one!
End of explanation
from IPython.display import Image
Image(filename="fig_example.jpg")
Explanation: Going further:
Can you make a plot with 5 asteroid masses instead of 3?
If you've got some extra time, now is a great chance to experiment with plotting various initial conditions and how the orbits change! What happens if we draw some random initial velocities instead of random masses, for example?
Challenge #2: Fancy Plotting Fun!
When showing off your results to people unfamiliar with your research, it helps to make them easier to understand through different visualization techniques (like legends, labels, patterns, different shapes, and sizes). You may have found that textbooks or news articles are more fun and easier to follow when concepts are illustrated colorfully yet clearly, such as the example figure below, which shows different annotations in the form of text:
End of explanation
plt.figure()
plt.plot(xaAr/AU,yaAr/AU,marker='x',linestyle='--',linewidth=1)
plt.plot()#Add positions for B particle
plt.show()
Explanation: Additionally, publications won't always be printed in color, and not all readers have the ability to distinguish colors or text size in the same way, so differences in style improve accessibility as well.
Luckily, Matplotlib can do all of this and more! Let's experiment with some variations in how we can make our plots. We can use the 'marker =' argument in plt.plot to choose a marker for every datapoint. We can use the 'linestyle = ' argument to have a dotted line instead of a solid line. Try experimenting with the extra arguments in the below plotting code to make it look good to you!
End of explanation |
14,785 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Beta Hedging
By Evgenia "Jenny" Nitishinskaya and Delaney Granizo-Mackenzie with example algorithms by David Edwards
Part of the Quantopian Lecture Series
Step1: Now we can perform the regression to find $\alpha$ and $\beta$
Step2: If we plot the line $\alpha + \beta X$, we can see that it does indeed look like the line of best fit
Step3: Risk Exposure
More generally, this beta gets at the concept of how much risk exposure you take on by holding an asset. If an asset has a high beta exposure to the S&P 500, then while it will do very well while the market is rising, it will do very poorly when the market falls. A high beta corresponds to high speculative risk. You are taking out a more volatile bet.
At Quantopian, we value strategies that have negligible beta exposure to as many factors as possible. What this means is that all of the returns in a strategy lie in the $\alpha$ portion of the model, and are independent of other factors. This is highly desirable, as it means that the strategy is agnostic to market conditions. It will make money equally well in a crash as it will during a bull market. These strategies are the most attractive to individuals with huge cash pools such as endowments and sovereign wealth funds.
Risk Management
The process of reducing exposure to other factors is known as risk management. Hedging is one of the best ways to perform risk management in practice.
Hedging
If we determine that our portfolio's returns are dependent on the market via this relation
$$Y_{portfolio} = \alpha + \beta X_{SPY}$$
then we can take out a short position in SPY to try to cancel out this risk. The amount we take out is $-\beta V$ where $V$ is the total value of our portfolio. This works because if our returns are approximated by $\alpha + \beta X_{SPY}$, then adding a short in SPY will make our new returns be $\alpha + \beta X_{SPY} - \beta X_{SPY} = \alpha$. Our returns are now purely alpha, which is independent of SPY and will suffer no risk exposure to the market.
Market Neutral
When a strategy exhibits a consistent beta of 0, we say that this strategy is market neutral.
Problems with Estimation
The problem here is that the beta we estimated is not necessarily going to stay the same as we walk forward in time. As such the amount of short we took out in the SPY may not perfectly hedge our portfolio, and in practice it is quite difficult to reduce beta by a significant amount.
We will talk more about problems with estimating parameters in future lectures. In short, each estimate has a standard error that corresponds with how stable the estimate is within the observed data.
Implementing hedging
Now that we know how much to hedge, let's see how it affects our returns. We will build our portfolio using the asset and the benchmark, weighing the benchmark by $-\beta$ (negative since we are short in it).
Step4: It looks like the portfolio return follows the asset alone fairly closely. We can quantify the difference in their performances by computing the mean returns and the volatilities (standard deviations of returns) for both
Step5: We've decreased volatility at the expense of some returns. Let's check that the alpha is the same as before, while the beta has been eliminated
Step6: Note that we developed our hedging strategy using historical data. We can check that it is still valid out of sample by checking the alpha and beta values of the asset and the hedged portfolio in a different time frame | Python Code:
# Import libraries
import numpy as np
from statsmodels import regression
import statsmodels.api as sm
import matplotlib.pyplot as plt
import math
# Get data for the specified period and stocks
start = '2014-01-01'
end = '2015-01-01'
asset = get_pricing('TSLA', fields='price', start_date=start, end_date=end)
benchmark = get_pricing('SPY', fields='price', start_date=start, end_date=end)
# We have to take the percent changes to get to returns
# Get rid of the first (0th) element because it is NAN
r_a = asset.pct_change()[1:]
r_b = benchmark.pct_change()[1:]
# Let's plot them just for fun
r_a.plot()
r_b.plot()
plt.ylabel("Daily Return")
plt.legend();
Explanation: Beta Hedging
By Evgenia "Jenny" Nitishinskaya and Delaney Granizo-Mackenzie with example algorithms by David Edwards
Part of the Quantopian Lecture Series:
www.quantopian.com/lectures
github.com/quantopian/research_public
Notebook released under the Creative Commons Attribution 4.0 License.
Factor Models
Factor models are a way of explaining the returns of one asset via a linear combination of the returns of other assets. The general form of a factor model is
$$Y = \alpha + \beta_1 X_1 + \beta_2 X_2 + \dots + \beta_n X_n$$
This looks familiar, as it is exactly the model type that a linear regression fits. The $X$'s can also be indicators rather than assets. An example might be an analyst estimate.
What is Beta?
An asset's beta to another asset is just the $\beta$ from the above model. For instance, if we regressed TSLA against the S&P 500 using the model $Y_{TSLA} = \alpha + \beta X$, then TSLA's beta exposure to the S&P 500 would be that beta. If we used the model $Y_{TSLA} = \alpha + \beta_1 X_{SPY} + \beta_2 X_{AAPL}$, then we now have two betas: one is TSLA's exposure to the S&P 500 and one is TSLA's exposure to AAPL.
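A small sketch of fitting such a two-factor model with statsmodels (synthetic factor returns, purely illustrative and not part of the original lecture):
import numpy as np
import statsmodels.api as sm
from statsmodels import regression
np.random.seed(0)
f1 = np.random.normal(0, 0.01, 250)   # stand-in for market returns
f2 = np.random.normal(0, 0.01, 250)   # stand-in for a second asset's returns
y = 0.001 + 0.8*f1 + 0.3*f2 + np.random.normal(0, 0.005, 250)
X_multi = sm.add_constant(np.column_stack([f1, f2]))
alpha_hat, beta1_hat, beta2_hat = regression.linear_model.OLS(y, X_multi).fit().params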
Often "beta" will refer to a stock's beta exposure to the S&P 500. We will use it to mean that unless otherwise specified.
End of explanation
# Let's define everything in familiar regression terms
X = r_b.values # Get just the values, ignore the timestamps
Y = r_a.values
def linreg(x,y):
# We add a constant so that we can also fit an intercept (alpha) to the model
# This just adds a column of 1s to our data
x = sm.add_constant(x)
model = regression.linear_model.OLS(y,x).fit()
# Remove the constant now that we're done
x = x[:, 1]
return model.params[0], model.params[1]
alpha, beta = linreg(X,Y)
print 'alpha: ' + str(alpha)
print 'beta: ' + str(beta)
Explanation: Now we can perform the regression to find $\alpha$ and $\beta$:
End of explanation
X2 = np.linspace(X.min(), X.max(), 100)
Y_hat = X2 * beta + alpha
plt.scatter(X, Y, alpha=0.3) # Plot the raw data
plt.xlabel("SPY Daily Return")
plt.ylabel("TSLA Daily Return")
# Add the regression line, colored in red
plt.plot(X2, Y_hat, 'r', alpha=0.9);
Explanation: If we plot the line $\alpha + \beta X$, we can see that it does indeed look like the line of best fit:
End of explanation
# Construct a portfolio with beta hedging
portfolio = -1*beta*r_b + r_a
portfolio.name = "TSLA + Hedge"
# Plot the returns of the portfolio as well as the asset by itself
portfolio.plot(alpha=0.9)
r_b.plot(alpha=0.5);
r_a.plot(alpha=0.5);
plt.ylabel("Daily Return")
plt.legend();
Explanation: Risk Exposure
More generally, this beta gets at the concept of how much risk exposure you take on by holding an asset. If an asset has a high beta exposure to the S&P 500, then while it will do very well while the market is rising, it will do very poorly when the market falls. A high beta corresponds to high speculative risk. You are taking out a more volatile bet.
At Quantopian, we value strategies that have negligible beta exposure to as many factors as possible. What this means is that all of the returns in a strategy lie in the $\alpha$ portion of the model, and are independent of other factors. This is highly desirable, as it means that the strategy is agnostic to market conditions. It will make money equally well in a crash as it will during a bull market. These strategies are the most attractive to individuals with huge cash pools such as endowments and sovereign wealth funds.
Risk Management
The process of reducing exposure to other factors is known as risk management. Hedging is one of the best ways to perform risk management in practice.
Hedging
If we determine that our portfolio's returns are dependent on the market via this relation
$$Y_{portfolio} = \alpha + \beta X_{SPY}$$
then we can take out a short position in SPY to try to cancel out this risk. The amount we take out is $-\beta V$ where $V$ is the total value of our portfolio. This works because if our returns are approximated by $\alpha + \beta X_{SPY}$, then adding a short in SPY will make our new returns be $\alpha + \beta X_{SPY} - \beta X_{SPY} = \alpha$. Our returns are now purely alpha, which is independent of SPY and will suffer no risk exposure to the market.
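As a quick numeric sketch of the sizing (illustrative numbers of my own, not from the lecture):
portfolio_value = 1000000.0              # V, total portfolio value in dollars
beta_estimate = 0.9                      # estimated beta to SPY
spy_hedge_notional = -beta_estimate * portfolio_value   # dollars of SPY to short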
Market Neutral
When a strategy exhibits a consistent beta of 0, we say that this strategy is market neutral.
Problems with Estimation
The problem here is that the beta we estimated is not necessarily going to stay the same as we walk forward in time. As such the amount of short we took out in the SPY may not perfectly hedge our portfolio, and in practice it is quite difficult to reduce beta by a significant amount.
We will talk more about problems with estimating parameters in future lectures. In short, each estimate has a standard error that corresponds with how stable the estimate is within the observed data.
Implementing hedging
Now that we know how much to hedge, let's see how it affects our returns. We will build our portfolio using the asset and the benchmark, weighing the benchmark by $-\beta$ (negative since we are short in it).
End of explanation
print "means: ", portfolio.mean(), r_a.mean()
print "volatilities: ", portfolio.std(), r_a.std()
Explanation: It looks like the portfolio return follows the asset alone fairly closely. We can quantify the difference in their performances by computing the mean returns and the volatilities (standard deviations of returns) for both:
End of explanation
P = portfolio.values
alpha, beta = linreg(X,P)
print 'alpha: ' + str(alpha)
print 'beta: ' + str(beta)
Explanation: We've decreased volatility at the expense of some returns. Let's check that the alpha is the same as before, while the beta has been eliminated:
End of explanation
# Get the alpha and beta estimates over the last year
start = '2014-01-01'
end = '2015-01-01'
asset = get_pricing('TSLA', fields='price', start_date=start, end_date=end)
benchmark = get_pricing('SPY', fields='price', start_date=start, end_date=end)
r_a = asset.pct_change()[1:]
r_b = benchmark.pct_change()[1:]
X = r_b.values
Y = r_a.values
historical_alpha, historical_beta = linreg(X,Y)
print 'Asset Historical Estimate:'
print 'alpha: ' + str(historical_alpha)
print 'beta: ' + str(historical_beta)
# Get data for a different time frame:
start = '2015-01-01'
end = '2015-06-01'
asset = get_pricing('TSLA', fields='price', start_date=start, end_date=end)
benchmark = get_pricing('SPY', fields='price', start_date=start, end_date=end)
# Repeat the process from before to compute alpha and beta for the asset
r_a = asset.pct_change()[1:]
r_b = benchmark.pct_change()[1:]
X = r_b.values
Y = r_a.values
alpha, beta = linreg(X,Y)
print 'Asset Out of Sample Estimate:'
print 'alpha: ' + str(alpha)
print 'beta: ' + str(beta)
# Create hedged portfolio and compute alpha and beta
portfolio = -1*historical_beta*r_b + r_a
P = portfolio.values
alpha, beta = linreg(X,P)
print 'Portfolio Out of Sample:'
print 'alpha: ' + str(alpha)
print 'beta: ' + str(beta)
# Plot the returns of the portfolio as well as the asset by itself
portfolio.name = "TSLA + Hedge"
portfolio.plot(alpha=0.9)
r_a.plot(alpha=0.5);
r_b.plot(alpha=0.5)
plt.ylabel("Daily Return")
plt.legend();
Explanation: Note that we developed our hedging strategy using historical data. We can check that it is still valid out of sample by checking the alpha and beta values of the asset and the hedged portfolio in a different time frame:
End of explanation |
14,786 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Load the data from our JSON file.
The data is stored as a dictionary of dictionaries in the json file. We store it that way because it's easy to add data to the existing master data file. Also, I haven't figured out how to get it in a database yet.
Step2: Clean up the data a bit
Right now the 'shared' and 'split' are included in number of bathrooms. If I were to convert that to a number I would consider a shared/split bathroom to be half or 0.5 of a bathroom.
Step3: It looks like Portland!!!
Let's cluster the data. Start by creating a list of [['lat','long'], ...]
Step4: We'll use K Means Clustering because that's the clustering method I recently learned in class! There may be others that work better, but this is the tool that I know
Step5: We chose our neighborhoods!
I've found that every once in a while the centers end up in different points, but they are fairly consistent. Now let's process our data points and figure out where the closest neighborhood center is to it!
Step6: Create a function that will label each point with a number corresponding to its neighborhood
Step7: Here's the new Part. We're breaking out the neighborhood values into their own columns. Now the algorithms can read them as categorical data rather than continuous data.
Step8: OK, let's put it through a Decision Tree!
What about Random Forest?
Step9: Wow! up to .87! That's our best yet! What if we add more trees???
Step10: Up to .88!
So what is our goal now? I'd like to see if adjusting the number of neighborhoods increases the accuracy. The same goes for the effect of the number of trees.
Step11: Looks like the optimum is right around 10 or 11, and then starts to drop off. Let's get a little more granular and look at a smaller range
Step12: Trying a few times, it looks like 10, 11 and 12 get the best results at ~.85. Of course, we'll need to redo some of these optimizations after we properly process our data. Hopefully we'll see some more consistency then too. | Python Code:
with open('../pipeline/data/ProcessedDay90ApartmentData.json') as g:
my_dict2 = json.load(g)
dframe2 = DataFrame(my_dict2)
dframe2 = dframe2.T
dframe2 = dframe2[['content', 'laundry', 'price', 'dog', 'bed',
'bath', 'feet', 'long', 'parking', 'lat', 'smoking', 'getphotos',
'cat', 'hasmap', 'wheelchair', 'housingtype']]
dframe2.describe()
dframe = pd.get_dummies(dframe2, columns = ['laundry', 'parking', 'smoking', 'wheelchair', 'housingtype'])
pd.set_option('display.max_columns', 500)
dframe = df
from sklearn.cross_validation import train_test_split, ShuffleSplit
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import accuracy_score as acc
dframe.describe(include='all')
from sklearn.cross_validation import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(
# dframe.drop('price', axis = 1), dframe.price, test_size=0.33)
# print X_train.shape
# print y_train.shape
from sklearn.ensemble import RandomForestRegressor
reg = RandomForestRegressor()
reg.fit(X_train, y_train)
print reg.score(X_test, y_test)
scores = []
for thous in range(1000,len(dframe),1000):
temp_dframe = dframe[:thous]
X_train, X_test, y_train, y_test = train_test_split(
temp_dframe.drop('price', axis = 1), temp_dframe.price, test_size=0.33)
reg = RandomForestRegressor()
reg.fit(X_train,y_train)
pred = reg.predict(X_test)
# pred = [float(x) for x in pred]
# y_test = [float(x) for x in y_test]
score = acc(pred, np.array(y_test))
scores.append(score)
scores
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
plt.plot(range(1000,len(dframe),1000),scores)
def listing_cleaner(entry):
print entry
df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
'C': [1, 2, 3]})
df
pd.get_dummies(df, columns=['A','C'])
listing_cleaner(my_dict['5465197037'])
type(dframe['bath']['5399866740'])
Explanation: Load the data from our JSON file.
The data is stored as a dictionary of dictionaries in the json file. We store it that way because it's easy to add data to the existing master data file. Also, I haven't figured out how to get it in a database yet.
End of explanation
dframe.bath = dframe.bath.replace('shared',0.5)
dframe.bath = dframe.bath.replace('split',0.5)
dframe.smoking = dframe.smoking.replace(np.nan, 'smoking')
dframe.furnished = dframe.furnished.replace(np.nan,'not furnished')
dframe.wheelchair = dframe.wheelchair.replace(np.nan, 'not wheelchair accessible')
dframe.describe()
dframe.bed.unique()
from sklearn.preprocessing import Imputer, LabelEncoder
def meanimputer(column):
imp = Imputer(missing_values='NaN', strategy='mean', axis=1)
imp.fit(column)
X = imp.transform(column)
return X[0]
arr = np.array([np.nan, 'house', 'boat', 'houseboat', 'house', np.nan, 'house','houseboat'])
prac_df = DataFrame()
prac_df['arr'] = arr
prac_df['arr']
modeimputer(prac_df['arr'])
def modeimputer(column):
le = LabelEncoder()
column = le.fit_transform(column)
print le.classes_
print type(le.classes_[0])
print column
nan = le.transform([np.nan])[0]
print nan
print type(column)
column = list(column)
for _,i in enumerate(column):
if i == nan:
column[_] = np.nan
imp = Imputer(missing_values='NaN', strategy='most_frequent', axis=1)
imp.fit(column)
X = imp.transform(column)
for _,i in enumerate(X[0]):
if np.isnan(i):
X[_] = 0
X = X.astype(int)
Y = le.inverse_transform(X)
return Y
import pandas as pd
import numpy as np
from sklearn.base import TransformerMixin
class ModeImputer(TransformerMixin):
def __init__(self):
        """Impute missing values.
        Columns of dtype object are imputed with the most frequent value
        in column.
        Columns of other types are imputed with mean of column.
        Credit: http://stackoverflow.com/questions/25239958/
        impute-categorical-missing-values-in-scikit-learn
        """
def fit(self, X, y=None):
self.fill = pd.Series([X[c].value_counts().index[0]
if X[c].dtype == np.dtype('O') else X[c].mean() for c in X],
index=X.columns)
return self
def transform(self, X, y=None):
return X.fillna(self.fill)
data = [
['a', 1, 2],
['b', 1, 1],
['b', 2, 2],
[np.nan, np.nan, np.nan]
]
X = pd.DataFrame(data)
xt = ModeImputer().fit_transform(X)
print('before...')
print(X)
print('after...')
print(xt)
dframe = ModeImputer().fit_transform(dframe)
dframe.head()
dframe.describe(include = 'all')
dframe.bed.mean()
dframe.parking.unique()
u_dframe = DataFrame()
dframe['bath'] = meanimputer(dframe['bath'])
dframe['bed'] = meanimputer(dframe['bed'])
dframe['feet'] = meanimputer(dframe['feet'])
dframe['lat'] = meanimputer(dframe['lat'])
dframe['long'] = meanimputer(dframe['long'])
dframe.head()
dframe.describe(include='all')
data = dframe[dframe.lat > 45.4][dframe.lat < 45.6][dframe.long < -122.0][dframe.long > -123.5]
plt.figure(figsize=(15,10))
plt.scatter(data = data, x = 'long',y='lat')
Explanation: Clean up the data a bit
Right now the 'shared' and 'split' are included in number of bathrooms. If I were to convert that to a number I would consider a shared/split bathroom to be half or 0.5 of a bathroom.
End of explanation
XYdf = dframe[dframe.lat > 45.4][dframe.lat < 45.6][dframe.long < -122.0][dframe.long > -123.5]
data = [[XYdf['lat'][i],XYdf['long'][i]] for i in XYdf.index]
Explanation: It looks like Portland!!!
Let's cluster the data. Start by creating a list of [['lat','long'], ...]
End of explanation
from sklearn.cluster import KMeans
km = KMeans(n_clusters=40)
km.fit(data)
neighborhoods = km.cluster_centers_
%pylab inline
figure(1,figsize=(20,12))
plot([row[1] for row in data],[row[0] for row in data],'b.')
for i in km.cluster_centers_:
plot(i[1],i[0], 'g*',ms=25)
'''Note to Riley: come back and make it look pretty'''
Explanation: We'll use K Means Clustering because that's the clustering method I recently learned in class! There may be others that work better, but this is the tool that I know
End of explanation
neighborhoods = neighborhoods.tolist()
for i in enumerate(neighborhoods):
i[1].append(i[0])
print neighborhoods
Explanation: We chose our neighborhoods!
I've found that every once in a while the centers end up in different points, but they are fairly consistent. Now let's process our data points and figure out where the closest neighborhood center is to it!
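As an aside (a sketch, not what the notebook does next), the fitted KMeans object can assign the nearest center itself:
nearest_center_labels = km.predict([[dframe['lat'][i], dframe['long'][i]] for i in dframe.index])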
End of explanation
def clusterer(X, Y,neighborhoods):
neighbors = []
for i in neighborhoods:
distance = ((i[0]-X)**2 + (i[1]-Y)**2)
neighbors.append(distance)
closest = min(neighbors)
return neighbors.index(closest)
neighborhoodlist = []
for i in dframe.index:
neighborhoodlist.append(clusterer(dframe['lat'][i],dframe['long'][i],neighborhoods))
dframe['neighborhood'] = neighborhoodlist
dframe
Explanation: Create a function that will label each point with a number corresponding to its neighborhood
End of explanation
from sklearn import preprocessing
def CategoricalToBinary(dframe,column_name):
le = preprocessing.LabelEncoder()
listy = le.fit_transform(dframe[column_name])
dframe[column_name] = listy
unique = dframe[column_name].unique()
serieslist = [list() for _ in xrange(len(unique))]
for column, _ in enumerate(serieslist):
for i, item in enumerate(dframe[column_name]):
if item == column:
serieslist[column].append(1)
else:
serieslist[column].append(0)
dframe[column_name+str(column)] = serieslist[column]
return dframe
pd.set_option('max_columns', 100)
dframe = CategoricalToBinary(dframe,'housingtype')
dframe = CategoricalToBinary(dframe,'parking')
dframe = CategoricalToBinary(dframe,'laundry')
dframe = CategoricalToBinary(dframe,'smoking')
dframe = CategoricalToBinary(dframe,'wheelchair')
dframe = CategoricalToBinary(dframe,'neighborhood')
dframe
dframe = dframe.drop('date',1)
dframe = dframe.drop('housingtype',1)
dframe = dframe.drop('parking',1)
dframe = dframe.drop('laundry',1)
dframe = dframe.drop('smoking',1)
dframe = dframe.drop('wheelchair',1)
dframe = dframe.drop('neighborhood',1)
dframe = dframe.drop('time',1)
columns=list(dframe.columns)
from __future__ import division
print len(dframe)
df2 = dframe[dframe.price < 10000][columns].dropna()
print len(df2)
print len(df2)/len(dframe)
price = df2[['price']].values
columns.pop(columns.index('price'))
features = df2[columns].values
from sklearn.cross_validation import train_test_split
features_train, features_test, price_train, price_test = train_test_split(features, price, test_size=0.1, random_state=42)
Explanation: Here's the new Part. We're breaking out the neighborhood values into their own columns. Now the algorithms can read them as categorical data rather than continuous data.
End of explanation
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
reg = RandomForestRegressor()
reg = reg.fit(features_train, price_train)
forest_pred = reg.predict(features_test)
forest_pred = np.array([[item] for item in forest_pred])
print r2_score(forest_pred, price_test)
plt.scatter(forest_pred,price_test)
df2['predictions'] = reg.predict(df2[columns])
df2['predictions_diff'] = df2['predictions']-df2['price']
sd = np.std(df2['predictions_diff'])
sns.kdeplot(df2['predictions_diff'][df2['predictions_diff']>-150][df2['predictions_diff']<150])
sns.plt.xlim(-150,150)
data = df2[dframe.lat > 45.45][df2.lat < 45.6][df2.long < -122.4][df2.long > -122.8][df2['predictions_diff']>-150][df2['predictions_diff']<150]
plt.figure(figsize=(15,10))
plt.scatter(data = data, x = 'long',y='lat', c = 'predictions_diff',s=10,cmap='coolwarm')
dframe
print np.mean([1,2,34,np.nan])
def averager(dframe):
dframe = dframe.T
dframe.dropna()
averages = {}
for listing in dframe:
try:
key = str(dframe[listing]['bed'])+','+str(dframe[listing]['bath'])+','+str(dframe[listing]['neighborhood'])+','+str(dframe[listing]['feet']-dframe[listing]['feet']%50)
if key not in averages:
averages[key] = {'average_list':[dframe[listing]['price']], 'average':0}
elif key in averages:
averages[key]['average_list'].append(dframe[listing]['price'])
except TypeError:
continue
for entry in averages:
averages[entry]['average'] = np.mean(averages[entry]['average_list'])
return averages
averages = averager(dframe)
print averages
dframe['averages']= averages[str(dframe['bed'])+','+str(dframe['bath'])+','+str(dframe['neighborhood'])+','+str(dframe['feet']-dframe['feet']%50)]
dframe.T
Explanation: OK, let's put it through a Decision Tree!
What about Random Forest?
End of explanation
reg = RandomForestRegressor(n_estimators = 100)
reg = reg.fit(features_train, price_train)
forest_pred = reg.predict(features_test)
forest_pred = np.array([[item] for item in forest_pred])
print r2_score(forest_pred, price_test)
print plt.scatter(pred,price_test)
from sklearn.tree import DecisionTreeRegressor
reg = DecisionTreeRegressor(max_depth = 5)
reg.fit(features_train, price_train)
print len(features_train[0])
columns = [str(x) for x in columns]
print columns
from sklearn.tree import export_graphviz
export_graphviz(reg,feature_names=columns)
Explanation: Wow! up to .87! That's our best yet! What if we add more trees???
End of explanation
def neighborhood_optimizer(dframe,neighborhood_number_range, counter_num):
XYdf = dframe[dframe.lat > 45.4][dframe.lat < 45.6][dframe.long < -122.0][dframe.long > -123.5]
data = [[XYdf['lat'][i],XYdf['long'][i]] for i in XYdf.index]
r2_dict = []
for i in neighborhood_number_range:
counter = counter_num
average_accuracy_list = []
while counter > 0:
km = KMeans(n_clusters=i)
km.fit(data)
neighborhoods = km.cluster_centers_
neighborhoods = neighborhoods.tolist()
for x in enumerate(neighborhoods):
x[1].append(x[0])
neighborhoodlist = []
for z in dframe.index:
neighborhoodlist.append(clusterer(dframe['lat'][z],dframe['long'][z],neighborhoods))
dframecopy = dframe.copy()
dframecopy['neighborhood'] = Series((neighborhoodlist), index=dframe.index)
df2 = dframecopy[dframe.price < 10000][['bath','bed','feet','dog','cat','content','getphotos', 'hasmap', 'price','neighborhood']].dropna()
features = df2[['bath','bed','feet','dog','cat','content','getphotos', 'hasmap', 'neighborhood']].values
price = df2[['price']].values
features_train, features_test, price_train, price_test = train_test_split(features, price, test_size=0.1)
reg = RandomForestRegressor()
reg = reg.fit(features_train, price_train)
forest_pred = reg.predict(features_test)
forest_pred = np.array([[item] for item in forest_pred])
counter -= 1
average_accuracy_list.append(r2_score(forest_pred, price_test))
total = 0
for entry in average_accuracy_list:
total += entry
r2_accuracy = total/len(average_accuracy_list)
r2_dict.append((i,r2_accuracy))
print r2_dict
return r2_dict
neighborhood_number_range = [i for _,i in enumerate(range(2,31,2))]
neighborhood_number_range
r2_dict = neighborhood_optimizer(dframe,neighborhood_number_range,10)
r2_dict[:][0]
plt.scatter([x[0] for x in r2_dict],[x[1] for x in r2_dict])
Explanation: Up to .88!
So what is our goal now? I'd like to see if adjusting the number of neighborhoods increases the accuracy. The same goes for the effect of the number of trees.
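A rough sketch for the tree-count side of that question (my addition; the original notebook only sweeps the neighborhood count):
tree_scores = []
for n_trees in [10, 50, 100, 200, 400]:
    reg = RandomForestRegressor(n_estimators=n_trees)
    reg = reg.fit(features_train, price_train)
    tree_scores.append((n_trees, reg.score(features_test, price_test)))
print tree_scores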
End of explanation
neighborhood_number_range = [i for _,i in enumerate(range(7,15))]
neighborhood_number_range
r2_dict = neighborhood_optimizer(dframe,neighborhood_number_range,10)
print r2_dict
plt.scatter([x[0] for x in r2_dict],[x[1] for x in r2_dict])
Explanation: Looks like the optimum is right around 10 or 11, and then starts to drop off. Let's get a little more granular and look at a smaller range
End of explanation
r2_dict = neighborhood_optimizer(dframe,[10,11,12],25)
Explanation: Trying a few times, it looks like 10, 11 and 12 get the best results at ~.85. Of course, we'll need to redo some of these optimizations after we properly process our data. Hopefully we'll see some more consistency then too.
End of explanation |
14,787 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Creating Ukulele Chord Diagrams in SVG with Python
With the Python module uchord you can create ukulele chord diagrams in SVG format.
Step1: <img src="pic/c.svg" align="left"><br><br><br><br><br>
If you like it, use it. If you have some suggestions, tell me ([email protected]).
Thanks
Special thanks to the project https://github.com/pianosnake/uke-chord
Step2: That should work.
Changelog
|Version | |
|--------------|------------------------------------------------------------------------------------------|
| 0.1.0 | To start with|
Example
The ukulele chord diagrams are a picture of the fretboard.
The four strings are the vertical lines. The dots are the places where your fingers should be.
<img src="pic/f.svg" align="left"><br><br><br><br><br>
For example the F major chord. You have to put one finger at the second fret of the first string and one at the first fret of the third string. String two and four are empty.
The number 2010 represents the F chord. Every digit stands for a string. This representation is used in uchord, too.
Step3: If you want to specify which finger should be used for which fret, use the parameter fingers. An underscore stands for an empty string.
Step4: <img src="pic/ff.svg" align="left"><br><br><br><br><br>
For marking the root note or other texts under the chord you can use the parameter subtexts
Step5: <img src="pic/ffs.svg" align="left"><br><br><br><br><br>
If a chord is played higher up the fret, you can specify the fret to start with. Parameter starting_fret
Step6: <img src="pic/dm7.svg" align="left"><br><br><br><br><br>
Using Class Chord and Chords
The module uchord has the class Chord, which represents a specific chord.
Step7: With the method to_svg you get the svg string
Step8: The class Chords stands for a list of chords.
Step9: With IPython and Jupyter
With IPython.display you can use the chords in a notebook.
Step10: You can define your magic command for chords
Step11: to use | Python Code:
import uchord
uchord.write_chord('c.svg','C','0003')
Explanation: Creating Ukulele Chord Diagrams in SVG with Python
With the Python module uchord you can create ukulele chord diagrams in SVG format.
End of explanation
pip install uchord
Explanation: <img src="pic/c.svg" align="left"><br><br><br><br><br>
If you like it, use it. If you have some suggestions, tell me ([email protected]).
Thanks
Special thanks to the project https://github.com/pianosnake/uke-chord where I learned much about
SVG and chord diagrams. I recycled some of the svg commands.
Installation
First you need Python 3 (https://www.python.org) - Python 3.6 should work, because it's the development environment
And this module uchord - simply copy the source
Or try
End of explanation
import uchord
uchord.write_chord('f.svg','F','2010')
Explanation: That should work.
Changelog
|Version | |
|--------------|------------------------------------------------------------------------------------------|
| 0.1.0 | To start with|
Example
The ukulele chord diagrams are a picture of the fretboard.
The four strings are the vertical lines. The dots are the places where your fingers should be.
<img src="pic/f.svg" align="left"><br><br><br><br><br>
For example the F major chord. You have to put one finger at the second fret of the first string and one at the first fret of the third string. String two and four are empty.
The number 2010 represents the F chord. Every digit stands for a string. This representation is used in uchord, too.
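A couple more shapes written the same way (standard GCEA-tuning fingerings; a quick sketch, not taken from the project README):
uchord.write_chord('g.svg','G','0232')
uchord.write_chord('am.svg','Am','2000')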
End of explanation
uchord.write_chord('f.svg','F','2010',fingers='2_1_')
Explanation: If you want to specify which finger should be used for which fret, use the parameter fingers. An underscore stands for an empty string.
End of explanation
uchord.write_chord('f.svg','F','2010',fingers='2_1_',subtexts='__R_')
Explanation: <img src="pic/ff.svg" align="left"><br><br><br><br><br>
For marking the root note or other texts under the chord you can use the parameter subtexts
End of explanation
uchord.write_chord('dm7.svg','Dm7','7988',fingers='1423',starting_fret=6)
Explanation: <img src="pic/ffs.svg" align="left"><br><br><br><br><br>
If a chord is played higher up the fret, you can specify the fret to start with. Parameter starting_fret
End of explanation
from uchord import Chord
c = Chord('F','2010',fingers='2_1_',subtexts='__R_')
Explanation: <img src="pic/dm7.svg" align="left"><br><br><br><br><br>
Using Class Chord and Chords
The module uchord has the class Chord, which represents a specific chord.
End of explanation
svg = c.to_svg()
Explanation: With the method to_svg you get the svg string
End of explanation
from uchord import Chords
c = Chords([Chord("F7","2313",fingers="2314",subtexts="__R_"),
Chord("C7","0001",fingers="___1",subtexts="_R__"),
Chord("G7","0212",fingers="_213",subtexts="R___"),
Chord("A7","0100",fingers="_1__",subtexts="___R")])
svg = c.to_svg()
Explanation: The class Chords stands for a list of chords.
End of explanation
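If you want to keep the combined diagram as a file, you can simply write the returned SVG string out yourself; a small sketch (the file name blues_chords.svg is just an example):
# save the combined SVG of several chords to disk
f = open('blues_chords.svg', 'w')
f.write(svg)
f.close()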
from IPython.display import SVG, display
from uchord import Chords
c = Chords([Chord("F7","2313",fingers="2314",subtexts="__R_"),
Chord("C7","0001",fingers="___1",subtexts="_R__"),
Chord("G7","0212",fingers="_213",subtexts="R___"),
Chord("A7","0100",fingers="_1__",subtexts="___R")])
display(SVG(c.to_svg()))
Explanation: With IPython and Jupyter
With IPython.display you can use the chords in a notebook.
End of explanation
from IPython.core.magic import register_cell_magic
from IPython.display import SVG, display
from uchord import Chord, Chords
@register_cell_magic
def uchord(line, cell):
lines = cell.splitlines()
chordlist = []
name = ""
frets = ""
starting_fret=1
fingers=""
subtexts=""
for l in lines:
tokens = l.split(',')
name = tokens[0].strip()
frets = tokens[1].strip()
for t in tokens[2:]:
arg = t.split('=')
argname = arg[0].strip()
if argname.upper() == "FINGERS":
fingers = arg[1].strip()
elif argname.upper() == "SUBTEXTS":
subtexts = arg[1].strip()
elif argname.upper() == "STARTING_FRET":
starting_fret = int(arg[1])
chordlist.append(Chord(name, frets, fingers=fingers, subtexts=subtexts, starting_fret=starting_fret))
return display(SVG(Chords(chordlist).to_svg()))
Explanation: You can define your magic command for chords
End of explanation
%%uchord
A7, 0100, fingers=_1__, subtexts=___R
G7, 0212, fingers=_213, subtexts=R__3
F7, 2313, fingers=2314, subtexts=__R5
C7, 0001, fingers=___1, subtexts=_R_7
Explanation: to use
End of explanation |
14,788 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
The largest prime (so far)
A new record for the largest prime has been found lately. Let's explore this number, learn more about it, and see how to work with it using some Python and Numpy; finally we will take a look at Cython and GMP.
Step1: This number was found in 2016 using the Great Internet Mersenne Prime Search (GIMPS). This prime is one of the Mersenne primes, which are defined by the following formula
Step2: The smallest non-prime (composite) example is $2^{11}-1=2047$.
Step3: The largest prime number that was found lately is
Step4: Lucas–Lehmer test
A very easy way to test whether a Mersenne number is prime is the Lucas-Lehmer test, which uses the following series.
$4, 14, 194, 37634, \ldots$
The series starts with 4; each subsequent value is obtained from the previous one with this formula
Step5: The test: the Mersenne number $M_p=2^p-1$ is prime exactly when $S_{p-2}$ is divisible by $M_p$, i.e. $S_{p-2} \bmod M_p = 0$. The problem is that this series grows very large very fast, so in practice the remainder modulo $M_p$ is taken at every step to keep the numbers small. | Python Code:
import numpy as np
import math
from datetime import datetime
%load_ext Cython
Explanation: The largest prime (so far)
A new record for the largest prime has been found lately. Let's explore this number, learn more about it, and see how to work with it using some Python and Numpy; finally we will take a look at Cython and GMP.
End of explanation
2**2-1
2**3-1
2**5-1
2**7-1
Explanation: This number was found in 2016 using the Great Internet Mersenne Prime Search (GIMPS). This prime is one of the Mersenne primes, which are defined by the following formula:
$$2^p-1$$
Where $p$ is a prime number. Note that not every Mersenne number is prime, even when the exponent $p$ is prime.
$2^2-1=3$
$2^3-1=7$
$2^5-1=31$
$2^7-1=127$ ...
End of explanation
2**11-1
Explanation: The smallest non-prime (composite) example is $2^{11}-1=2047$.
End of explanation
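As a quick aside (not in the original notebook), plain trial division finds the factorisation $2047 = 23 \times 89$, confirming that it is composite:
n = 2 ** 11 - 1
# find the smallest non-trivial divisor by trial division
divisor = next(d for d in range(2, int(n ** 0.5) + 1) if n % d == 0)
print(n, "=", divisor, "*", n // divisor)  # 2047 = 23 * 89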
p = 74207281
the_number = (2 ** p) - 1
Explanation: The largest prime number that was found lately is:
$$2^{74,207,281}-1$$
The reason why I didn't print the actual value is that the number is very large (over 22 million digits).
End of explanation
S = 4
print(S)
print(len(str(S)))
S = S ** 2 - 2
print(S)
print(len(str(S)), "digits")
S = S ** 2 - 2
print(S)
print(len(str(S)), "digits")
S = S ** 2 - 2
print(S)
print(len(str(S)), "digits")
S = S ** 2 - 2
print(S)
print(len(str(S)), "digits")
S = S ** 2 - 2
print(S)
print(len(str(S)), "digits")
# The largest prime (so far)
the_number = 2 ** 74207281 - 1
print(int(math.log10(the_number))+1, "digits")
Explanation: Lucas–Lehmer test
A very easy way to test whether a Mersenne number is prime is the Lucas-Lehmer test, which uses the following series.
$4, 14, 194, 37634, \ldots$
The series starts with 4; each subsequent value is obtained from the previous one with this formula:
$S_i=S_{i-1}^2-2$
End of explanation
p = 74207281
the_number = (2 ** p) - 1
S = 4
time_stamp = datetime.now()
for i in range(p-2):
S = (S ** 2 - 2) % the_number
if i % 1 == 0:
print(i, datetime.now() - time_stamp,"")
time_stamp = datetime.now()
if S == 0:
print("PRIME")
%%cython
cdef unsigned long p = 61
cdef unsigned long P = (2 ** p) - 1
S = 4
for i in range(p-2):
S = S ** 2 - 2
S = S % P
if i % 10 == 0:
print(i)
if S == 0:
print("PRIME")
%%cython --link-args=-lgmp
cdef extern from "gmp.h":
ctypedef struct mpz_t:
pass
ctypedef unsigned long mp_bitcnt_t
cdef void mpz_init(mpz_t)
cdef void mpz_init_set_ui(mpz_t, unsigned int)
cdef void mpz_add(mpz_t, mpz_t, mpz_t)
cdef void mpz_add_ui(mpz_t, const mpz_t, unsigned long int)
cdef void mpz_sub (mpz_t, const mpz_t, const mpz_t)
cdef void mpz_sub_ui (mpz_t, const mpz_t, unsigned long int)
cdef void mpz_ui_sub (mpz_t, unsigned long int, const mpz_t)
cdef void mpz_mul (mpz_t, const mpz_t, const mpz_t)
cdef void mpz_mul_si (mpz_t, const mpz_t, long int)
cdef void mpz_mul_ui (mpz_t, const mpz_t, unsigned long int)
cdef void mpz_mul_2exp (mpz_t, const mpz_t, mp_bitcnt_t)
cdef void mpz_mod (mpz_t, const mpz_t, const mpz_t)
cdef unsigned long int mpz_get_ui(const mpz_t)
#cdef unsigned long p = 61
cdef mp_bitcnt_t p = 74207281
cdef mpz_t t # = 1
cdef mpz_t a # = 1
cdef mpz_t P # = (2 ** p) - 1
cdef mpz_t S # = 4
mpz_init(t)
mpz_init_set_ui(t, 1)
mpz_init(a)
mpz_init_set_ui(a, 2)
mpz_init(P)
mpz_mul_2exp(P,t,p)
mpz_sub_ui(P,P,1)
mpz_init(S)
mpz_init_set_ui(S, 4)
for i in range(p-2):
#S = S ** 2 - 2
mpz_mul(S,S,S)
mpz_sub_ui(S,S,2)
#S = S % P
mpz_mod(S,S,P)
if i % 1000 == 0:
print(i)
if mpz_get_ui(S) == 0:
print("PRIME")
else:
print("COMPOSITE")
#print(mpz_get_ui(P))
Explanation: The test: the Mersenne number $M_p=2^p-1$ is prime exactly when $S_{p-2}$ is divisible by $M_p$, i.e. $S_{p-2} \bmod M_p = 0$. The problem is that the series grows very large very fast, so instead the remainder modulo $M_p$ is taken at every step, which keeps the intermediate values below $M_p$.
End of explanation |
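To summarise the approach, here is a compact stand-alone version of the test (a sketch added for illustration, not part of the original notebook) that reduces modulo $M_p$ at every step; it reproduces the known results for the small exponents shown at the start:
def lucas_lehmer(p):
    # True if M_p = 2**p - 1 is prime (p must be an odd prime)
    m = 2 ** p - 1
    s = 4
    for _ in range(p - 2):
        s = (s * s - 2) % m
    return s == 0

# 3, 5, 7 and 13 give Mersenne primes; 11 gives the composite 2047
print([p for p in (3, 5, 7, 11, 13) if lucas_lehmer(p)])  # [3, 5, 7, 13]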
14,789 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
In this notebook the best models and input parameters will be searched for. The problem at hand is predicting the price of any stock symbol 28 days ahead, assuming one model for all the symbols. The best training period length, base period length, and base period step will be determined, using the MRE metric (and/or the R^2 metric). The step for the rolling validation will be determined as a compromise between having enough points (I consider that about 1000 different target days may be good enough) and the time needed to compute the validation.
Step1: Let's get the data.
Step2: Let's find the best params set for some different models
- Dummy Predictor (mean)
Step3: - Linear Predictor
Step4: - Random Forest model | Python Code:
# Basic imports
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import sys
from time import time
from sklearn.metrics import r2_score, median_absolute_error
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20.0, 10.0)
%load_ext autoreload
%autoreload 2
sys.path.append('../../')
import predictor.feature_extraction as fe
import utils.preprocessing as pp
import utils.misc as misc
AHEAD_DAYS = 28
Explanation: In this notebook the best models and input parameters will be searched for. The problem at hand is predicting the price of any stock symbol 28 days ahead, assuming one model for all the symbols. The best training period length, base period length, and base period step will be determined, using the MRE metric (and/or the R^2 metric). The step for the rolling validation will be determined as a compromise between having enough points (I consider that about 1000 different target days may be good enough) and the time needed to compute the validation.
End of explanation
datasets_params_list_df = pd.read_pickle('../../data/datasets_params_list_df.pkl')
print(datasets_params_list_df.shape)
datasets_params_list_df.head()
train_days_arr = 252 * np.array([1, 2, 3])
params_list_df = pd.DataFrame()
for train_days in train_days_arr:
temp_df = datasets_params_list_df[datasets_params_list_df['ahead_days'] == AHEAD_DAYS].copy()
temp_df['train_days'] = train_days
params_list_df = params_list_df.append(temp_df, ignore_index=True)
print(params_list_df.shape)
params_list_df.head()
Explanation: Let's get the data.
End of explanation
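An optional quick check of the combined parameter table: each training-period length should contribute the same number of parameter sets, since the same filtered dataset list is appended once per value of train_days.
print(params_list_df['train_days'].value_counts())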
tic = time()
from predictor.dummy_mean_predictor import DummyPredictor
PREDICTOR_NAME = 'dummy'
# Global variables
eval_predictor = DummyPredictor()
step_eval_days = 60 # The step to move between training/validation pairs
params = {'eval_predictor': eval_predictor, 'step_eval_days': step_eval_days}
results_df = misc.parallelize_dataframe(params_list_df, misc.apply_mean_score_eval, params)
results_df['r2'] = results_df.apply(lambda x: x['scores'][0], axis=1)
results_df['mre'] = results_df.apply(lambda x: x['scores'][1], axis=1)
# Pickle that!
results_df.to_pickle('../../data/results_ahead{}_{}_df.pkl'.format(AHEAD_DAYS, PREDICTOR_NAME))
results_df['mre'].plot()
print('Minimum MRE param set: \n {}'.format(results_df.iloc[np.argmin(results_df['mre'])]))
print('Maximum R^2 param set: \n {}'.format(results_df.iloc[np.argmax(results_df['r2'])]))
toc = time()
print('Elapsed time: {} seconds.'.format((toc-tic)))
Explanation: Let's find the best params set for some different models
- Dummy Predictor (mean)
End of explanation
tic = time()
from predictor.linear_predictor import LinearPredictor
PREDICTOR_NAME = 'linear'
# Global variables
eval_predictor = LinearPredictor()
step_eval_days = 60 # The step to move between training/validation pairs
params = {'eval_predictor': eval_predictor, 'step_eval_days': step_eval_days}
results_df = misc.parallelize_dataframe(params_list_df, misc.apply_mean_score_eval, params)
results_df['r2'] = results_df.apply(lambda x: x['scores'][0], axis=1)
results_df['mre'] = results_df.apply(lambda x: x['scores'][1], axis=1)
# Pickle that!
results_df.to_pickle('../../data/results_ahead{}_{}_df.pkl'.format(AHEAD_DAYS, PREDICTOR_NAME))
results_df['mre'].plot()
print('Minimum MRE param set: \n {}'.format(results_df.iloc[np.argmin(results_df['mre'])]))
print('Maximum R^2 param set: \n {}'.format(results_df.iloc[np.argmax(results_df['r2'])]))
toc = time()
print('Elapsed time: {} seconds.'.format((toc-tic)))
Explanation: - Linear Predictor
End of explanation
tic = time()
from predictor.random_forest_predictor import RandomForestPredictor
PREDICTOR_NAME = 'random_forest'
# Global variables
eval_predictor = RandomForestPredictor()
step_eval_days = 60 # The step to move between training/validation pairs
params = {'eval_predictor': eval_predictor, 'step_eval_days': step_eval_days}
results_df = misc.parallelize_dataframe(params_list_df, misc.apply_mean_score_eval, params)
results_df['r2'] = results_df.apply(lambda x: x['scores'][0], axis=1)
results_df['mre'] = results_df.apply(lambda x: x['scores'][1], axis=1)
# Pickle that!
results_df.to_pickle('../../data/results_ahead{}_{}_df.pkl'.format(AHEAD_DAYS, PREDICTOR_NAME))
results_df['mre'].plot()
print('Minimum MRE param set: \n {}'.format(results_df.iloc[np.argmin(results_df['mre'])]))
print('Maximum R^2 param set: \n {}'.format(results_df.iloc[np.argmax(results_df['r2'])]))
toc = time()
print('Elapsed time: {} seconds.'.format((toc-tic)))
Explanation: - Random Forest model
End of explanation |
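With all three predictors evaluated and pickled, one way to compare them side by side (a sketch; it assumes the three result files written above are present) is to reload each file and look at the best MRE achieved:
best_mre = {}
for name in ('dummy', 'linear', 'random_forest'):
    df = pd.read_pickle('../../data/results_ahead{}_{}_df.pkl'.format(AHEAD_DAYS, name))
    best_mre[name] = df['mre'].min()
print(pd.Series(best_mre).sort_values())  # lower MRE is better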
14,790 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Step1: Named Entity Recognition
In any text document, there are particular terms that represent specific entities that are more informative and have a unique context. These entities are known as named entities , which more specifically refer to terms that represent real-world objects like people, places, organizations, and so on, which are often denoted by proper names.
Named entity recognition (NER) , also known as entity chunking/extraction , is a popular technique used in information extraction to identify and segment the named entities and classify or categorize them under various predefined classes.
There are out of the box NER taggers available through popular libraries like nltk and spacy. Each library follows a different approach to solve the problem.
NER with SpaCy
Step2: SpaCy offers a fast NER tagger based on a number of techniques. The exact algorithm hasn't been described in much detail, but the documentation notes that <font color=blue> "The exact algorithm is a pastiche of well-known methods, and is not currently described in any single publication " </font>
The entities identified by the spaCy NER tagger are shown in the following table (details here
Step3: NER with Stanford NLP
Stanford’s Named Entity Recognizer is based on an implementation of linear chain Conditional Random Field (CRF) sequence models.
Prerequisites
Step4: NER with Stanford CoreNLP
NLTK is slowly deprecating the old Stanford Parsers in favor of the more active Stanford Core NLP Project. It might even get removed after nltk version 3.4 so best to stay updated.
Details | Python Code:
text = """Three more countries have joined an “international grand committee” of parliaments, adding to calls for
Facebook’s boss, Mark Zuckerberg, to give evidence on misinformation to the coalition. Brazil, Latvia and Singapore
bring the total to eight different parliaments across the world, with plans to send representatives to London on 27
November with the intention of hearing from Zuckerberg. Since the Cambridge Analytica scandal broke, the Facebook chief
has only appeared in front of two legislatures: the American Senate and House of Representatives, and the European parliament.
Facebook has consistently rebuffed attempts from others, including the UK and Canadian parliaments, to hear from Zuckerberg.
He added that an article in the New York Times on Thursday, in which the paper alleged a pattern of behaviour from Facebook
to “delay, deny and deflect” negative news stories, “raises further questions about how recent data breaches were allegedly
dealt with within Facebook.”"""
print(text)
import re
text = re.sub(r'\n', '', text)
text
import spacy
nlp = spacy.load('en')
text_nlp = nlp(text)
# print named entities in article
ner_tagged = [(word.text, word.ent_type_) for word in text_nlp]
print(ner_tagged)
from spacy import displacy
# visualize named entities
displacy.render(text_nlp, style='ent', jupyter=True)
Explanation: Named Entity Recognition
In any text document, there are particular terms that represent specific entities that are more informative and have a unique context. These entities are known as named entities , which more specifically refer to terms that represent real-world objects like people, places, organizations, and so on, which are often denoted by proper names.
Named entity recognition (NER) , also known as entity chunking/extraction , is a popular technique used in information extraction to identify and segment the named entities and classify or categorize them under various predefined classes.
There are out of the box NER taggers available through popular libraries like nltk and spacy. Each library follows a different approach to solve the problem.
NER with SpaCy
End of explanation
named_entities = []
temp_entity_name = ''
temp_named_entity = None
for term, tag in ner_tagged:
if tag:
temp_entity_name = ' '.join([temp_entity_name, term]).strip()
temp_named_entity = (temp_entity_name, tag)
else:
if temp_named_entity:
named_entities.append(temp_named_entity)
temp_entity_name = ''
temp_named_entity = None
print(named_entities)
from collections import Counter
c = Counter([item[1] for item in named_entities])
c.most_common()
Explanation: SpaCy offers a fast NER tagger based on a number of techniques. The exact algorithm hasn't been described in much detail, but the documentation notes that <font color=blue> "The exact algorithm is a pastiche of well-known methods, and is not currently described in any single publication " </font>
The entities identified by the spaCy NER tagger are shown in the following table (details here: spacy_documentation)
End of explanation
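As an aside (not part of the original walk-through), spaCy also exposes the grouped entities directly on the parsed document via doc.ents, which avoids the manual reassembly loop used above:
# spaCy has already grouped multi-token entities for us
spacy_entities = [(ent.text, ent.label_) for ent in text_nlp.ents]
print(spacy_entities[:10])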
import os
from nltk.tag import StanfordNERTagger
JAVA_PATH = r'C:\Program Files\Java\jre1.8.0_192\bin\java.exe'
os.environ['JAVAHOME'] = JAVA_PATH
STANFORD_CLASSIFIER_PATH = 'E:/stanford/stanford-ner-2014-08-27/classifiers/english.all.3class.distsim.crf.ser.gz'
STANFORD_NER_JAR_PATH = 'E:/stanford/stanford-ner-2014-08-27/stanford-ner.jar'
sn = StanfordNERTagger(STANFORD_CLASSIFIER_PATH,
path_to_jar=STANFORD_NER_JAR_PATH)
sn
text_enc = text.encode('ascii', errors='ignore').decode('utf-8')
ner_tagged = sn.tag(text_enc.split())
print(ner_tagged)
named_entities = []
temp_entity_name = ''
temp_named_entity = None
for term, tag in ner_tagged:
if tag != 'O':
temp_entity_name = ' '.join([temp_entity_name, term]).strip()
temp_named_entity = (temp_entity_name, tag)
else:
if temp_named_entity:
named_entities.append(temp_named_entity)
temp_entity_name = ''
temp_named_entity = None
print(named_entities)
c = Counter([item[1] for item in named_entities])
c.most_common()
Explanation: NER with Stanford NLP
Stanford’s Named Entity Recognizer is based on an implementation of linear chain Conditional Random Field (CRF) sequence models.
Prerequisites: Download the official Stanford NER Tagger from here, which seems to work quite well. You can try out a later version by going to this website
This model is only trained on instances of PERSON, ORGANIZATION and LOCATION types. The model is exposed through nltk wrappers.
End of explanation
from nltk.parse import CoreNLPParser
ner_tagger = CoreNLPParser(url='http://localhost:9000', tagtype='ner')
ner_tagger
import nltk
tags = list(ner_tagger.raw_tag_sents(nltk.sent_tokenize(text)))
tags = [sublist[0] for sublist in tags]
tags = [word_tag for sublist in tags for word_tag in sublist]
print(tags)
named_entities = []
temp_entity_name = ''
temp_named_entity = None
for term, tag in tags:
if tag != 'O':
temp_entity_name = ' '.join([temp_entity_name, term]).strip()
temp_named_entity = (temp_entity_name, tag)
else:
if temp_named_entity:
named_entities.append(temp_named_entity)
temp_entity_name = ''
temp_named_entity = None
print(named_entities)
c = Counter([item[1] for item in named_entities])
c.most_common()
Explanation: NER with Stanford CoreNLP
NLTK is slowly deprecating the old Stanford Parsers in favor of the more active Stanford Core NLP Project. It might even get removed after nltk version 3.4 so best to stay updated.
Details: https://github.com/nltk/nltk/issues/1839
Step by Step Tutorial here: https://github.com/nltk/nltk/wiki/Stanford-CoreNLP-API-in-NLTK
Sadly a lot of things have changed in the process so we need to do some extra effort to make it work!
Get CoreNLP from here
After you download, go to the folder and spin up a terminal and start the Core NLP Server locally
E:\> java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -preload tokenize,ssplit,pos,lemma,ner,parse,depparse -status_port 9000 -port 9000 -timeout 15000
If it runs successfully you should see the following messages on the terminal
E:\stanford\stanford-corenlp-full-2018-02-27>java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -preload tokenize,ssplit,pos,lemma,ner,parse,depparse -status_port 9000 -port 9000 -timeout 15000
[main] INFO CoreNLP - --- StanfordCoreNLPServer#main() called ---
[main] INFO CoreNLP - setting default constituency parser
[main] INFO CoreNLP - warning: cannot find edu/stanford/nlp/models/srparser/englishSR.ser.gz
[main] INFO CoreNLP - using: edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz instead
[main] INFO CoreNLP - to use shift reduce parser download English models jar from:
[main] INFO CoreNLP - http://stanfordnlp.github.io/CoreNLP/download.html
[main] INFO CoreNLP - Threads: 4
[main] INFO edu.stanford.nlp.pipeline.StanfordCoreNLP - Adding annotator tokenize
[main] INFO edu.stanford.nlp.pipeline.TokenizerAnnotator - No tokenizer type provided. Defaulting to PTBTokenizer.
[main] INFO edu.stanford.nlp.pipeline.StanfordCoreNLP - Adding annotator ssplit
[main] INFO edu.stanford.nlp.pipeline.StanfordCoreNLP - Adding annotator pos
[main] INFO edu.stanford.nlp.tagger.maxent.MaxentTagger - Loading POS tagger from edu/stanford/nlp/models/pos-tagger/english-left3words/english-left3words-distsim.tagger ... done [1.4 sec].
[main] INFO edu.stanford.nlp.pipeline.StanfordCoreNLP - Adding annotator lemma
[main] INFO edu.stanford.nlp.pipeline.StanfordCoreNLP - Adding annotator ner
[main] INFO edu.stanford.nlp.ie.AbstractSequenceClassifier - Loading classifier from edu/stanford/nlp/models/ner/english.all.3class.distsim.crf.ser.gz ... done [1.9 sec].
[main] INFO edu.stanford.nlp.ie.AbstractSequenceClassifier - Loading classifier from edu/stanford/nlp/models/ner/english.muc.7class.distsim.crf.ser.gz ... done [2.0 sec].
[main] INFO edu.stanford.nlp.ie.AbstractSequenceClassifier - Loading classifier from edu/stanford/nlp/models/ner/english.conll.4class.distsim.crf.ser.gz ... done [0.8 sec].
[main] INFO edu.stanford.nlp.time.JollyDayHolidays - Initializing JollyDayHoliday for SUTime from classpath edu/stanford/nlp/models/sutime/jollyday/Holidays_sutime.xml as sutime.binder.1.
[main] INFO edu.stanford.nlp.time.TimeExpressionExtractorImpl - Using following SUTime rules: edu/stanford/nlp/models/sutime/defs.sutime.txt,edu/stanford/nlp/models/sutime/english.sutime.txt,edu/stanford/nlp/models/sutime/english.holidays.sutime.txt
[main] INFO edu.stanford.nlp.pipeline.TokensRegexNERAnnotator - TokensRegexNERAnnotator ner.fine.regexner: Read 580641 unique entries out of 581790 from edu/stanford/nlp/models/kbp/regexner_caseless.tab, 0 TokensRegex patterns.
[main] INFO edu.stanford.nlp.pipeline.TokensRegexNERAnnotator - TokensRegexNERAnnotator ner.fine.regexner: Read 4857 unique entries out of 4868 from edu/stanford/nlp/models/kbp/regexner_cased.tab, 0 TokensRegex patterns.
[main] INFO edu.stanford.nlp.pipeline.TokensRegexNERAnnotator - TokensRegexNERAnnotator ner.fine.regexner: Read 585498 unique entries from 2 files
[main] INFO edu.stanford.nlp.pipeline.StanfordCoreNLP - Adding annotator parse
[main] INFO edu.stanford.nlp.parser.common.ParserGrammar - Loading parser from serialized file edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz ... done [4.6 sec].
[main] INFO edu.stanford.nlp.pipeline.StanfordCoreNLP - Adding annotator depparse
[main] INFO edu.stanford.nlp.parser.nndep.DependencyParser - Loading depparse model: edu/stanford/nlp/models/parser/nndep/english_UD.gz ...
[main] INFO edu.stanford.nlp.parser.nndep.Classifier - PreComputed 99996, Elapsed Time: 22.43 (s)
[main] INFO edu.stanford.nlp.parser.nndep.DependencyParser - Initializing dependency parser ... done [24.4 sec].
[main] INFO CoreNLP - Starting server...
[main] INFO CoreNLP - StanfordCoreNLPServer listening at /0:0:0:0:0:0:0:0:9000
End of explanation |
14,791 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Using Interrupts and asyncio for Buttons and Switches
This notebook provides a simple example of using asyncio I/O to interact asynchronously with multiple input devices. A task is created for each input device and coroutines are used to process the results. To demonstrate, we recreate the flashing LEDs example from the getting started notebook, but using interrupts to avoid polling the GPIO devices. The aim is to have holding a button result in the corresponding LED flashing.
Initialising the Environment
First we import and instantiate all required classes to interact with the buttons, switches and LEDs, and ensure the base overlay is loaded.
Step1: Define the flash LED task
The next step is to create a task that waits for the button to be pressed and flashes the LED until the button is released. The while True loop ensures that the coroutine keeps running until cancelled, so that multiple presses of the same button can be handled.
Step2: Create the task
As there are four buttons we want to check, we create four tasks. The function asyncio.ensure_future is used to convert the coroutine to a task and schedule it in the event loop. The tasks are stored in an array so they can be referred to later when we want to cancel them.
Step3: Monitoring the CPU Usage
One of the advantages of interrupt-based I/O is that it minimises CPU usage while waiting for events. To see how CPU usage is impacted by the flashing LED tasks we create another task that prints out the current CPU utilisation every 3 seconds.
Step4: Run the event loop
All of the blocking wait_for commands will run the event loop until the condition is met. All that is needed is to call the blocking wait_for_value method on the switch we are using as the termination condition.
While waiting for switch 0 to go high, users can press any push button on the board to flash the corresponding LED. While this loop is running, try opening a terminal and running top to see that Python is consuming no CPU cycles while waiting for peripherals.
As this code runs until switch 0 is high, make sure it is low before running the example.
Step5: Clean up
Even though the event loop has stopped running, the tasks are still active and will run again when the event loop is next used. To avoid this, the tasks should be cancelled when they are no longer needed.
Step6: Now if we re-run the event loop, nothing will happen when we press the buttons. The process will block until the switch is set back down to the low position. | Python Code:
from pynq import PL
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
Explanation: Using Interrupts and asyncio for Buttons and Switches
This notebook provides a simple example of using asyncio I/O to interact asynchronously with multiple input devices. A task is created for each input device and coroutines are used to process the results. To demonstrate, we recreate the flashing LEDs example from the getting started notebook, but using interrupts to avoid polling the GPIO devices. The aim is to have holding a button result in the corresponding LED flashing.
Initialising the Environment
First we import and instantiate all required classes to interact with the buttons, switches and LEDs, and ensure the base overlay is loaded.
End of explanation
import asyncio
async def flash_led(num):
while True:
await base.buttons[num].wait_for_value_async(1)
while base.buttons[num].read():
base.leds[num].toggle()
await asyncio.sleep(0.1)
base.leds[num].off()
Explanation: Define the flash LED task
The next step is to create a task that waits for the button to be pressed and flashes the LED until the button is released. The while True loop ensures that the coroutine keeps running until cancelled, so that multiple presses of the same button can be handled.
End of explanation
tasks = [asyncio.ensure_future(flash_led(i)) for i in range(4)]
Explanation: Create the task
As there are four buttons we want to check, we create four tasks. The function asyncio.ensure_future is used to convert the coroutine to a task and schedule it in the event loop. The tasks are stored in an array so they can be referred to later when we want to cancel them.
End of explanation
import psutil
async def print_cpu_usage():
# Calculate the CPU utilisation by the amount of idle time
# each CPU has had in three second intervals
last_idle = [c.idle for c in psutil.cpu_times(percpu=True)]
while True:
await asyncio.sleep(3)
next_idle = [c.idle for c in psutil.cpu_times(percpu=True)]
usage = [(1-(c2-c1)/3) * 100 for c1,c2 in zip(last_idle, next_idle)]
print("CPU Usage: {0:3.2f}%, {1:3.2f}%".format(*usage))
last_idle = next_idle
tasks.append(asyncio.ensure_future(print_cpu_usage()))
Explanation: Monitoring the CPU Usage
One of the advantages of interrupt-based I/O is that it minimises CPU usage while waiting for events. To see how CPU usage is impacted by the flashing LED tasks we create another task that prints out the current CPU utilisation every 3 seconds.
End of explanation
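For reference (an optional alternative, not from the original notebook), psutil can also report per-CPU utilisation directly, without the idle-time arithmetic above:
# blocks for one second and returns utilisation per CPU as percentages
print(psutil.cpu_percent(interval=1, percpu=True))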
if base.switches[0].read():
print("Please set switch 0 low before running")
else:
base.switches[0].wait_for_value(1)
Explanation: Run the event loop
All of the blocking wait_for commands will run the event loop until the condition is met. All that is needed is to call the blocking wait_for_value method on the switch we are using as the termination condition.
While waiting for switch 0 to go high, users can press any push button on the board to flash the corresponding LED. While this loop is running, try opening a terminal and running top to see that Python is consuming no CPU cycles while waiting for peripherals.
As this code runs until switch 0 is high, make sure it is low before running the example.
End of explanation
[t.cancel() for t in tasks]
Explanation: Clean up
Even though the event loop has stopped running, the tasks are still active and will run again when the event loop is next used. To avoid this, the tasks should be cancelled when they are no longer needed.
End of explanation
base.switches[0].wait_for_value(0)
Explanation: Now if we re-run the event loop, nothing will happen when we press the buttons. The process will block until the switch is set back down to the low position.
End of explanation |
14,792 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<img src="images/JHI_STRAP_Web.png" style="width
Step1: <a id="load"></a>
Load results
We load data from the multiplexed run that was performed on the JHI cluster, as described in README.md.
The column containing the experimentally-measured predictor (input intensity) is headed log_input, and the column containing the measured output probe intensity is headed log_output.
The predicted output mean and median values for 1000 iterations of two chains are found in the columns y_pred_mean and y_pred_median. 5%, 25%, 75% and 95% percentiles for predicted output are given in the columns y_pred_5pc, y_pred_25pc, y_pred_75pc and y_pred_95pc. These can be combined to provide 50% and 90% credibility intervals for each predicted output intensity.
Step2: <div class="alert-warning">
The crossvalidation data has 49872 rows (one for each combination of probe, treatment and replicate) and 20 columns, named as shown.
</div>
<a id="loadfit"></a>
Load full fit
We want to investigate how predictive performance compares with our estimate of parameters from a fit on full data, to see - for example - whether the genes for which we have confidence in a treatment effect are associated with good predictions of output probe intensity.
We load the model fit results, generated by notebook 02-full_model_fit.ipynb, from the associated output pickle file.
Step3: The estimates dataframe contains the notebook 02-full_model_fit.ipynb estimates of parameters for each gene, with percentiles enabling a 95% or 50% credibility interval to be estimated.
<a id="merge"></a>
Merge crossvalidation and fit
We join the crossvalidation data with the fits for the corresponding genes, on the basis of locus_tag, to make plotting and analysis easier. We also reduce the columns in the dataset to a subset describing the locus tag, probe ID, the treatment and replicate factor, the measured log-transformed input and output intensities, and the median and 90% CI for the predicted output intensity. We also keep the estimated median $\delta$ and associated 95% CI so we can investigate the relationship with treatment effect.
Step4: <a id="errors"></a>
Calculate prediction error
We create new columns to represent the error in crossvalidation prediction of output intensity
Step5: <a id="errdist"></a>
Error distributions
We inspect the distribution of these errors directly by plotting.
<br /><div class="alert-success">
<b>There are some strongly outlying points with large error, but most prediction errors, either relative or absolute, are close to zero.</b>
</div>
Step6: <a id="erroutput"></a>
Error with respect to measured intensity
By plotting the absolute and relative error in output intensity prediction against measured values, we can get an idea of whether the errors are uniformly distributed, or likely to be associated primarily with weak measured intensities.
We might expect an overabundance of large relative error for low measured intensity values, as the relative effect of a constant absolute error will be greater for these smaller values.
Step7: <div class="alert-success">
<b>The plots above indicate that absolute prediction errors are small for probes with intensities of 6 or greater. In particular, probes with strong intensities have very low absolute and relative error.</b>
</div>
<a id="errtrt"></a>
Error with respect to estimated treatment effect
By relating the absolute and relative error to the estimated treatment effect, we can interpret whether we should continue to be confident in the results of notebook 02-full_model_fit.ipynb. If the 25% percentile for our estimate of $\delta$ is greater than zero, then the 50% CI for that probe's gene does not include zero, and we interpret this as a positive effect due to treatment.
Step8: <br /><div class="alert-warning">
Median and 25% percentile values of $\delta_{j[i]}$ close to or less than zero are associated most frequently with large errors. A small number of large absolute and relative errors are associated with values that are positive and quite far from zero.
</div>
We subset the data to investigate this more closely, in the dataset trt_pos representing the 115 locus tags with an estimated positive treatment effect.
Step9: By visual inspection, most probe prediction errors for the "treatment positive" probes are small in relative terms, but appear to be Normally distributed in absolute terms with respect to the value of d_median. We can examine the error distributions for these probes with respect to log input and output intensities, as above.
Step10: <br /><div class="alert-success">
These plots show that a large relative prediction error is associated mainly with probes that have low measured input or output intensity, less than ≈4 units.
</div>
<a id="errinterval"></a>
Error with respect to prediction interval
As an estimate of prediction accuracy, we can calculate the number of observed output intensities that lie outwith the 90% credibility interval of the prediction. We create the column pred_success, which contains True where the predicted output value lies in the 90% CI for the crossvalidation predictions.
Step11: <div class="alert-success">
<b>This identifies 4048/49872 probe predictions (an 8% misprediction rate!), covering 1101 locus_tags in total.</b>
</div>
We can gain an insight into the number of probes that are likely to be in error for any particular locus tag, by plotting the distribution of their counts
Step12: Most of the locus tags with prediction errors have errors in 4 probes or fewer (out of an average of six or so), so in general we might expect most probes for most locus tags to be relatively well-predicted by our model.
But is this the case for the locus tags with a predicted positive treatment effect?
<a id="#errpos"></a>
Prediction errors for positive treatment effects
We can examine the distribution of crossvalidation prediction errors for the locus tags with positive estimated treatment effect, and compare this to the distribution for the dataset as a whole. If these are similar, then there may be no systematic misprediction for the positive estimated treatment values.
Step13: <div class="alert-danger">
<b>We find that 389/1566 of our probes (around 25% of predictions, covering 77 of the 115 locus tags with an estimated positive effect) have measured output intensity that lies outwith the 90% CI for the crossvalidation prediction. This is a considerably higher rate than for the dataset as a whole.</b>
</div>
Plotting the distribution of the number of probes in error for each locus tag also shows a noisier distribution than that for the main dataset, with some locus tags having a much larger number of probe predictions that appear to be in error
Step14: The modal number of probes in error is one, but the distribution has a different shape to that for the complete dataset, suggesting a systematic failure of the model to predict precisely the output intensity for some probes.
<a id="candidates"></a>
Prediction errors for positive treatment effect candidates
We break down the prediction errors for our positive treatment effect candidates into two types (after Gelman & Carlin (2014) DOI
Step15: <div class="alert-success">
<b>There are no observed Type S errors.</b>
</div>
All errors in the predicted output intensity are Type M errors
Step16: In general, most probe predictions are within one log unit of the observed value for the probes corresponding to positive treatment effects. This is encouraging and suggests that, in general, the parameter estimates and model are appropriate.
Step17: <div class="alert-success">
<b>The violinplot of errors for probes whose observed output isn't in the 90% CI for the predicted output indicates that most errors are less than 2 log units from the observed value in either direction. The number of positive and negative errors are also very similar, suggesting that there is no systematic bias towards over- or under-estimation.</b>
</div>
<a id="questionable"></a>
Questionable locus tags
Although our results seem to be on the whole quite sound, there are some locus tags for which the results may be more questionable. We can identify the total number of probes for each locus tag, and the number of probe output predictions that are in error, and determine which locus tags appear to have an unusually high proportion of errors as having more questionable estimates.
We generate a dataframe, indexed by locus tag, with a column fail_prop that describes the proportion of probe predictions that are in error.
Step18: From this data, we see that most locus tags in this dataset have no probe errors, and a small proportion have more than 50% probe errors. We can inspect the predictions for those locus tags directly.
Step19: <div class="alert-success">
<b>This identifies 10 questionable locus tags, with 110 Type M probe errors, in total.</b>
</div>
We use the function plot_locustag_predictions() to visualise the prediction results for these genes directly. In these plots, values 0-2 indicate control replicates, and values 3-5 treatment replicates. The grey points show measured input intensity for the probe, and the black points show measured output intensity. The coloured points show predicted median values, and the bars indicate the 90% credibility interval for the prediction.
Yellow and blue bars indicate that the observed output intensity lies within the 90% CI; green and red bars indicate that the observed output value lies outwith the 90% CI. | Python Code:
%pylab inline
import os
import pickle
import warnings; warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import scipy
import seaborn as sns; sns.set_context('notebook')
import tools
Explanation: <img src="images/JHI_STRAP_Web.png" style="width: 150px; float: right;">
Supplementary Information: Holmes et al. 2020
3. Crossvalidation
This notebook describes analysis of 10-fold crossvalidation data of the Bayesian hierarchical model described in notebook 02-full_model_fit.ipynb. This model estimates the selective effects on growth (control) and leaf passage (treatment) due to individual genes from E. coli DH10B (carrier) and Sakai (BAC load). The estimates are made on the basis of data obtained using a multi-E. coli microarray.
Much of the code for the visualisation, analysis and data manipulation of the fitting results is found in the associated Python module tools.py, which should also be present in this directory.
Table of Contents
Crossvalidation summary
Load results
Load full fit
Merge crossvalidation and fit
Calculate prediction errors
Error distributions
Error with respect to measured output intensity
Error with respect to estimated treatment effect
Error with respect to prediction interval
Prediction errors for positive treatment effects
Prediction errors for positive treatment effect candidates
Plotting distribution of effects
Identifying candidates
Calculating Type M and Type S errors
Over- and Under-estimates
<a id="summary"></a>
Crossvalidation summary
We will load predicted and measured output probe intensity data from a 10-fold crossvalidation, performed as described in the file README.md (the data provided in this repository was obtained from a run on the JHI cluster).
<br /><div class="alert-success">
<b>The Stan model used to generate the crossvalidation results is identical in form to that used in the notebook 02-full_model_fit.ipynb, except that it is fit ten times, each time on a random 90% of the data, and a prediction made for the output probe intensity of the remaining 10% of the data, given the measured input probe intensities as a predictor. The results of these ten runs are combined in the file 10-fold_CV.tab.</b>
</div>
We can assess the applicability and success of the Stan model by investigating how well it can predict the measured output intensity of a probe, given its measured input intensity. We are conducting crossvalidation on the original dataset, and we consider a prediction to be "correct" if the measured output intensity lies within the 90% credibility interval of the predicted output intensity.
<br /><div class="alert-success">
<b>We assess the performance of the model in two ways.</b>
</div>
Failed predictions: those predictions of output intensity whose 90% CI does not include the measured output intensity.
As Type S/Type M errors for the failed predictions (after Gelman & Carlin (2014) DOI: 10.1177/1745691614551642). Here, we assess the performance of the model by comparing the median predicted output intensity to the measured output intensity, for each probe. If the difference between predicted output intensity and measured input intensity has the opposite sign to that for the difference between measured output intensity and measured input intensity, we consider the prediction to be a Type S (sign) error. If, however, the direction of the prediction is correct (no Type S error), but the magnitude is incorrect, we consider this prediction to be a Type M (magnitude) error. We can tabulate these results in the same way as we might do so for false positive and false negative errors.
Python imports
End of explanation
# Crossvalidation results file
resultsfile = os.path.join("datasets", "10-fold_CV.tab")
# Load predictions from file
results = pd.read_csv(resultsfile, sep='\t', index_col=0)
print(results.shape)
print(results.columns)
# Inspect crossvalidation results
results.head()
Explanation: <a id="load"></a>
Load results
We load data from the multiplexed run that was performed on the JHI cluster, as described in README.md.
The column containing the experimentally-measured predictor (input intensity) is headed log_input, and the column containing the measured output probe intensity is headed log_output.
The predicted output mean and median values for 1000 iterations of two chains are found in the columns y_pred_mean and y_pred_median. 5%, 25%, 75% and 95% percentiles for predicted output are given in the columns y_pred_5pc, y_pred_25pc, y_pred_75pc and y_pred_95pc. These can be combined to provide 50% and 90% credibility intervals for each predicted output intensity.
End of explanation
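A quick optional sanity check of the coverage of the crossvalidation set, assuming the probe and locus_tag columns listed above:
print(results['probe'].nunique(), "probes across", results['locus_tag'].nunique(), "locus tags")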
# File containing pickled fit from notebook 02
fitfile = os.path.join("model_fits", "full_model_fit.pkl")
# Load array measurements and get locus tags/arrays as indices
datafile = os.path.join("datasets", "normalised_array_data.tab")
indata = pd.read_csv(datafile, sep="\t")
locus_tags = indata['locus_tag'].unique()
arrays = indata['repXtrt'].unique()
# Load the pickled fit into `estimates`
fit = pd.read_pickle(open(fitfile, 'rb'))
(estimates_by_probe, estimates) = tools.extract_variable_summaries(fit, 'df',
['a', 'b', 'g', 'd'],
[arrays, locus_tags, arrays, locus_tags],
indata)
estimates.head() # inspect the data
Explanation: <div class="alert-warning">
The crossvalidation data has 49872 rows (one for each combination of probe, treatment and replicate) and 20 columns, named as shown.
</div>
<a id="loadfit"></a>
Load full fit
We want to investigate how predictive performance compares with our estimate of parameters from a fit on full data, to see - for example - whether the genes for which we have confidence in a treatment effect are associated with good predictions of output probe intensity.
We load the model fit results, generated by notebook 02-full_model_fit.ipynb, from the associated output pickle file.
End of explanation
# Columns to keep from the merged data
resultscols = ['locus_tag', 'probe', 'replicate', 'treatment',
'log_input', 'log_output', 'y_pred_5pc', 'y_pred_median', 'y_pred_95pc',
'd_2.5pc', 'd_25pc', 'd_median', 'd_75pc', 'd_97.5pc']
# Merge fit estimates with observed data
results_merged = pd.merge(results, estimates,
how='outer',
left_on='locus_tag', right_on='locus_tag').loc[:, resultscols]
results_merged.head()
Explanation: The estimates dataframe contains the notebook 02-full_model_fit.ipynb estimates of parameters for each gene, with percentiles enabling a 95% or 50% credibility interval to be estimated.
<a id="merge"></a>
Merge crossvalidation and fit
We join the crossvalidation data with the fits for the corresponding genes, on the basis of locus_tag, to make plotting and analysis easier. We also reduce the columns in the dataset to a subset describing the locus tag, probe ID, the treatment and replicate factor, the measured log-transformed input and output intensities, and the median and 90% CI for the predicted output intensity. We also keep the estimated median $\delta$ and associated 95% CI so we can investigate the relationship with treatment effect.
End of explanation
# Calculate absolute and relative prediction error
results_merged['y_pred_abs_error'] = results_merged['y_pred_median'] - results_merged['log_output']
results_merged['y_pred_rel_error'] = results_merged['y_pred_abs_error']/results_merged['log_output']
# Calculate observed and predicted differences
results_merged['y_diff_pred'] = results_merged['y_pred_median'] - results_merged['log_input']
results_merged['y_diff_obs'] = results_merged['log_output'] - results_merged['log_input']
Explanation: <a id="errors"></a>
Calculate prediction error
We create new columns to represent the error in crossvalidation prediction of output intensity:
y_pred_abs_error: the absolute error (y_pred_median - log_output)
y_pred_rel_error: the relative error (y_pred_abs_error/log_output)
and to represent the absolute difference between the measured input and: (i) the measured output; (ii) the predicted output
y_diff_pred: absolute difference between input and prediction (y_pred_median - log_input)
y_diff_obs: absolute difference between input and measured output (log_output - log_input)
End of explanation
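Before plotting, a quick numerical summary of the two new error columns gives a feel for their scale (an optional check):
results_merged[['y_pred_abs_error', 'y_pred_rel_error']].describe()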
# Plot prediction errors boxplots
tools.plot_errors(results_merged)
Explanation: <a id="errdist"></a>
Error distributions
We inspect the distribution of these errors directly by plotting.
<br /><div class="alert-success">
<b>There are some strongly outlying points with large error, but most prediction errors, either relative or absolute, are close to zero.</b>
</div>
End of explanation
# Plot relative and absolute error wrt input and output
tools.plot_error_vs_column(results_merged, "log_input")
tools.plot_error_vs_column(results_merged, "log_output")
Explanation: <a id="erroutput"></a>
Error with respect to measured intensity
By plotting the absolute and relative error in output intensity prediction against measured values, we can get an idea of whether the errors are uniformly distributed, or likely to be associated primarily with weak measured intensities.
We might expect an overabundance of large relative error for low measured intensity values, as the relative effect of a constant absolute error will be greater for these smaller values.
End of explanation
# Plot errors relative to 25% percentile for estimate of treatment effect
tools.plot_error_vs_column(results_merged, "d_25pc")
tools.plot_error_vs_column(results_merged, "d_median")
Explanation: <div class="alert-success">
<b>The plots above indicate that absolute prediction errors are small for probes with intensities of 6 or greater. In particular, probes with strong intensities have very low absolute and relative error.</b>
</div>
<a id="errtrt"></a>
Error with respect to estimated treatment effect
By relating the absolute and relative error to the estimated treatment effect, we can interpret whether we should continue to be confident in the results of notebook 02-full_model_fit.ipynb. If the 25% percentile for our estimate of $\delta$ is greater than zero, then the 50% CI for that probe's gene does not include zero, and we interpret this as a positive effect due to treatment.
End of explanation
# Subset data to positive estimates of delta only
trt_pos = results_merged[results_merged['d_25pc'] > 0]
# Inspect results
trt_pos.head()
# Plot errors relative to 25% percentile for estimate of treatment effect
tools.plot_error_vs_column(trt_pos, "d_25pc")
tools.plot_error_vs_column(trt_pos, "d_median")
Explanation: <br /><div class="alert-warning">
Median and 25% percentile values of $\delta_{j[i]}$ close to or less than zero are associated most frequently with large errors. A small number of large absolute and relative errors are associated with values that are positive and quite far from zero.
</div>
We subset the data to investigate this more closely, in the dataset trt_pos representing the 115 locus tags with an estimated positive treatment effect.
End of explanation
# Plot errors for positive treatment effects wrt measured intensities
tools.plot_error_vs_column(trt_pos, "log_input")
tools.plot_error_vs_column(trt_pos, "log_output")
Explanation: By visual inspection, most probe prediction errors for the "treatment positive" probes are small in relative terms, but appear to be Normally distributed in absolute terms with respect to the value of d_median. We can examine the error distributions for these probes with respect to log input and output intensities, as above.
End of explanation
# Add a column for probe predictions that lie outwith the 90% credibility interval of prediction
results_merged['pred_success'] = (results_merged['log_output'] > results_merged['y_pred_5pc']) & \
(results_merged['log_output'] < results_merged['y_pred_95pc'])
# Inspect data
results_merged.head()
# Make a dataframe of missed predictions
errors = results_merged[results_merged['pred_success'] == False]
print(errors.shape, results_merged.shape, len(errors['locus_tag'].unique()))
Explanation: <br /><div class="alert-success">
These plots show that a large relative prediction error is associated mainly with probes that have low measured input or output intensity, less than ≈4 units.
</div>
<a id="errinterval"></a>
Error with respect to prediction interval
As an estimate of prediction accuracy, we can calculate the number of observed output intensities that lie outwith the 90% credibility interval of the prediction. We create the column pred_success, which contains True where the predicted output value lies in the 90% CI for the crossvalidation predictions.
End of explanation
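The misprediction rate can also be computed directly as a proportion of the new pred_success column (a one-line check):
print("misprediction rate: {:.1%}".format(1 - results_merged['pred_success'].mean()))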
# Distribution of probes in error, by locus tag
error_probe_counts = errors['locus_tag'].groupby(errors['locus_tag']).agg(['count'])
error_probe_counts.columns=['probes_in_error']
ax = sns.distplot(error_probe_counts['probes_in_error'], bins=max(error_probe_counts['probes_in_error']))
ax.set_title("probes in error, by locus tag")
ax.set_xlim(0, max(error_probe_counts['probes_in_error']));
Explanation: <div class="alert-success">
<b>This identifies 4048/49872 probe predictions (an 8% misprediction rate!), covering 1101 locus_tags in total.</b>
</div>
We can gain an insight into the number of probes that are likely to be in error for any particular locus tag, by plotting the distribution of their counts:
End of explanation
# Subset data to positive estimates of delta only
trt_pos = results_merged[results_merged['d_25pc'] > 0]
trt_errors = trt_pos[trt_pos['pred_success'] == False]
print(trt_errors.shape, trt_pos.shape, len(trt_errors['locus_tag'].unique()))
Explanation: Most of the locus tags with prediction errors have errors in 4 probes or fewer (out of an average of six or so), so in general we might expect most probes for most locus tags to be relatively well-predicted by our model.
But is this the case for the locus tags with a predicted positive treatment effect?
<a id="#errpos"></a>
Prediction errors for positive treatment effects
We can examine the distribution of crossvalidation prediction errors for the locus tags with positive estimated treatment effect, and compare this to the distribution for the dataset as a whole. If these are similar, then there may be no systematic misprediction for the positive estimated treatment values.
End of explanation
trt_error_probe_counts = trt_errors['locus_tag'].groupby(trt_errors['locus_tag']).agg(['count'])
trt_error_probe_counts.columns=['probes_in_error']
ax = sns.distplot(trt_error_probe_counts['probes_in_error'], bins=max(trt_error_probe_counts['probes_in_error']))
ax.set_title("probes in error, by locus tag (positive trt effect)")
ax.set_xlim(0, max(trt_error_probe_counts['probes_in_error']));
Explanation: <div class="alert-danger">
<b>We find that 389/1566 of our probes (around 25% of predictions, covering 77 of the 115 locus tags with an estimated positive effect) have measured output intensity that lies outwith the 90% CI for the crossvalidation prediction. This is a considerably higher rate than for the dataset as a whole.</b>
</div>
Plotting the distribution of the number of probes in error for each locus tag also shows a noisier distribution than that for the main dataset, with some locus tags having a much larger number of probe predictions that appear to be in error:
End of explanation
# Create columns for error types
# Type S: the prediction failed and the predicted difference has the opposite sign to the observed difference
trt_pos['type_s'] = (trt_pos['pred_success'] == False) & (trt_pos['y_diff_pred']/trt_pos['y_diff_obs'] < 0)
trt_pos['type_m'] = (trt_pos['type_s'] == False) & (trt_pos['pred_success'] == False)
# Inspect data
trt_pos.head()
# How many errors of each type?
print("Type S errors: %d" % sum(trt_pos['type_s']))
print("Type M errors: %d" % sum(trt_pos['type_m']))
Explanation: The modal number of probes in error is one, but the distribution has a different shape to that for the complete dataset, suggesting a systematic failure of the model to predict precisely the output intensity for some probes.
<a id="candidates"></a>
Prediction errors for positive treatment effect candidates
We break down the prediction errors for our positive treatment effect candidates into two types (after Gelman & Carlin (2014) DOI: 10.1177/1745691614551642).
Type S (sign): the difference between predicted output intensity and measured input intensity has the opposite sign to that for the difference between measured output intensity and measured input intensity
Type M (magnitude): the direction of the prediction is correct (no Type S error), but the magnitude is incorrect
<br /><div class="alert-warning">
A large proportion of predictions with a Type S error may be critically indicative of a misestimate of $\delta$ for a given locus tag. By contrast, a preponderance of Type M errors might indicate a generally correct estimate that there is an effect, but for some reason (perhaps a faulty run that has a strong effect on the crossvalidation training set) misestimates the magnitude of $\delta$.
</div>
<a id="calc_errors"></a>
Calculating Type M and Type S errors
We create two new columns in the trt_pos dataframe that contains estimates for those locus tags with an estimated positive value for $\delta$, the effect on treatment/passage:
type_s: there is a prediction error, and the sign of the predicted difference is opposite to that of the observed difference - trt_pos['y_diff_pred']/trt_pos['y_diff_obs'] < 0
type_m: there is a prediction error, but it's not Type S - trt_pos['type_s'] is False and trt_pos['pred_success'] is False
End of explanation
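As a quick cross-check (a sketch that assumes the trt_pos columns created above), the two error flags can be tallied per locus tag:
# Sketch: tally Type S / Type M flags per locus tag (assumes trt_pos as defined above)
error_summary = (trt_pos.groupby('locus_tag')[['type_s', 'type_m']]
                        .sum()
                        .sort_values('type_m', ascending=False))
error_summary.head()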
# Violinplot of prediction error for all probes
sns.violinplot(trt_pos['log_output'] - trt_pos['y_pred_median']);
Explanation: <div class="alert-success">
<b>There are no observed Type S errors.</b>
</div>
All errors in the predicted output intensity are Type M errors: errors in the predicted effect magnitude, where the effect is observed to be in the correct direction, with respect to the input intensity.
<div class="alert-success">
<b>We should, therefore, be confident that our predictions of $\delta$ are accurate in terms of their positive direction.</b>
</div>
<a id="overunder"></a>
Over- and Under-estimates
The question arises whether the errors are typically underestimates or overestimates, for the probes that are in error. If we are systematically overestimating output probe intensity, then we might consider our estimates of $\delta$ to be generally too large. Alternatively, if we are systematically underestimating output probe intensity, we might think that our estimates are too small.
End of explanation
# Violinplot of prediction error for probes where prediction fails
trt_errors = trt_pos[trt_pos['pred_success'] == False]
print("%d positive errors" % sum(trt_errors['y_pred_abs_error'] > 0))
print("%d negative errors" % sum(trt_errors['y_pred_abs_error'] < 0))
sns.violinplot(trt_errors['y_pred_abs_error']);
Explanation: In general, most probe predictions are within one log unit of the observed value for the probes corresponding to positive treatment effects. This is encouraging and suggests that, in general, the parameter estimates and model are appropriate.
End of explanation
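To put a number on that statement (a sketch using the same trt_pos columns as above), we can compute the fraction of probe predictions that fall within one log unit of the observation:
# Sketch: fraction of probe predictions within +/- 1 log unit of the observed intensity
abs_err = (trt_pos['log_output'] - trt_pos['y_pred_median']).abs()
print("Probes within 1 log unit: {:.1%}".format((abs_err <= 1).mean()))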
# Count number of probes and failed predictions, by locus tag
collist = ['d_2.5pc', 'd_25pc', 'd_median', 'd_75pc', 'd_97.5pc',
'pred_success', 'type_m']
lt_pos = trt_pos.groupby('locus_tag').agg(sum)[collist]
lt_pos['fail_prop'] = lt_pos['type_m']/(lt_pos['type_m'] + lt_pos['pred_success'])
lt_pos.reset_index(inplace=True)
# Inspect output
lt_pos.head()
# Distribution of proportion of failed probes
sns.distplot(lt_pos['fail_prop'], bins=10);
Explanation: <div class="alert-success">
<b>The violinplot of errors for probes whose observed output isn't in the 90% CI for the predicted output indicates that most errors are less than 2 log units from the observed value in either direction. The number of positive and negative errors are also very similar, suggesting that there is no systematic bias towards over- or under-estimation.</b>
</div>
<a id="questionable"></a>
Questionable locus tags
Although our results seem to be on the whole quite sound, there are some locus tags for which the results may be more questionable. We can identify the total number of probes for each locus tag, and the number of probe output predictions that are in error, and determine which locus tags appear to have an unusually high proportion of errors as having more questionable estimates.
We generate a dataframe, indexed by locus tag, with a column fail_prop that describes the proportion of probe predictions that are in error.
End of explanation
# Make list of locus tags with more than 50% probe errors
q_tags = lt_pos.loc[lt_pos['fail_prop'] > 0.5]
print(q_tags.shape, sum(q_tags['type_m']))
Explanation: From this data, we see that most locus tags in this dataset have no probe errors, and a small proportion have more than 50% probe errors. We can inspect the predictions for those locus tags directly.
End of explanation
# Plot predictions for each questionable locus tag
print(q_tags['locus_tag'])
for lt in q_tags['locus_tag']:
tools.plot_locustag_predictions(trt_pos, lt)
Explanation: <div class="alert-success">
<b>This identifies 10 questionable locus tags, with 110 Type M probe errors, in total.</b>
</div>
We use the function plot_locustag_predictions() to visualise the prediction results for these genes directly. In these plots, values 0-2 indicate control replicates, and values 3-5 treatment replicates. The grey points show measured input intensity for the probe, and the black points show measured output intensity. The coloured points show predicted median values, and the bars indicate the 90% credibility interval for the prediction.
Yellow and blue bars indicate that the observed output intensity lies within the 90% CI; green and red bars indicate that the observed output value lies outwith the 90% CI.
End of explanation |
14,793 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Features as a Representation of Time Series for Classification
version 0.1
By AA Miller (Northwestern CIERA/Adler Planetarium)
10 June 2019
This lecture is about machine learning...
But honestly, this lecture isn't really about machine learning...
This lecture is about the classification of variable sources in astronomical survey data. There are many different ways to approach such a classification problem, and today we will use a machine learning approach to accomplish this task.
As a(n incredibly) brief reminder, machine learning algorithms use a training set with known labels$^1$ to develop a mapping between the data and the labels. You can, and should, think of this mapping as a black box. The mapping can occur between the raw data and the labels (e.g., neural net classification of images) or between representative features$^2$ and the labels.
$^1$ Labels are the parameters of interest to be estimated (a variable star classification in this case).
$^2$Features = measured properties of the sources.
Once the mapping between the data and the labels has been learned from the training set, new classifications can be obtained by applying the machine learning model to sources where the labels are unknown.
Break Out Problem 1
Why would it be useful to measure features from astronomical light curves in order to classify them in an automated fashion?
Solution to Break Out 1
Write your answer here
The peculiarities of astronomical light curves (observational gaps, heteroskedastic uncertainties, etc.) make it difficult to compare any 2 random sources. For example, the cadence of observations in one portion of the sky will ultimately be very different from any other point on the sky separated by an appreciable distance ($\sim 100^\circ$ for LSST).
The use of features allows us to place all sources on the same basis. In this way it then becomes possible to make 1 to 1 comparisons between sources with different observing sequences.
Problem 1) The ML Training Set
Here we are going to define some helper functions that you may find helpful in your efforts to build this variable star classification model.
These functions include lc_plot, which will produce a nice plot of the light curve showing the full duration of the observations as well as a phase folded light curve.
And read_lc, which can quickly read the data format provided for the light curves.
Step1: If you did not already have the training set, download and unpack the tarball.
%> tar -zxvf feature_engineering.tar.gz
We will be working with data from the ASAS survey, and I have already curated a training set that only includes stars in 1 of 7 classes: Mira variables, RR Lyrae stars, detached eclipsing binaries, semi-detached eclipsing binaries, W UMa binaries, Cepheids, and R Cor Bor stars.
Step2: Problem 1b
Plot an example RR Lyrae light curve.
Step3: Problem 1c
Plot an example detached eclipsing binary (EB) light curve.
Step4: Problem 1d
Plot an example semi-detached EB light curve.
Step5: Problem 1e
Plot an example W UMa EB light curve.
Step6: Problem 1f
Plot an example Cepheid light curve.
Step7: Problem 1g
Plot an example R Cor Bor star light curve.
Step8: Problem 2) Worry About the Data
Feature engineering is all about domain expertise. Before you begin the process of adding, creating, and removing features, it is important to develop some intuition for what features might be helpful (or in other words... worry about the data).
Problem 2a
Examine the light curves of at least two sources from each class in the training set. In the text cell below, write characteristics of the different classes that you notice which may be helpful for classification.
The class of every source in the training set is listed in training_sources.csv. The helper functions in Problem 1 can be used to examine the light curves.
Hint – if you want to examine phase-folded light curves, such as those shown above, you will need to measure the period for each source. Check your notes from Session 13 if you don't remember how to do this.
Step9: write your answer here
Problem 3) Machine Learning Classification
To classify newly observed light curves we need a machine learning model.
Previously I said this is not a machine learning problem, and that is because we will all use the same pre-specified model. I have provided a file training_sources.csv which includes the name of the sources, along with some features, and their classification.
Problem 3a
Read in the training set file, and create a feature vector X and label array y.
Step10: The provided training set comes with 3 features: the mean magnitude of the observations, the total number of observations obtained, and the duration of the observations.
Step11: Problem 4) Feature Engineering
It should now be clear why this is not a machine learning problem. We have, in this case, provided all the machine learning code that you will need.
Instead, this is a feature engineering problem. Feature engineering requires the utilization of domain knowledge to create new features for a data set to improve the performance of machine learning algorithms.
Add new features - if necessary
- Utilize domain knowledge to create/compute new features
- Combine features or represent them in an alternative fashion
Remove noisy/uninformative features - if necessary
- Determine feature importance (RF)
- Forward/backward selection to iteratively remove features
Below I have provided a partial function calc_feature which you can alter to calculate new features for the data set.
Step12: Your objective now is to apply your domain knowledge of astronomical signals to improve the provided machine learning model via feature engineering (and only feature engineering - do not attempt to use other models or change the model parameters).
Below are 3 problems you should attempt to answer, but note - these problems are not necessarily independent and do not need to be completed sequentially.
With a partner answer the following
Step13: After you have read that data you must calculate features on the test set using exactly the same method that you employed for the training set.
Note - if you created a new calc_feature script for every feature that you calculated, this should be straightforward.
Step14: Problem 5a
Calculate the accuracy of your model via an analysis of the independent test set. A helper function has been provided for you to do this below. | Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as GridSpec

def lc_plot(t, m, m_unc, period=0.0):
if period == 0.0:
fig, ax = plt.subplots()
ax.errorbar(t, m, m_unc,
fmt='o', color='MediumAquaMarine',
mec="0.2",mew=0.5)
ax.set_xlabel('HJD (d)')
ax.set_ylabel(r'$V_\mathrm{ASAS}\;(\mathrm{mag})$')
fig.gca().invert_yaxis()
elif period != 0.0:
fig = plt.figure()
gs = GridSpec.GridSpec(5, 1)
ax_full = plt.subplot(gs[:2, :])
ax_full.errorbar(t, m, m_unc,
fmt='o', color='MediumAquaMarine',
mec="0.2",mew=0.5)
ax_full.set_xlabel('HJD (d)')
ax_full.set_ylabel(r'$V_\mathrm{ASAS}\;(\mathrm{mag})$')
plt.gca().invert_yaxis()
ax_phase = plt.subplot(gs[2:, :])
for repeat in [-1, 0, 1]:
ax_phase.errorbar(t/period % 1 + repeat, m, m_unc,
fmt='o', color='MediumAquaMarine',
mec="0.2",mew=0.5)
ax_phase.axvline(x=0, ls='--', color='0.8', lw=1, zorder=3)
ax_phase.axvline(x=1, ls='--', color='0.8', lw=1, zorder=3)
ax_phase.set_xlim(-0.2, 1.2)
ax_phase.set_xlabel('Phase')
ax_phase.set_ylabel(r'$V_\mathrm{ASAS}\;(\mathrm{mag})$')
plt.gca().invert_yaxis()
plt.tight_layout()
def read_lc(filename):
hjd, mag, mag_unc = np.loadtxt(filename, unpack=True)
return hjd, mag, mag_unc
Explanation: Features as a Representation of Time Series for Classification
version 0.1
By AA Miller (Northwestern CIERA/Adler Planetarium)
10 June 2019
This lecture is about machine learning...
But honestly, this lecture isn't really about machine learning...
This lecture is about the classification of variable sources in astronomical survey data. There are many different ways to approach such a classification problem, and today we will use a machine learning approach to accomplish this task.
As a(n incredibly) brief reminder, machine learning algorithms use a training set with known labels$^1$ to develop a mapping between the data and the labels. You can, and should, think of this mapping as a black box. The mapping can occur between the raw data and the labels (e.g., neural net classification of images) or between representative features$^2$ and the labels.
$^1$ Labels are the parameters of interest to be estimated (a variable star classification in this case).
$^2$Features = measured properties of the sources.
Once the mapping between the data and the labels has been learned from the training set, new classifications can be obtained by applying the machine learning model to sources where the labels are unknown.
Break Out Problem 1
Why would it be useful to measure features from astronomical light curves in order to classify them in an automated fashion?
Solution to Break Out 1
Write your answer here
The peculiarities of astronomical light curves (observational gaps, heteroskedastic uncertainties, etc.) make it difficult to compare any 2 random sources. For example, the cadence of observations in one portion of the sky will ultimately be very different from any other point on the sky separated by an appreciable distance ($\sim 100^\circ$ for LSST).
The use of features allows us to place all sources on the same basis. In this way it then becomes possible to make 1 to 1 comparisons between sources with different observing sequences.
Problem 1) The ML Training Set
Here we are going to define some helper functions that you may find helpful in your efforts to build this variable star classification model.
These functions include lc_plot, which will produce a nice plot of the light curve showing the full duration of the observations as well as a phase folded light curve.
And read_lc, which can quickly read the data format provided for the light curves.
End of explanation
# Mira example
t, m, m_unc = read_lc("./training_lcs/181637+0341.6")
lc_plot(t, m, m_unc, period=150.461188)
Explanation: If you did not already have the training set, download and unpack the tarball.
%> tar -zxvf feature_engineering.tar.gz
We will be working with data from the ASAS survey, and I have already curated a training set that only includes stars in 1 of 7 classes: Mira variables, RR Lyrae stars, detached eclipsing binaries, semi-detached eclipsing binaries, W UMa binaries, Cepheids, and R Cor Bor stars.
[If you don't know what any of these things are, don't worry, we have examples below.]
Problem 1a
Plot an example Mira light curve.
Hint - just execute the cell.
End of explanation
# RRL example
t, m, m_unc = read_lc("./training_lcs/011815-3912.8")
lc_plot(t, m, m_unc, period=0.510918)
Explanation: Problem 1b
Plot an example RR Lyrae light curve.
End of explanation
# dEB example
t, m, m_unc = read_lc("./training_lcs/153835-6727.8")
lc_plot(t, m, m_unc, period=2*1.107174)
Explanation: Problem 1c
Plot an example detached eclipsing binary (EB) light curve.
End of explanation
# aEB example
t, m, m_unc = read_lc("./training_lcs/141748-5311.2")
lc_plot(t, m, m_unc, period=1.514158)
Explanation: Problem 1d
Plot an example semi-detached EB light curve.
End of explanation
# WU example
t, m, m_unc = read_lc("./training_lcs/193546-1136.3")
lc_plot(t, m, m_unc, period=0.424015)
Explanation: Problem 1e
Plot an example W UMa EB light curve.
End of explanation
# Cepheid example
t, m, m_unc = read_lc("./training_lcs/065640+0011.4")
lc_plot(t, m, m_unc, period=4.022837)
Explanation: Problem 1f
Plot an example Cepheid light curve.
End of explanation
# R Cor Bor example
t, m, m_unc = read_lc("./training_lcs/163242-5315.6")
lc_plot(t, m, m_unc, period=0.0)
Explanation: Problem 1g
Plot an example R Cor Bor star light curve.
End of explanation
# complete
Explanation: Problem 2) Worry About the Data
Feature engineering is all about domain expertise. Before you begin the process of adding, creating, and removing features, it is important to develop some intuition for what features might be helpful (or in other words... worry about the data).
Problem 2a
Examine the light curves of at least two sources from each class in the training set. In the text cell below, write characteristics of the different classes that you notice which may be helpful for classification.
The class of every source in the training set is listed in training_sources.csv. The helper functions in Problem 1 can be used to examine the light curves.
Hint – if you want to examine phase-folded light curves, such as those shown above, you will need to measure the period for each source. Check your notes from Session 13 if you don't remember how to do this.
End of explanation
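If you need a period estimate for the phase-folded plots, one option (a sketch, not part of the provided starter code; it assumes astropy is installed) is a Lomb-Scargle periodogram:
# Sketch: estimate a period with a Lomb-Scargle periodogram (assumes astropy is available)
from astropy.timeseries import LombScargle
t, m, m_unc = read_lc("./training_lcs/011815-3912.8")   # the RR Lyrae example from above
frequency, power = LombScargle(t, m, m_unc).autopower(maximum_frequency=10)
best_period = 1 / frequency[np.argmax(power)]
print("Best period: {:.6f} d".format(best_period))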
train_df = pd.read_csv("training_sources.csv")
X_train = np.array(train_df[["mean", "nobs", "duration"]])
y_train = np.array(train_df["Class"])
Explanation: write your answer here
Problem 3) Machine Learning Classification
To classify newly observed light curves we need a machine learning model.
Previously I said this is not a machine learning problem, and that is because we will all use the same pre-specified model. I have provided a file training_sources.csv which includes the name of the sources, along with some features, and their classification.
Problem 3a
Read in the training set file, and create a feature vector X and label array y.
End of explanation
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

def calc_cv_score(X, y):
rf_clf = RandomForestClassifier(n_estimators=150, min_samples_leaf=1)
cv_score = cross_val_score(rf_clf, X, y, cv=10, n_jobs=-1)
print("These features have CV accuracy = {:.4f} +/- {:.4f}".format(np.mean(cv_score), np.std(cv_score, ddof=1)))
calc_cv_score( # complete
Explanation: The provided training set comes with 3 features: i) the mean magnitude of the observations, ii) the total number of observations obtained, and iii) the duration of the observations.
Problem 3b
Using the helper function provided below, calculate the 10-fold cross-validation accuracy of a random forest machine learning model using the 3 features provided above.
Note - do not adjust any part of calc_cv_score throughout this exercise.
End of explanation
def calc_feature(df, train=True):
if train==True:
lc_dir = "./training_lcs/"
else:
lc_dir = "./test_lcs/"
feature = np.empty(len(df))
for source_num, asas_id in enumerate(df["ASAS_ID"]):
t, m, mu = read_lc(lc_dir+asas_id)
# feature calculations
# feature calculations
# feature calculations
feature[source_num] = feat_val
return feature
Explanation: Problem 4) Feature Engineering
It should now be clear why this is not a machine learning problem. We have, in this case, provided all the machine learning code that you will need.
Instead, this is a feature engineering problem. Feature engineering requires the utilization of domain knowledge to create new features for a data set to improve the performance of machine learning algorithms.
Add new features - if necessary
- Utilize domain knowledge to create/compute new features
- Combine features or represent them in an alternative fashion
Remove noisy/uninformative features - if necessary
- Determine feature importance (RF)
- Forward/backward selection to iteratively remove features
Below I have provided a partial function calc_feature which you can alter to calculate new features for the data set.
End of explanation
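As a concrete illustration only (a hypothetical feature, not a suggested answer to Problems 4a-4c), a completed version of calc_feature might compute the standard deviation of the magnitudes:
# Sketch: one possible completed calc_feature (hypothetical standard-deviation feature)
def calc_std_feature(df, train=True):
    lc_dir = "./training_lcs/" if train else "./test_lcs/"
    feature = np.empty(len(df))
    for source_num, asas_id in enumerate(df["ASAS_ID"]):
        t, m, mu = read_lc(lc_dir + asas_id)
        feature[source_num] = np.std(m)
    return feature
# Example usage: append the new feature column to the training set
# X_train = np.hstack([X_train, calc_std_feature(train_df).reshape(-1, 1)])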
test_df = pd.read_csv("test_sources.csv")
X_test = np.array(test_df[["mean", "nobs", "duration"]])
y_test = np.array(test_df["Class"])
Explanation: Your objective now is to apply your domain knowledge of astronomical signals to improve the provided machine learning model via feature engineering (and only feature engineering - do not attempt to use other models or change the model parameters).
Below are 3 problems you should attempt to answer, but note - these problems are not necessarily independent and do not need to be completed sequentially.
With a partner answer the following:
Problem 4a
What is the best simple feature you can add to the model to improve the classification performance?
Why simple? Because speed matters. If you need to classify $10^7$ LSST sources, you cannot run models that take several minutes to hours to run...
Note - simple means can be executed on the full training set in a matter of seconds ($\lesssim 100\,\mathrm{s}$).
Problem 4b
What is the best individual feature you can add to the model to improve the classification performance?
Problem 4c
What combination of features provides the best classification accuracy for the model?
Hint 1 - use calc_cv_score to measure the classification performance.
Hint 2 - if your efforts are limited by file read times, consider calculating multiple features within the function calc_feature.
Hint 3 - you are in pairs for a reason. If you decide to attempt a very complicated feature that requires long runtimes, proceed with that calculation on one person's laptop, while working on some other feature calculation on the other person's laptop.
Hint 4 - be very careful about book keeping and variable names. You don't want to have to repeat a complex calculation because you accidentally renamed an active variable in your namespace.
Hint 5 - do not destroy any code that you write to calculate features. Ultimately, you will need to apply your feature calculations to a test set of new sources and it will be essential that the calculations are done in a reproducible way.
Hint 6 - pay attention to how long it takes for your feature calculations to run. If you have anything that takes $\gtrsim 30\,\mathrm{min}$ let me know immediately.
We will compare answers at the end of the lecture.
Problem 5) Testing the Model on Independent Light Curves
You can load the test set using the commands below.
Do not snoop into the test set! This problem is only for the very, very end (i.e., with about 10 min to go in this breakout).
End of explanation
### Calculate features for the test set here
Explanation: After you have read that data you must calculate features on the test set using exactly the same method that you employed for the training set.
Note - if you created a new calc_feature script for every feature that you calculated, this should be straightforward.
End of explanation
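For instance, reusing the hypothetical calc_std_feature sketch from above, the identical calculation can be applied to the test light curves and stacked onto X_test:
# Sketch: apply the same (hypothetical) feature calculation to the test set
new_feat_test = calc_std_feature(test_df, train=False)
X_test = np.column_stack([X_test, new_feat_test])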
from sklearn.metrics import accuracy_score
def calc_model_acc(X_train, y_train, X_test, y_test):
'''
Parameters
----------
X_train - arr_like, (n_source, n_feat) shape
Feature set for the training set. A 2D array
containing one row for every source, and one
column for every feature in the training set.
y_train - arr_like, (n_source,) shape
Labels for the training set, with one label
per source.
X_test - arr_like, (n_source, n_feat) shape
Feature set for the test set. A 2D array
containing one row for every source, and one
column for every feature in the training set.
y_test - arr_like, (n_source,) shape
Labels for the test set, with one label
per source.
'''
rf_clf = RandomForestClassifier(n_estimators=150, min_samples_leaf=1)
rf_clf.fit(X_train, y_train)
y_pred = rf_clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print("Your model accuracy is: {:.2f}".format(accuracy*100))
Explanation: Problem 5a
Calculate the accuracy of your model via an analysis of the independent test set. A helper function has been provided for you to do this below.
End of explanation |
14,794 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Data Augmentation
This notebook was created to apply data augmentation to our set of faces, with the goal of enlarging it and offering a more varied set of images.
Load the faces
Step1: Perform the necessary imports
Step2: First of all, let's run a small demonstration of the results obtained with a single image.
Step3: Show the results
Step4: Having seen an example on a single image, we now apply the data augmentation technique to our entire set of images. | Python Code:
from sklearn.datasets import fetch_lfw_people
# Import using one of the two alternatives
# The first alternative returns the images in RGB, with their
# respective three channel values
faces = fetch_lfw_people(color = True)
positive_patches = faces.images
positive_patches.shape
Explanation: Data Augmentation
This notebook was created to apply data augmentation to our set of faces, with the goal of enlarging it and offering a more varied set of images.
Load the faces:
End of explanation
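The comment above mentions a second alternative; as a sketch (relying on scikit-learn's default behaviour), loading without color=True returns one grayscale 2-D array per face:
# Sketch: the second alternative - grayscale faces (scikit-learn's default)
faces_gray = fetch_lfw_people()
print(faces_gray.images.shape)   # (n_samples, height, width)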
%matplotlib inline
from skimage.exposure import rescale_intensity
from skimage import io, data
from skimage.transform import rescale
import matplotlib.pyplot as plt
import sys
sys.path.append("../../rsc/img/imgaug")
import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
from scipy import ndimage, misc
from matplotlib import gridspec
import six
import six.moves as sm
Explanation: Perform the necessary imports:
End of explanation
# Necessary type conversions
image = positive_patches[1]
image = np.array(image).astype(np.ubyte)
st = lambda aug: iaa.Sometimes(0.5, aug)
seq = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.Flipud(0.5),
st(iaa.Crop(percent=(0, 0.1))),
st(iaa.GaussianBlur((0, 3.0))),
st(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5)),
st(iaa.Dropout((0.0, 0.1), per_channel=0.5)),
st(iaa.Add((-10, 10), per_channel=0.5)),
st(iaa.Multiply((0.5, 1.5), per_channel=0.5)),
st(iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5)),
st(iaa.Grayscale(alpha=(0.0, 1.0), name="Grayscale")),
st(iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
translate_px={"x": (-16, 16), "y": (-16, 16)},
rotate=(-45, 45),
shear=(-16, 16),
order=[0, 1],
cval=(0, 1.0),
mode=ia.ALL
)),
st(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25))
],
random_order=True
)
grid = seq.draw_grid_v2(image, cols=8, rows=8)
Explanation: First of all, let's run a small demonstration of the results obtained with a single image.
End of explanation
# Display the resulting images
fig, axes = plt.subplots(nrows=8, ncols=8, figsize=(16, 16),
# sharex=True, sharey=True,
#subplot_kw={'adjustable':'box-forced'}
)
axes = axes.ravel()
for index in range(len(grid)):
axes[index].imshow(grid[index])
axes[index].axis('off');
fig.subplots_adjust(hspace=0.01, wspace=0.01, top=0.9, bottom=0, left=0,right=1)
plt.show()
Explanation: Show the results:
End of explanation
for img_index in range(len(positive_patches)):
img = positive_patches[img_index]
    # 1. Perform the necessary conversions
    # so that every image can be processed
img = np.array(img).astype(np.ubyte)
    # 2. Process the image
grid = seq.draw_grid_v2(img, cols=8, rows=8)
    # 3. Save the augmented images
for j in range(len(grid)):
io.imsave("../../rsc/img/imgaug/"+str(img_index)+str(j)+".jpg", grid[j])
Explanation: Having seen an example on a single image, we now apply the data augmentation technique to our entire set of images.
End of explanation |
14,795 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
UCI Datasets
Step1: The standard datasets are taken from the UCI Machine Learning Repository. For each dataset, header rows are added manually.
Binary
Ionosphere radar data, where 'good' is the positive label.
Step2: Pima Indians diabetes.
Step3: Sonar data, where we want to discriminate between sonar signals bounced off a metal cylinder and those bounced off a roughly cylindrical rock. Metal cylinder is considered as the positive label.
Step4: Prognostic Wisconsin breast cancer. Recurrence is the positive label.
Step5: MAGIC gamma telescope.
Step6: MiniBooNE particle identification
Step7: Multiclass
Classic iris dataset from Fisher with three classes.
Step8: Glass identification, seven classes.
Step9: Classifying a given silhouette as one of four types of vehicle.
Step10: Using chemical analysis to determine the origin of wines.
Step11: Page blocks | Python Code:
import os
import pandas as pd
import numpy as np
from mclearn.tools import fetch_data, download_data
%load_ext autoreload
%autoreload 2
uci_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/'
Explanation: UCI Datasets
End of explanation
url = uci_url + 'ionosphere/ionosphere.data'
dest = 'data/ionosphere.csv'
header = ','.join("x{0}".format(i) for i in np.arange(1, 35)) + ',target'
data = fetch_data(url, dest, header, label={'b': 0, 'g': 1})
data.head()
Explanation: The standard datasets are taken from the UCI Machine Learning Repository. For each dataset, header rows are added manually.
Binary
Ionosphere radar data, where 'good' is the positive label.
End of explanation
url = uci_url + 'pima-indians-diabetes/pima-indians-diabetes.data'
dest = 'data/pima.csv'
header = 'preg,glucose,diastolic,skin,insulin,bmi,pedi,age,target'
remove_missing = lambda df: df[df['diastolic'] > 0]
data = fetch_data(url, dest, header, process_fn=remove_missing)
data.head()
Explanation: Pima Indians diabetes.
End of explanation
url = uci_url + 'undocumented/connectionist-bench/sonar/sonar.all-data'
dest = 'data/sonar.csv'
header = ','.join('e{0}'.format(i) for i in np.arange(1, 61)) + ',target'
data = fetch_data(url, dest, header, label={'R': 0, 'M': 1})
data.head()
Explanation: Sonar data, where we want to discriminate between sonar signals bounced off a metal cylinder and those bounced off a roughly cylindrical rock. Metal cylinder is considered as the positive label.
End of explanation
url = uci_url + 'breast-cancer-wisconsin/wpbc.data'
dest = 'data/wpbc.csv'
header = 'id,target,time,rad1,text1,peri1,area1,smooth1,compact1,concave1,' \
'conpt1,sym1,fract1,rad2,text2,peri2,area2,smooth2,compact2,concave2,' \
'conpt2,sym2,fract2,rad3,text3,peri3,area3,smooth3,compact3,concave3,' \
'conpt3,sym3,fract3,tumor,lymph'
data = fetch_data(url, dest, header, label={'N': 0, 'R': 1})
data.head()
Explanation: Prognostic Wisconsin breast cancer. Recurrence is the positive label.
End of explanation
url = uci_url + 'magic/magic04.data'
dest = 'data/magic.csv'
header = 'length,width,size,conc,conc1,asym,m3long,m3trans,alpha,dist,target'
data = fetch_data(url, dest, header, label={'g': 1, 'h': 0})
data.head()
Explanation: MAGIC gamma telescope.
End of explanation
url = uci_url + '00199/MiniBooNE_PID.txt'
dest = 'data/miniboone.csv'
download_data(url, dest)
header = ['e{0}'.format(i) for i in np.arange(1, 51)]
data = pd.read_csv(dest, sep='\s+', skiprows=1,
header=None, names=header, na_values=[-999])
data['target'] = 1
data.loc[36499:, 'target'] = 0
data = data[['target']].join(data.drop('target', axis=1))
data.dropna(axis=0, how='any', inplace=True)
data.to_csv(dest, index=False, float_format='%.12g')
data.head()
Explanation: MiniBooNE particle identification
End of explanation
url = uci_url + 'iris/iris.data'
dest = 'data/iris.csv'
header = 'sepal_l,sepal_w,petal_l,petal_w,target'
data = fetch_data(url, dest, header)
data.head()
Explanation: Multiclass
Classic iris dataset from Fisher with three classes.
End of explanation
url = uci_url + 'glass/glass.data'
dest = 'data/glass.csv'
header = 'ri,na,mg,al,si,k,ca,ba,fe,target'
data = fetch_data(url, dest, header)
data.head()
Explanation: Glass identification, seven classes.
End of explanation
names = ['xaa', 'xab', 'xac', 'xad', 'xae', 'xaf', 'xag', 'xah', 'xai']
urls = ['{0}statlog/vehicle/{1}.dat'.format(uci_url, x) for x in names]
dest = 'data/vehicle.csv'
header = 'compact circ dcirc rrat prar mlen scat elon prr mlenr svarmaj ' \
'svarmin gy skewmaj skewmin kurtmin kurtmaj hol target placeholder'
data = fetch_data(urls, dest, header, sep=' ')
data.head()
Explanation: Classifying a given silhouette as one of four types of vehicle.
End of explanation
url = uci_url + 'wine/wine.data'
dest = 'data/wine.csv'
header = 'target,alcohol,malic,ash,alcash,mg,phenols,' \
'flav,nonflav,proan,color,hue,od280,proline'
data = fetch_data(url, dest, header)
data.head()
Explanation: Using chemical analysis to determine the origin of wines.
End of explanation
url = uci_url + 'page-blocks/page-blocks.data.Z'
zip_dest = 'data/pageblocks.csv.Z'
dest = 'data/pageblocks.csv'
download_data(url, zip_dest)
os.system('uncompress {filename}'.format(filename=dest))
header = ['height', 'length', 'area', 'eccen', 'pblack', 'pand',
'meantr', 'blackpix', 'blackand', 'wbtrans', 'target']
data = pd.read_csv(dest, sep='\s+', header=None, names=header)
data = data[['target']].join(data.drop('target', axis=1))
data.dropna(axis=0, how='any', inplace=True)
data.to_csv(dest, index=False, float_format='%.12g')
data.head()
Explanation: Page blocks
End of explanation |
14,796 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Problem-set-Jupyter-Pyplot-and-Numpy
Write a note about the data set
Fisher's Iris Data Set is a well known data set that has become a common test case in machine learning. Each row in the data set is comprised of four numeric values for petal length, petal width, sepal length and sepal width. The row also contains the type of iris flower (one of three: Iris setosa, Iris versicolor, or Iris virginica).
Step1: Create a simple plot
Step2: Create a more complex plot
Step3: Use seaborn
Step4: Fit a line
Step5: Calculate the R-squared value
Step6: Fit another line
Step7: Calculate the R-squared value
Step8: Use gradient descent
Gradient Descent is an approximation technique. To utilize this approximation technique, we guess the value that we wish to approximate and iteratively improve that guess.
Step9: To the human eye it is difficult to see a difference between the best fit line and the best fit line approximated by gradient descent.
Step10: However, the results from the two techniques are in fact different. With both results for m and c differing after the eleventh decimal point, the gradient descent technique did manage to approximate adequate results; although inexact and inaccurate. | Python Code:
import numpy as np
# Load in data from csv file.
sepal_length, sepal_width, petal_length, petal_width = np.genfromtxt('../data/IRIS.csv', delimiter=',', usecols=(0,1,2,3), unpack=True, dtype=float)
iris_class = np.genfromtxt('../data/IRIS.csv', delimiter=',', usecols=(4), unpack=True, dtype=str)
# Loaded the columns into separate variables for ease of use.
Explanation: Problem-set-Jupyter-Pyplot-and-Numpy
Write a note about the data set
Fisher's Iris Data Set is a well known data set that has become a common test case in machine learning. Each row in the data set is comprised of four numeric values for petal length, petal width, sepal length and sepal width. The row also contains the type of iris flower (one of three: Iris setosa, Iris versicolor, or Iris virginica).
According to Lichman [1],
"One class is linearly separable from the other 2; the latter are NOT linearly separable from each other".
Types are clustered together and can be analysed to distinguish or predict the type of iris flower by its measurements (petal length, petal width, sepal length and sepal width)[2].
References:
[1] Lichman, M. (2013). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.
[2] True, Joseph - Content Data Scientist (2015). IBM Watson Analytics [https://www.ibm.com/communities/analytics/watson-analytics-blog/watson-analytics-use-case-the-iris-data-set/].
Get and load the data
End of explanation
import matplotlib.pyplot as plt
# Plot Sepal Length on the x-axis and Sepal Width on the y-axis; complete with labels.
# Scale graph to a bigger size
plt.rcParams['figure.figsize'] = (14.0, 6.0)
# Set title
plt.title('Iris Data Set: Sepal Measurements', fontsize=16)
# plot scatter graph
plt.scatter(sepal_length, sepal_width)
# Add labels
plt.xlabel('Sepal Length', fontsize=14)
plt.ylabel('Sepal Width', fontsize=14)
# Output Graph
plt.show()
Explanation: Create a simple plot
End of explanation
# https://matplotlib.org/users/legend_guide.html
import matplotlib.patches as mp
# https://stackoverflow.com/questions/27318906/python-scatter-plot-with-colors-corresponding-to-strings
colours = {'Iris-setosa': 'r', 'Iris-versicolor': 'g', 'Iris-virginica': 'b'}
plt.scatter(sepal_length, sepal_width, c=[colours[i] for i in iris_class], label=[colours[i] for i in colours])
# Add title
plt.title('Iris Setosa, Versicolor, and Virginica: Sepal Measurements', fontsize=16)
# Add labels
plt.xlabel('Sepal Length', fontsize=14)
plt.ylabel('Sepal Width', fontsize=14)
# https://matplotlib.org/api/patches_api.html
plt.legend(handles = [mp.Patch(color=colour, label=label) for label, colour in [('Iris Setosa', 'r'), ('Iris Versicolor', 'g'), ('Iris Virginica', 'b')]])
plt.show()
Explanation: Create a more complex plot
End of explanation
import seaborn as sns
import pandas as pd
# Prepare data with pandas DataFrame for seaborn usage.
df = pd.DataFrame(dict(zip(['Sepal Length', 'Sepal Width','Petal Length', 'Petal Width', 'Iris Class'], [sepal_length, sepal_width, petal_length, petal_width, iris_class])))
df
# Adapted from: https://seaborn.pydata.org/examples/scatterplot_matrix.html
%matplotlib inline
sns.pairplot(df, hue="Iris Class")
Explanation: Use seaborn
End of explanation
# Reset size after seaborn
plt.rcParams['figure.figsize'] = (14.0, 6.0)
# https://github.com/emerging-technologies/emerging-technologies.github.io/blob/master/notebooks/simple-linear-regression.ipynb
# Calculate the best values for m and c.
m, c = np.polyfit(petal_length, petal_width, 1)
# Plot petal measurements for the full data set
plt.scatter(petal_length, petal_width,marker='o', label='Data Set')
# Plot best fit line
plt.plot(petal_length, m * petal_length + c, 'forestgreen', label='Best fit line')
# Add title
plt.title('Iris Data Set: Petal Measurements', fontsize=16)
# Add labels
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')
plt.legend()
# Print graph
plt.show()
Explanation: Fit a line
End of explanation
# Calculate the R-squared value for our data set using numpy.
np.corrcoef(petal_length, petal_width)[0][1]**2
Explanation: Calculate the R-squared value
End of explanation
# https://stackoverflow.com/questions/27947487/is-zip-the-most-efficient-way-to-combine-arrays-with-respect-to-memory-in-nump
# Combine arrays
iris_data = np.column_stack((sepal_length, sepal_width, petal_length, petal_width,iris_class))
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.in1d.html
# Filter Data with 'Iris-setosa' & transpose after
filter_setosa = (iris_data[np.in1d(iris_data[:,4],'Iris-setosa')]).transpose()
# https://stackoverflow.com/questions/3877491/deleting-rows-in-numpy-array
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.chararray.astype.html
# Prepare data - delete row of unnecessary data and cover types to float
setosa_data = (np.delete(filter_setosa, (4), axis=0)).astype(np.float)
setosa_data
# https://github.com/emerging-technologies/emerging-technologies.github.io/blob/master/notebooks/simple-linear-regression.ipynb
# Calculate the best values for m and c.
m, c = np.polyfit(setosa_data[2], setosa_data[3], 1)
# Plot Setosa measurements
plt.scatter(setosa_data[2],setosa_data[3],marker='o', label='Iris Setosa')
# Plot best fit line
plt.plot(setosa_data[2], m * setosa_data[2] + c, 'forestgreen', label='Best fit line')
# Add title
plt.title('Iris Setosa: Petal Measurements', fontsize=16)
# Add labels
plt.xlabel('Petal Length', fontsize=14)
plt.ylabel('Petal Width', fontsize=14)
plt.legend()
# Print graph
plt.show()
Explanation: Fit another line
End of explanation
# Calculate the R-squared value for the Setosa data using numpy.
np.corrcoef(setosa_data[2], setosa_data[3])[0][1]**2
Explanation: Calculate the R-squared value
End of explanation
# Calculate the partial derivative of cost with respect to m while treating c as a constant.
def gradient_descent_m(x, y, m, c):
return -2.0 * np.sum(x * (y - m * x - c))
# Calculate the partial derivative of cost with respect to c while treating m as a constant.
def gradient_descent_c(x, y, m , c):
return -2.0 * np.sum(y - m * x - c)
eta = 0.0001
g_m, g_c = 1.0, 1.0
change = True
# Iterate the partial derivatives until the outcomes do not change
while change:
g_m_new = g_m - eta * gradient_descent_m(setosa_data[2], setosa_data[3], g_m, g_c)
g_c_new = g_c - eta * gradient_descent_c(setosa_data[2], setosa_data[3], g_m, g_c)
if g_m == g_m_new and g_c == g_c_new:
change = False
else:
g_m, g_c = g_m_new, g_c_new
Explanation: Use gradient descent
Gradient Descent is an approximation technique. To utilize this approximation technique, we guess the value that we wish to approximate and iteratively improve that guess.
End of explanation
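For reference, the iteration implemented above takes the cost as the sum of squared residuals, so the update rule is:
\begin{align}
C(m, c) &= \sum_i \left(y_i - m x_i - c\right)^2 \\
\frac{\partial C}{\partial m} &= -2 \sum_i x_i \left(y_i - m x_i - c\right), \qquad
\frac{\partial C}{\partial c} = -2 \sum_i \left(y_i - m x_i - c\right) \\
m &\leftarrow m - \eta \, \frac{\partial C}{\partial m}, \qquad
c \leftarrow c - \eta \, \frac{\partial C}{\partial c}
\end{align}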
# Plot Setosa measurements
plt.scatter(setosa_data[2],setosa_data[3],marker='o', label='Iris Setosa')
# Plot best fit line according to Gradient Descent
plt.plot(setosa_data[2], g_m * setosa_data[2] + g_c, 'forestgreen', label='Best fit line: Gradient Descent')
# Add title
plt.title('Iris Setosa: Petal Measurements', fontsize=16)
# Add labels
plt.xlabel('Petal Length')
plt.ylabel('Petal Width')
plt.legend()
# Print graph
plt.show()
Explanation: To the human eye it is difficult to see a difference between the best fit line and the best fit line approximated by gradient descent.
End of explanation
print("BEST LINE: m: %20.16f c: %20.16f" % (m, c))
print()
print("GRADIENT DESCENT: m: %20.16f c: %20.16f" % (g_m, g_c))
Explanation: However, the results from the two techniques are in fact different. With both results for m and c differing after the eleventh decimal point, the gradient descent technique did manage to approximate adequate results; although inexact and inaccurate.
End of explanation |
14,797 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Tutorial Part 3
Step1: Now, let's use MoleculeNet to load the Tox21 dataset. We need to make sure to process the data in a way that graph convolutional networks can use. For that, we make sure to set the featurizer option to 'GraphConv'. The MoleculeNet call will return a training set, a validation set, and a test set for us to use. The call also returns transformers, a list of data transformations that were applied to preprocess the dataset. (Most deep networks are quite finicky and require a set of data transformations to ensure that training proceeds stably.)
Step2: Let's now train a graph convolutional network on this dataset. DeepChem has the class GraphConvModel that wraps a standard graph convolutional architecture underneath the hood for user convenience. Let's instantiate an object of this class and train it on our dataset.
Step3: Let's plot these losses so we can take a look at how the loss changes over the process of training.
Step4: We see that the losses fall nicely and give us stable learning.
Let's try to evaluate the performance of the model we've trained. For this, we need to define a metric, a measure of model performance. dc.metrics holds a collection of metrics already. For this dataset, it is standard to use the ROC-AUC score, the area under the receiver operating characteristic curve (which measures the tradeoff between precision and recall). Luckily, the ROC-AUC score is already available in DeepChem.
To measure the performance of the model under this metric, we can use the convenience function model.evaluate().
Step5: What's going on under the hood? Could we build GraphConvModel ourselves? Of course! The first step is to create a TensorGraph object. This object will hold the "computational graph" that defines the computation that a graph convolutional network will perform.
Step6: Let's now define the inputs to our model. Conceptually, graph convolutions just requires a the structure of the molecule in question and a vector of features for every atom that describes the local chemical environment. However in practice, due to TensorFlow's limitations as a general programming environment, we have to have some auxiliary information as well preprocessed.
atom_features holds a feature vector of length 75 for each atom. The other feature inputs are required to support minibatching in TensorFlow. degree_slice is an indexing convenience that makes it easy to locate atoms from all molecules with a given degree. membership determines the membership of atoms in molecules (atom i belongs to molecule membership[i]). deg_adjs is a list that contains adjacency lists grouped by atom degree For more details, check out the code.
To define feature inputs in TensorGraph, we use the Feature layer. Conceptually, a TensorGraph is a mathematical graph composed of layer objects. Features layers have to be the root nodes of the graph since they consitute inputs.
Step7: Let's now implement the body of the graph convolutional network. TensorGraph has a number of layers that encode various graph operations. Namely, the GraphConv, GraphPool and GraphGather layers. We will also apply standard neural network layers such as Dense and BatchNorm.
The layers we're adding effect a "feature transformation" that will create one vector for each molecule.
Step8: Let's now make predictions from the TensorGraph model. Tox21 is a multitask dataset. That is, there are 12 different datasets grouped together, which share many common molecules, but with different outputs for each. As a result, we have to add a separate output layer for each task. We will use a for loop over the tox21_tasks list to make this happen. We need to add labels and weights for each task.
We also have to define a loss for the model which tells the network the objective to minimize during training.
We have to tell TensorGraph which layers are outputs with TensorGraph.add_output(layer). Similarly, we tell the network its loss with TensorGraph.set_loss(loss).
Step9: Now that we've successfully defined our graph convolutional model in TensorGraph, we need to train it. We can call fit(), but we need to make sure that each minibatch of data populates all four Feature objects that we've created. For this, we need to create a Python generator that given a batch of data generates a dictionary whose keys are the Feature layers and whose values are Numpy arrays we'd like to use for this step of training.
Step10: Now, we can train the model using TensorGraph.fit_generator(generator) which will use the generator we've defined to train the model.
Step11: Let's now plot these losses and take a quick look.
Step13: Now that we have trained our graph convolutional method, let's evaluate its performance. We again have to use our defined generator to evaluate model performance. | Python Code:
import deepchem as dc
from deepchem.models.tensorgraph.models.graph_models import GraphConvModel
Explanation: Tutorial Part 3: Introduction to Graph Convolutions
In the previous sections of the tutorial, we learned about Dataset and Model objects. We learned how to load some data into DeepChem from files on disk and also learned some basic facts about molecular data handling. We then dove into some basic deep learning architectures and explored DeepChem's TensorGraph framework for deep learning. However, until now, we stuck with vanilla deep learning architectures and didn't really consider how to handle deep architectures specifically engineered to work with life science data.
In this tutorial, we'll change that by going a little deeper and learn about "graph convolutions." These are one of the most powerful deep learning tools for working with molecular data. The reason for this is that molecules can be naturally viewed as graphs.
Note how standard chemical diagrams of the sort we're used to from high school lend themselves naturally to visualizing molecules as graphs. In the remainder of this tutorial, we'll dig into this relationship in significantly more detail. This will let us get an in-the-guts understanding of how these systems work.
End of explanation
# Load Tox21 dataset
tox21_tasks, tox21_datasets, transformers = dc.molnet.load_tox21(featurizer='GraphConv')
train_dataset, valid_dataset, test_dataset = tox21_datasets
Explanation: Now, let's use MoleculeNet to load the Tox21 dataset. We need to make sure to process the data in a way that graph convolutional networks can use. For that, we make sure to set the featurizer option to 'GraphConv'. The MoleculeNet call will return a training set, a validation set, and a test set for us to use. The call also returns transformers, a list of data transformations that were applied to preprocess the dataset. (Most deep networks are quite finicky and require a set of data transformations to ensure that training proceeds stably.)
End of explanation
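As a quick sanity check (a sketch; the exact shapes depend on the MoleculeNet release), we can inspect what was returned:
# Sketch: inspect the loaded tasks and datasets
print(tox21_tasks)                                     # 12 toxicity assay names
print(train_dataset.X.shape, train_dataset.y.shape)    # ConvMol objects and an (n_samples, 12) label array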
model = GraphConvModel(
len(tox21_tasks), batch_size=50, mode='classification')
num_epochs = 10
losses = []
for i in range(num_epochs):
loss = model.fit(train_dataset, nb_epoch=1)
print("Epoch %d loss: %f" % (i, loss))
losses.append(loss)
Explanation: Let's now train a graph convolutional network on this dataset. DeepChem has the class GraphConvModel that wraps a standard graph convolutional architecture underneath the hood for user convenience. Let's instantiate an object of this class and train it on our dataset.
End of explanation
import matplotlib.pyplot as plot
plot.ylabel("Loss")
plot.xlabel("Epoch")
x = range(num_epochs)
y = losses
plot.scatter(x, y)
plot
Explanation: Let's plot these losses so we can take a look at how the loss changes over the process of training.
End of explanation
import numpy as np
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
print("Training ROC-AUC Score: %f" % train_scores["mean-roc_auc_score"])
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Validation ROC-AUC Score: %f" % valid_scores["mean-roc_auc_score"])
Explanation: We see that the losses fall nicely and give us stable learning.
Let's try to evaluate the performance of the model we've trained. For this, we need to define a metric, a measure of model performance. dc.metrics holds a collection of metrics already. For this dataset, it is standard to use the ROC-AUC score, the area under the receiver operating characteristic curve (which measures the tradeoff between precision and recall). Luckily, the ROC-AUC score is already available in DeepChem.
To measure the performance of the model under this metric, we can use the convenience function model.evaluate().
End of explanation
from deepchem.models.tensorgraph.tensor_graph import TensorGraph
tg = TensorGraph(use_queue=False)
Explanation: What's going on under the hood? Could we build GraphConvModel ourselves? Of course! The first step is to create a TensorGraph object. This object will hold the "computational graph" that defines the computation that a graph convolutional network will perform.
End of explanation
import tensorflow as tf
from deepchem.models.tensorgraph.layers import Feature
atom_features = Feature(shape=(None, 75))
degree_slice = Feature(shape=(None, 2), dtype=tf.int32)
membership = Feature(shape=(None,), dtype=tf.int32)
deg_adjs = []
for i in range(0, 10 + 1):
deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)
deg_adjs.append(deg_adj)
Explanation: Let's now define the inputs to our model. Conceptually, graph convolutions just requires a the structure of the molecule in question and a vector of features for every atom that describes the local chemical environment. However in practice, due to TensorFlow's limitations as a general programming environment, we have to have some auxiliary information as well preprocessed.
atom_features holds a feature vector of length 75 for each atom. The other feature inputs are required to support minibatching in TensorFlow. degree_slice is an indexing convenience that makes it easy to locate atoms from all molecules with a given degree. membership determines the membership of atoms in molecules (atom i belongs to molecule membership[i]). deg_adjs is a list that contains adjacency lists grouped by atom degree For more details, check out the code.
To define feature inputs in TensorGraph, we use the Feature layer. Conceptually, a TensorGraph is a mathematical graph composed of layer objects. Features layers have to be the root nodes of the graph since they consitute inputs.
End of explanation
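To see what these auxiliary arrays look like in practice (a sketch assuming train_dataset from above), one minibatch of ConvMol objects can be merged with ConvMol.agglomerate_mols:
# Sketch: preview the auxiliary arrays for a single minibatch
from deepchem.feat.mol_graphs import ConvMol
X_b, y_b, w_b, ids_b = next(train_dataset.iterbatches(50, pad_batches=True))
multi = ConvMol.agglomerate_mols(X_b)
print(multi.get_atom_features().shape)                 # (total_atoms_in_batch, 75)
print(multi.deg_slice.shape, multi.membership.shape)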
from deepchem.models.tensorgraph.layers import Dense, GraphConv, BatchNorm
from deepchem.models.tensorgraph.layers import GraphPool, GraphGather
batch_size = 50
gc1 = GraphConv(
64,
activation_fn=tf.nn.relu,
in_layers=[atom_features, degree_slice, membership] + deg_adjs)
batch_norm1 = BatchNorm(in_layers=[gc1])
gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] + deg_adjs)
gc2 = GraphConv(
64,
activation_fn=tf.nn.relu,
in_layers=[gp1, degree_slice, membership] + deg_adjs)
batch_norm2 = BatchNorm(in_layers=[gc2])
gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] + deg_adjs)
dense = Dense(out_channels=128, activation_fn=tf.nn.relu, in_layers=[gp2])
batch_norm3 = BatchNorm(in_layers=[dense])
readout = GraphGather(
batch_size=batch_size,
activation_fn=tf.nn.tanh,
in_layers=[batch_norm3, degree_slice, membership] + deg_adjs)
Explanation: Let's now implement the body of the graph convolutional network. TensorGraph has a number of layers that encode various graph operations. Namely, the GraphConv, GraphPool and GraphGather layers. We will also apply standard neural network layers such as Dense and BatchNorm.
The layers we're adding effect a "feature transformation" that will create one vector for each molecule.
End of explanation
from deepchem.models.tensorgraph.layers import Dense, SoftMax, \
SoftMaxCrossEntropy, WeightedError, Stack
from deepchem.models.tensorgraph.layers import Label, Weights
costs = []
labels = []
for task in range(len(tox21_tasks)):
classification = Dense(
out_channels=2, activation_fn=None, in_layers=[readout])
softmax = SoftMax(in_layers=[classification])
tg.add_output(softmax)
label = Label(shape=(None, 2))
labels.append(label)
cost = SoftMaxCrossEntropy(in_layers=[label, classification])
costs.append(cost)
all_cost = Stack(in_layers=costs, axis=1)
weights = Weights(shape=(None, len(tox21_tasks)))
loss = WeightedError(in_layers=[all_cost, weights])
tg.set_loss(loss)
Explanation: Let's now make predictions from the TensorGraph model. Tox21 is a multitask dataset. That is, there are 12 different datasets grouped together, which share many common molecules, but with different outputs for each. As a result, we have to add a separate output layer for each task. We will use a for loop over the tox21_tasks list to make this happen. We need to add labels and weights for each task.
We also have to define a loss for the model which tells the network the objective to minimize during training.
We have to tell TensorGraph which layers are outputs with TensorGraph.add_output(layer). Similarly, we tell the network its loss with TensorGraph.set_loss(loss).
End of explanation
from deepchem.metrics import to_one_hot
from deepchem.feat.mol_graphs import ConvMol
def data_generator(dataset, epochs=1, predict=False, pad_batches=True):
for epoch in range(epochs):
if not predict:
print('Starting epoch %i' % epoch)
for ind, (X_b, y_b, w_b, ids_b) in enumerate(
dataset.iterbatches(
batch_size, pad_batches=pad_batches, deterministic=True)):
d = {}
for index, label in enumerate(labels):
d[label] = to_one_hot(y_b[:, index])
d[weights] = w_b
multiConvMol = ConvMol.agglomerate_mols(X_b)
d[atom_features] = multiConvMol.get_atom_features()
d[degree_slice] = multiConvMol.deg_slice
d[membership] = multiConvMol.membership
for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
d[deg_adjs[i - 1]] = multiConvMol.get_deg_adjacency_lists()[i]
yield d
Explanation: Now that we've successfully defined our graph convolutional model in TensorGraph, we need to train it. We can call fit(), but we need to make sure that each minibatch of data populates all four Feature objects that we've created. For this, we need to create a Python generator that given a batch of data generates a dictionary whose keys are the Feature layers and whose values are Numpy arrays we'd like to use for this step of training.
End of explanation
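A quick way to check the generator (a sketch) is to pull a single feed dictionary and confirm that it maps the Feature, Label and Weights layers to NumPy arrays:
# Sketch: inspect one feed dictionary produced by the generator
first_feed = next(data_generator(valid_dataset, predict=True))
print(len(first_feed), "layer -> array entries in one minibatch feed")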
# num_epochs is set to 10 for better results.
# Reduce it to 1 to render the tutorial online quickly.
num_epochs = 10
losses = []
for i in range(num_epochs):
loss = tg.fit_generator(data_generator(train_dataset, epochs=1))
print("Epoch %d loss: %f" % (i, loss))
losses.append(loss)
Explanation: Now, we can train the model using TensorGraph.fit_generator(generator) which will use the generator we've defined to train the model.
End of explanation
plot.title("TensorGraph Version")
plot.ylabel("Loss")
plot.xlabel("Epoch")
x = range(num_epochs)
y = losses
plot.scatter(x, y)
plot.show()
Explanation: Let's now plot these losses and take a quick look.
End of explanation
metric = dc.metrics.Metric(
dc.metrics.roc_auc_score, np.mean, mode="classification")
def reshape_y_pred(y_true, y_pred):
    """TensorGraph.predict returns a list of arrays, one for each output.
    We also have to remove the padding on the last batch.
    Metrics take results of shape (samples, n_task, prob_of_class).
    """
    n_samples = len(y_true)
    retval = np.stack(y_pred, axis=1)
    return retval[:n_samples]
print("Evaluating model")
train_predictions = tg.predict_on_generator(data_generator(train_dataset, predict=True))
train_predictions = reshape_y_pred(train_dataset.y, train_predictions)
train_scores = metric.compute_metric(train_dataset.y, train_predictions, train_dataset.w)
print("Training ROC-AUC Score: %f" % train_scores)
valid_predictions = tg.predict_on_generator(data_generator(valid_dataset, predict=True))
valid_predictions = reshape_y_pred(valid_dataset.y, valid_predictions)
valid_scores = metric.compute_metric(valid_dataset.y, valid_predictions, valid_dataset.w)
print("Valid ROC-AUC Score: %f" % valid_scores)
Explanation: Now that we have trained our graph convolutional method, let's evaluate its performance. We again have to use our defined generator to evaluate model performance.
End of explanation |
14,798 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
Laplace approximation ( Quadratic approximation)
In this notebook we will approximate the posterior of a beta-Bernoulli model for the coin toss problem using the Laplace approximation method
Step1: Laplace approximation from scratch in JAX
As mentioned in book2 section 7.4.3, with the Laplace approximation the posterior is approximated by a normal distribution with mean $\hat{\theta}$ (the posterior mode) and covariance $H^{-1}$
\begin{align}
H &= \nabla^2_{\theta}\, \big[-\log p(\theta, \mathcal{D})\big] \Big|_{\theta = \hat{\theta}} \\
p(\theta|\mathcal{D}) &= \frac{1}{Z}\, p(\theta, \mathcal{D}) \approx \mathcal{N}(\theta \mid \hat{\theta}, H^{-1})
\end{align}
where $H$ is the Hessian of the negative log joint evaluated at the mode $\hat{\theta}$, and $Z$ is the normalizing constant.
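For the beta-Bernoulli model used here these quantities have a closed form, which gives a useful check on the numerical results below (with a Beta(a, b) prior, $N_1$ heads and $N_0$ tails):
\begin{align}
\hat{\theta} = \frac{N_1 + a - 1}{N_1 + N_0 + a + b - 2}, \qquad
H = \frac{N_1 + a - 1}{\hat{\theta}^2} + \frac{N_0 + b - 1}{(1 - \hat{\theta})^2}
\end{align}
With the uniform Beta(1, 1) prior and the data used below (1 head, 10 tails) this gives $\hat{\theta} = 1/11 \approx 0.091$ and a posterior scale of $1/\sqrt{H} \approx 0.087$.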
Find $\hat{\theta}$
Now we find $\hat{\theta}$ (theta_map) by minimizing the negative log prior-likelihood.
Step2: loc and scale of approximated normal posterior
Step3: True posterior and Laplace-approximated posterior
Step4: Pymc | Python Code:
try:
from probml_utils import latexify, savefig
except:
%pip install git+https://github.com/probml/probml-utils.git
from probml_utils import latexify, savefig
import jax
import jax.numpy as jnp
from jax import lax
try:
from tensorflow_probability.substrates import jax as tfp
except ModuleNotFoundError:
%pip install -qqq tensorflow_probability
from tensorflow_probability.substrates import jax as tfp
try:
import optax
except ModuleNotFoundError:
%pip install -qqq optax
import optax
try:
from rich import print
except ModuleNotFoundError:
%pip install -qqq rich
from rich import print
try:
from tqdm import trange
except:
%pip install -qqq tqdm
from tqdm import trange
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
dist = tfp.distributions
latexify(width_scale_factor=2, fig_height=2) # to apply latexify, set LATEXIFY=1 in environment variable
# Use same data as https://github.com/probml/probml-notebooks/blob/main/notebooks/beta_binom_approx_post_pymc.ipynb
key = jax.random.PRNGKey(128)
dataset = np.repeat([0, 1], (10, 1))
n_samples = len(dataset)
print(f"Dataset: {dataset}")
n_heads = dataset.sum()
n_tails = n_samples - n_heads
# prior distribution ~ Beta
def prior_dist():
return dist.Beta(concentration1=1.0, concentration0=1.0)
# likelihood distribution ~ Bernoulli
def likelihood_dist(theta):
return dist.Bernoulli(probs=theta)
# closed form of beta posterior
a = prior_dist().concentration1
b = prior_dist().concentration0
exact_posterior = dist.Beta(concentration1=a + n_heads, concentration0=b + n_tails)
theta_range = jnp.linspace(0.01, 0.99, 100)
ax = plt.gca()
ax2 = ax.twinx()
(plt2,) = ax2.plot(theta_range, exact_posterior.prob(theta_range), "g--", label="True Posterior")
(plt3,) = ax2.plot(theta_range, prior_dist().prob(theta_range), label="Prior")
likelihood = jax.vmap(lambda x: jnp.prod(likelihood_dist(x).prob(dataset)))(theta_range)
(plt1,) = ax.plot(theta_range, likelihood, "r-.", label="Likelihood")
ax.set_xlabel("theta")
ax.set_ylabel("Likelihood")
ax2.set_ylabel("Prior & Posterior")
ax2.legend(handles=[plt1, plt2, plt3], bbox_to_anchor=(1.6, 1));
Explanation: Laplace approximation ( Quadratic approximation)
In this notebook we will approximate the posterior of a beta-Bernoulli model for the coin toss problem using the Laplace approximation method
End of explanation
def neg_log_prior_likelihood_fn(params, dataset):
theta = params["theta"]
likelihood_log_prob = likelihood_dist(theta).log_prob(dataset).sum() # log probability of likelihood
prior_log_prob = prior_dist().log_prob(theta) # log probability of prior
return -(likelihood_log_prob + prior_log_prob) # negative log_prior_liklihood
loss_and_grad_fn = jax.value_and_grad(neg_log_prior_likelihood_fn)
params = {"theta": 0.5}
neg_joint_log_prob, grads = loss_and_grad_fn(params, dataset)
optimizer = optax.adam(0.01)
opt_state = optimizer.init(params)
@jax.jit
def train_step(carry, data_output):
params = carry["params"]
neg_joint_log_prob, grads = loss_and_grad_fn(params, dataset)
opt_state = carry["opt_state"]
updates, opt_state = optimizer.update(grads, opt_state)
params = optax.apply_updates(params, updates)
carry = {"params": params, "opt_state": opt_state}
data_output = {"params": params, "loss": neg_joint_log_prob}
return carry, data_output
carry = {"params": params, "opt_state": opt_state}
data_output = {"params": params, "loss": neg_joint_log_prob}
n = 100
iterator = jnp.ones(n)
last_carry, output = jax.lax.scan(train_step, carry, iterator)
loss = output["loss"]
plt.plot(loss, label="loss")
plt.legend();
optimized_params = last_carry["params"]
theta_map = optimized_params["theta"]
print(f"theta_map = {theta_map}")
Explanation: Laplace approximation from scratch in JAX
As mentioned in book2 section 7.4.3, with the Laplace approximation the posterior is approximated by a normal distribution with mean $\hat{\theta}$ (the posterior mode) and covariance $H^{-1}$
\begin{align}
H &= \nabla^2_{\theta}\, \big[-\log p(\theta, \mathcal{D})\big] \Big|_{\theta = \hat{\theta}} \\
p(\theta|\mathcal{D}) &= \frac{1}{Z}\, p(\theta, \mathcal{D}) \approx \mathcal{N}(\theta \mid \hat{\theta}, H^{-1})
\end{align}
where $H$ is the Hessian of the negative log joint evaluated at the mode $\hat{\theta}$, and $Z$ is the normalizing constant.
Find $\hat{\theta}$
Now we find $\hat{\theta}$ (theta_map) by minimizing the negative log prior-likelihood.
End of explanation
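# Cross-check (not in the original notebook): with the uniform Beta(1, 1) prior the MAP has the
# closed form n_heads / n_samples, so the optimizer should land near 1/11 = 0.0909.
closed_form_map = n_heads / n_samples
print(f"closed-form theta_map = {closed_form_map:.4f}")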
loc = theta_map # loc of approximate posterior
print(f"loc = {loc}")
# scale of approximate posterior
scale = 1 / jnp.sqrt(jax.hessian(neg_log_prior_likelihood_fn)(optimized_params, dataset)["theta"]["theta"])
print(f"scale = {scale}")
Explanation: loc and scale of approximated normal posterior
End of explanation
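# Cross-check (assumes the Beta(1, 1) prior defined above): the Hessian of the negative log joint
# has the closed form n_heads / loc**2 + n_tails / (1 - loc)**2, so the scale printed above should
# be close to 1/sqrt(133.1), i.e. about 0.087.
closed_form_scale = 1 / jnp.sqrt(n_heads / loc**2 + n_tails / (1 - loc) ** 2)
print(f"closed-form scale = {float(closed_form_scale):.4f}")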
plt.figure()
y = jnp.exp(dist.Normal(loc, scale).log_prob(theta_range))
plt.title("Quadratic approximation")
plt.plot(theta_range, y, label="laplace approximation", color="tab:red")
plt.plot(theta_range, exact_posterior.prob(theta_range), label="true posterior", color="tab:green", linestyle="--")
plt.xlabel("$\\theta$")
plt.ylabel("$p(\\theta)$")
sns.despine()
plt.legend()
savefig("bb_laplace") # set FIG_DIR = "path/to/figure" enviornment variable to save figure
Explanation: True posterior and Laplace-approximated posterior
End of explanation
try:
import pymc3 as pm
except ModuleNotFoundError:
%pip install -qq pymc3
import pymc3 as pm
try:
import scipy.stats as stats
except ModuleNotFoundError:
%pip install -qq scipy
import scipy.stats as stats
import scipy.special as sp
try:
import arviz as az
except ModuleNotFoundError:
%pip install -qq arviz
import arviz as az
import math
# Laplace
with pm.Model() as normal_aproximation:
theta = pm.Beta("theta", 1.0, 1.0)
y = pm.Binomial("y", n=1, p=theta, observed=dataset) # Bernoulli
mean_q = pm.find_MAP()
std_q = ((1 / pm.find_hessian(mean_q, vars=[theta])) ** 0.5)[0]
loc = mean_q["theta"]
# plt.savefig('bb_laplace.pdf');
x = theta_range
plt.figure()
plt.plot(x, stats.norm.pdf(x, loc, std_q), "--", label="Laplace")
post_exact = stats.beta.pdf(x, n_heads + 1, n_tails + 1)
plt.plot(x, post_exact, label="exact")
plt.title("Quadratic approximation")
plt.xlabel("θ", fontsize=14)
plt.yticks([])
plt.legend()
Explanation: Pymc
End of explanation |
14,799 | Given the following text description, write Python code to implement the functionality described below step by step
Description:
<font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 6</font>
Download
Step1: Reading data | Python Code:
# Python language version
from platform import python_version
print('Python version used in this Jupyter Notebook:', python_version())
Explanation: <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 6</font>
Download: http://github.com/dsacademybr
End of explanation
import sqlite3
import random
import time
import datetime
# Creating a connection
conn = sqlite3.connect('dsa.db')
# Creating a cursor
c = conn.cursor()
# Function to create a table
def create_table():
    c.execute('CREATE TABLE IF NOT EXISTS produtos(id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, date TEXT, '\
              'prod_name TEXT, valor REAL)')
# Function to insert a row
def data_insert():
    c.execute("INSERT INTO produtos VALUES(002, '02-05-2020', 'teclado', 130 )")
    conn.commit()
    c.close()
    conn.close()
# Using variables to insert data
def data_insert_var():
    new_date = datetime.datetime.now()
    new_prod_name = 'monitor'
    new_valor = random.randrange(50,100)
    c.execute("INSERT INTO produtos (date, prod_name, valor) VALUES (?, ?, ?)",
              (new_date, new_prod_name, new_valor))
    conn.commit()
# Reading data
def leitura_todos_dados():
    c.execute("SELECT * FROM PRODUTOS")
    for linha in c.fetchall():
        print(linha)
# Reading specific records
def leitura_registros():
    c.execute("SELECT * FROM PRODUTOS WHERE valor > 60.0")
    for linha in c.fetchall():
        print(linha)
# Reading specific columns
def leitura_colunas():
    c.execute("SELECT * FROM PRODUTOS")
    for linha in c.fetchall():
        print(linha[3])
# Select on the data
leitura_todos_dados()
# Reading specific records
leitura_registros()
# Reading specific columns
leitura_colunas()
# Closing the connection
c.close()
conn.close()
Explanation: Reading data
End of explanation |
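# Illustrative extension (not part of the original notebook): the same kind of read can be done
# with a parameterized query, reopening the connection that was closed above.
conn = sqlite3.connect('dsa.db')
c = conn.cursor()
c.execute("SELECT prod_name, valor FROM produtos WHERE valor > ?", (60.0,))
for linha in c.fetchall():
    print(linha)
c.close()
conn.close()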