path | concatenated_notebook
---|---
notebooks/publications/old/fig5_dnf_analysis_and_illum_opt.ipynb | ###Markdown
Blur Len vs Beta
###Code
# blur_len = np.arange(1, 100)
# beta = np.arange(0.1, 1.0, 0.01)
# image = []
# for _len in blur_len:
#     for _beta in beta:
#         image.append(analysis.getOptimalDnf(_len, _beta))
# plt.figure()
# plt.imshow(np.asarray(np.log10(image)).reshape(len(blur_len), len(beta)), vmin=0, vmax=2)
###Output
_____no_output_____
###Markdown
Show kernel and padded kernel in frequency domain
###Code
x = np.zeros(100)
x[5] = 1
x[19] = 1
x[14] = 1
x_padded = np.pad(x, (0, 100), mode='constant')
plt.figure()
plt.plot(np.abs(np.fft.fft(x)) ** 2, label='original')
plt.plot(np.abs(np.fft.fft(x_padded)) ** 2, label='padded')
plt.legend()
plt.xlabel('Fourier Coefficient')
plt.ylabel('Magnitude')
plt.title('Effect of Zero-padding')
plt.tight_layout()
print(analysis.calcCondNumFromKernel(x))
print(analysis.calcCondNumFromKernel(x_padded))
print(analysis.calcDnfFromKernel(x))
print(analysis.calcDnfFromKernel(x_padded))
###Output
_____no_output_____
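###Markdown
As a cross-check on the printed values above, the condition number and DNF can be approximated directly from the kernel's Fourier spectrum. The helpers below are a hypothetical sketch for intuition only; the notebook's `analysis.calcCondNumFromKernel` and `analysis.calcDnfFromKernel` may use different definitions or normalizations.
###Code
def cond_num_from_kernel_sketch(kernel):
    # ratio of the largest to the smallest Fourier magnitude of the kernel
    mag = np.abs(np.fft.fft(kernel))
    return mag.max() / max(mag.min(), 1e-12)

def dnf_from_kernel_sketch(kernel):
    # assumed definition: DNF ~ sqrt(mean(1 / |K(w)|^2)), guarding against zero coefficients
    power = np.maximum(np.abs(np.fft.fft(kernel)) ** 2, 1e-12)
    return np.sqrt(np.mean(1.0 / power))

print(cond_num_from_kernel_sketch(x), cond_num_from_kernel_sketch(x_padded))
print(dnf_from_kernel_sketch(x), dnf_from_kernel_sketch(x_padded))
###Output
_____no_output_____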
###Markdown
Pulse Length and DNF
###Code
kernel_len_list = np.arange(11,1000)
n_pulses = 10
dnf_list = []
for kernel_len in kernel_len_list:
    dnf_list.append(analysis.getOptimalDnf(kernel_len, n_pulses=n_pulses, n_tests=100))
plt.figure()
plt.plot(dnf_list)
plt.xlabel('Sequence Length')
plt.ylabel('DNF')
plt.title('Effect of Sequence Length on DNF, %d pulses' % n_pulses)
plt.tight_layout()
kernel_len_list = np.arange(51,1000)
n_pulses = 50
dnf_list = []
for kernel_len in kernel_len_list:
    dnf_list.append(analysis.getOptimalDnf(kernel_len, n_pulses=n_pulses, n_tests=100))
plt.figure()
plt.plot(dnf_list)
plt.xlabel('Sequence Length')
plt.ylabel('DNF')
plt.title('Effect of Sequence Length on DNF, %d pulses' % n_pulses)
plt.tight_layout()
###Output
_____no_output_____
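###Markdown
The two sweeps above differ only in the pulse count; a small helper (a sketch that reuses the same `analysis.getOptimalDnf` call) avoids the duplicated code.
###Code
def sweep_sequence_length(n_pulses, max_len=1000, n_tests=100):
    # sweep the kernel length for a fixed pulse count and return the optimal DNF for each length
    kernel_lens = np.arange(n_pulses + 1, max_len)
    dnfs = [analysis.getOptimalDnf(k, n_pulses=n_pulses, n_tests=n_tests) for k in kernel_lens]
    return kernel_lens, dnfs

# Example: reproduce the 10-pulse sweep above
# kernel_lens, dnfs = sweep_sequence_length(n_pulses=10)
###Output
_____no_output_____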
###Markdown
DNF vs Pulse Count
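The fit below uses a one-parameter power-law model for the DNF as a function of the pulse count $N$ (this is the `func_powerlaw` defined in the next cell): $f(N) = \sqrt{2}\,N^{m} - 1$, where only the exponent $m$ is fitted.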
###Code
pulse_count_list = np.arange(3,1500)
dnf_list = []
for pulse_count in pulse_count_list:
    dnf_list.append(analysis.getOptimalDnf(pulse_count * 2, n_pulses=pulse_count, n_tests=100))
# Perform log fit
coeffs = np.polyfit(np.log10(pulse_count_list), dnf_list, 1)
y = coeffs[0] * np.log10(pulse_count_list) + coeffs[1]
def func_powerlaw(x, m):
    return np.sqrt(2) * x**m - 1
sol1, _ = sp.optimize.curve_fit(func_powerlaw, pulse_count_list, dnf_list, maxfev=2000 )
# sol1 = [0.6116, np.sqrt(2), -1]
yp = func_powerlaw(pulse_count_list, sol1[0])
plt.figure(figsize=(12,5))
plt.plot(pulse_count_list, dnf_list, label='Calculated DNF')
plt.plot(pulse_count_list, yp, label='Power Law Fit')
plt.xlabel('Pulse Count')
plt.ylabel('DNF')
plt.title('Effect of Pulse Count on DNF')
plt.legend()
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Try with $3\times$ pulse count
###Code
pulse_count_list = np.arange(3,500)
n_pulses = 50
dnf_list = []
for pulse_count in pulse_count_list:
    dnf_list.append(analysis.getOptimalDnf(pulse_count * 3, n_pulses=pulse_count, n_tests=100))
# Perform log fit
coeffs = np.polyfit(np.log10(pulse_count_list), dnf_list, 1)
y = coeffs[0] * np.log10(pulse_count_list) + coeffs[1]
def func_powerlaw(x, m):
    return np.sqrt(2) * x**m - 1
sol1, _ = sp.optimize.curve_fit(func_powerlaw, pulse_count_list, dnf_list, maxfev=2000 )
# sol1 = [0.6116, np.sqrt(2), -1]
yp = func_powerlaw(pulse_count_list, sol1[0])
plt.figure(figsize=(12,5))
plt.plot(pulse_count_list, dnf_list, label='Calculated DNF')
plt.plot(pulse_count_list, yp, label='Power Law Fit')
plt.xlabel('Pulse Count')
plt.ylabel('DNF')
plt.title('Effect of Pulse Count on DNF')
plt.legend()
plt.tight_layout()
###Output
_____no_output_____
###Markdown
What does the SNR vs n_pulses curve look like?
###Code
N = np.arange(3,500)
c = 10
snr_strobed = np.sqrt(c)
f = func_powerlaw(pulse_count_list, sol1[0])
snr_imaging = np.sqrt(N * c)
snr_dnf = snr_imaging / f
plt.figure()
plt.plot(pulse_count_list, snr_dnf, label='SNR Improvement')
plt.plot(pulse_count_list, snr_strobed * np.ones_like(snr_dnf), label='Baseline (Strobed)')
# plt.plot(pulse_count_list, snr_0, label='Imaging SNR')
plt.legend()
plt.xlabel('Number of Pulses')
plt.ylabel('SNR')
plt.tight_layout()
# signal_photons = 100
# noise_var = 1000
# G = np.sqrt(1 + noise_var / signal_photons)
# print(G)
###Output
_____no_output_____
###Markdown
What if you add signal-independent noise?
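The quantities computed in the next cell are, with $c$ the photon count per pulse, $N$ the number of pulses, $f$ the DNF, and $\sigma^2$ the signal-independent (e.g. readout) noise variance: $SNR_{strobed} = c / \sqrt{c + \sigma^2}$ and $SNR_{coded} = N c / \left(f \sqrt{N c + \sigma^2}\right)$.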
###Code
N = np.arange(3,500)
c = 1000
var_dependent = N * c
var_independent = 0
# Calculate DNF
f = func_powerlaw(pulse_count_list, sol1[0])
# Calculate SNR for strobed and coded illumination
snr_strobed = c / np.sqrt(c + var_independent)
snr_coded = c * N / (f * np.sqrt(N * c + var_independent))
plt.figure()
plt.plot(pulse_count_list, snr_coded, label='Decoded SNR')
plt.plot(pulse_count_list, snr_strobed * np.ones_like(snr_coded), label='Baseline (Strobed) SNR')
# plt.plot(pulse_count_list, snr_0, label='Imaging SNR')
plt.legend()
plt.xlabel('Number of Pulses')
plt.ylabel('SNR')
plt.xlim((0,300))
plt.tight_layout()
###Output
_____no_output_____
###Markdown
Plot SNR of Strobed and Coded Illumination Under Different Amounts of Readout Noise
###Code
N = np.arange(3,500)
c = 1000
var_dependent = N * c
var_independent = 500
# Calculate DNF as a function of N
dnf_list = func_powerlaw(pulse_count_list, sol1[0])
# Create variance list
var_independent_list = np.arange(0, 10000, 100)
plt.figure(figsize=(9,7))
snr_strobed_list, snr_coded_list = [], []
for var_independent in var_independent_list:
    for n, dnf in zip(pulse_count_list, dnf_list):
        # Calculate SNR for strobed and coded illumination at this pulse count
        snr_strobed = c / np.sqrt(c + var_independent)
        snr_coded = c * n / (dnf * np.sqrt(n * c + var_independent))
        snr_strobed_list.append(snr_strobed)
        snr_coded_list.append(snr_coded)
snr_strobed_image = np.asarray(snr_strobed_list).reshape((len(var_independent_list), len(dnf_list)))
snr_coded_image = np.asarray(snr_coded_list).reshape((len(var_independent_list), len(dnf_list)))
###Output
_____no_output_____
###Markdown
Plot SNR of Strobed and Coded Illumination Under Different Amounts of Readout Noise
###Code
N = pulse_count_list
c = 1000
var_dependent = N * c
var_independent = 500
# Calculate DNF
f = func_powerlaw(pulse_count_list, sol1[0])
plt.figure(figsize=(9,7))
for index, var_independent in enumerate([0, 500, 1000, 5000]):
    plt.subplot(411 + index)
    # Calculate SNR for strobed and coded illumination
    snr_strobed = c / np.sqrt(c + var_independent)
    snr_coded = c * N / (f * np.sqrt(N * c + var_independent))
    plt.plot(pulse_count_list, snr_coded, label='Decoded SNR', lw=3)
    plt.plot(pulse_count_list, snr_strobed * np.ones_like(snr_coded), label='Baseline (Strobed) SNR', lw=3)
    # plt.plot(pulse_count_list, snr_0, label='Imaging SNR')
    if index == 0:
        plt.legend()
    plt.xlabel('Number of Pulses')
    plt.ylabel('SNR')
    plt.xlim((0, 300))
    plt.title('Signal-Independent Noise Variance: %d counts' % var_independent)
plt.tight_layout()
###Output
_____no_output_____ |
code/.ipynb_checkpoints/NN_based_models_v4-3-checkpoint.ipynb | ###Markdown
Table of Contents
1 TextCNN
1.1 notes
2 LSTM
###Code
from google.colab import drive
drive.mount('/content/drive')
import os
os.chdir("/content/drive/MyDrive/Text-Classification/code")
!pip install pyLDAvis
!pip install gensim
!pip install pandas==1.3.0
import nltk
nltk.download('punkt')
nltk.download('stopwords')
import numpy as np
from sklearn import metrics
from clustering_utils import *
from eda_utils import *
from nn_utils_keras import *
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
####################################
### string normalized
####################################
from gensim.utils import tokenize
from nltk.tokenize import word_tokenize
from gensim.parsing.preprocessing import remove_stopwords
def normal_string(x):
    x = remove_stopwords(x)
    # x = " ".join(preprocess_string(x))
    x = " ".join(word_tokenize(x, preserve_line=False)).strip()
    return x
train, test = load_data()
train, upsampling_info = upsampling_train(train)
train_text, train_label = train_augmentation(train, select_comb=[['text'], ['reply', 'reference_one'], ['Subject', 'reference_one', 'reference_two']])
# train_text, train_label = train_augmentation(train, select_comb=None)
test_text, test_label = test['text'], test['label']
# test_text = test_text.apply(lambda x: normal_string(x))
# train_text = train_text.apply(lambda x: normal_string(x))
####################################
### label mapper
####################################
labels = sorted(train_label.unique())
label_mapper = dict(zip(labels, range(len(labels))))
train_label = train_label.map(label_mapper)
test_label = test_label.map(label_mapper)
y_train = train_label
y_test = test_label
print(train_text.shape)
print(test_text.shape)
print(train_label.shape)
print(test_label.shape)
print(labels)
####################################
### hyper params
####################################
filters = '"#$%&()*+,-/:;<=>@[\\]^_`{|}~\t\n0123465789!.?\''
MAX_NB_WORDS_ratio = 0.98
MAX_DOC_LEN_ratio = 0.999
MAX_NB_WORDS = eda_MAX_NB_WORDS(train_text, ratio=MAX_NB_WORDS_ratio, char_level=False, filters=filters)
MAX_DOC_LEN = eda_MAX_DOC_LEN(train_text, ratio=MAX_DOC_LEN_ratio, char_level=False, filters=filters)
from tensorflow.keras import optimizers
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.layers import Embedding, Dense, Conv1D, MaxPooling1D, Dropout, Activation, Input, Flatten, Concatenate, Lambda
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.utils import to_categorical
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from tensorflow import keras
import numpy as np
import pandas as pd
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import os
###Output
_____no_output_____
###Markdown
TextCNN notes:
###Code
####################################
### train val test split
####################################
X_train_val, y_train_val, X_test, y_test = train_text, train_label, test_text, test_label
X_train, x_val, y_train, y_val = train_test_split(X_train_val, y_train_val, test_size=0.2, stratify=y_train_val)
####################################
### preprocessor for NN input
####################################
processor = text_preprocessor(MAX_DOC_LEN, MAX_NB_WORDS, train_text, filters='"#$%&()*+,-/:;<=>@[\\]^_`{|}~\t\n0123465789')
X_train = processor.generate_seq(X_train)
x_val = processor.generate_seq(x_val)
X_test = processor.generate_seq(X_test)
# y_train = to_categorical(y_train)
# y_val = to_categorical(y_val)
# y_test = to_categorical(y_test)
print('Shape of x_tr: ' + str(X_train.shape))
print('Shape of y_tr: ' + str(y_train.shape))
print('Shape of x_val: ' + str(x_val.shape))
print('Shape of y_val: ' + str(y_val.shape))
print('Shape of X_test: ' + str(X_test.shape))
print('Shape of y_test: ' + str(y_test.shape))
info = pd.concat([y_train.value_counts(), y_val.value_counts(),
                  y_val.value_counts() / y_train.value_counts(), y_train.value_counts() / y_train.size,
                  y_test.value_counts(), y_test.value_counts() / y_test.size], axis=1)
info.index = labels
info.columns = ['tr_size', 'val_size', 'val_ratio', 'tr_prop', 'test_size', 'test_prop']
info
# define Model for classification
def model_Create(FS, NF, EMB, MDL, MNW, PWV=None, optimizer='RMSprop', trainable_switch=True):
    cnn_box = cnn_model_l2(FILTER_SIZES=FS, MAX_NB_WORDS=MNW, MAX_DOC_LEN=MDL, EMBEDDING_DIM=EMB,
                           NUM_FILTERS=NF, PRETRAINED_WORD_VECTOR=PWV, trainable_switch=trainable_switch)
    # Hyperparameters: MAX_DOC_LEN
    q1_input = Input(shape=(MDL,), name='q1_input')
    encode_input1 = cnn_box(q1_input)
    # half_features = int(len(FS)*NF/2)*10
    x = Dense(384, activation='relu', name='half_features')(encode_input1)
    x = Dropout(rate=0.3, name='dropout1')(x)
    # x = Dense(256, activation='relu', name='dense1')(x)
    # x = Dropout(rate=0.3, name='dropou2')(x)
    x = Dense(128, activation='relu', name='dense2')(x)
    x = Dropout(rate=0.3, name='dropout3')(x)
    x = Dense(64, activation='relu', name='dense3')(x)
    x = Dropout(rate=0.3, name='dropout4')(x)
    pred = Dense(len(labels), activation='softmax', name='Prediction')(x)
    model = Model(inputs=q1_input, outputs=pred)
    model.compile(optimizer=optimizer,
                  loss=keras.losses.SparseCategoricalCrossentropy(),
                  metrics=[keras.metrics.SparseCategoricalAccuracy()])
    return model
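# NOTE (sketch): `cnn_box` above is produced by `cnn_model_l2` from nn_utils_keras, which is
# not shown in this notebook. A typical TextCNN feature extractor with the same call signature
# looks roughly like the following (hypothetical reference only, not executed here):
#
# def cnn_model_l2(FILTER_SIZES, MAX_NB_WORDS, MAX_DOC_LEN, EMBEDDING_DIM,
#                  NUM_FILTERS, PRETRAINED_WORD_VECTOR=None, trainable_switch=True):
#     doc_input = Input(shape=(MAX_DOC_LEN,))
#     emb = Embedding(MAX_NB_WORDS, EMBEDDING_DIM,
#                     weights=None if PRETRAINED_WORD_VECTOR is None else [PRETRAINED_WORD_VECTOR],
#                     trainable=trainable_switch)(doc_input)
#     branches = []
#     for fs in FILTER_SIZES:
#         conv = Conv1D(NUM_FILTERS, fs, activation='relu')(emb)      # n-gram filters
#         pool = MaxPooling1D(pool_size=MAX_DOC_LEN - fs + 1)(conv)   # max-over-time pooling
#         branches.append(Flatten()(pool))
#     features = Concatenate()(branches) if len(branches) > 1 else branches[0]
#     return Model(inputs=doc_input, outputs=features)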
EMBEDDING_DIM = 200
# W2V = processor.w2v_pretrain(EMBEDDING_DIM, min_count=2, seed=1, cbow_mean=1,negative=5, window=20, workers=7) # pretrain w2v by gensim
# W2V = processor.load_glove_w2v(EMBEDDING_DIM) # download glove
W2V = None
trainable_switch = True
# Set hyper parameters
FILTER_SIZES = [2, 4,6,8]
# FILTER_SIZES = [2,3,4]
NUM_FILTERS = 64
# OPT = optimizers.Adam(learning_rate=0.005)
OPT = optimizers.RMSprop(learning_rate=0.0005) # 'RMSprop'
PWV = W2V
model = model_Create(FS=FILTER_SIZES, NF=NUM_FILTERS, EMB=EMBEDDING_DIM,
MDL=MAX_DOC_LEN, MNW=MAX_NB_WORDS+1, PWV=PWV,
optimizer=OPT, trainable_switch=trainable_switch)
# visual_textCNN(model)
BATCH_SIZE = 32  # train with a small batch size first (easier to find a good region of the loss surface), then switch to a larger batch size to converge quickly
NUM_EPOCHES = 50  # use at least 20 epochs
patience = 30
file_name = 'test'
BestModel_Name = file_name + 'Best_GS_3'
BEST_MODEL_FILEPATH = BestModel_Name
# model.load_weights(BestModel_Name)  # uncomment to resume training from the previously saved best weights
earlyStopping = EarlyStopping(monitor='val_sparse_categorical_accuracy', patience=patience, verbose=1, mode='max') # patience: number of epochs with no improvement on monitor : val_loss
checkpoint = ModelCheckpoint(BEST_MODEL_FILEPATH, monitor='val_sparse_categorical_accuracy', verbose=1, save_best_only=True, mode='max')
# history = model.fit(X_train, y_train, validation_data=(X_test,y_test), batch_size=BATCH_SIZE, epochs=NUM_EPOCHES, callbacks=[earlyStopping, checkpoint], verbose=1)
history = model.fit(X_train, y_train, validation_data=(x_val, y_val), batch_size=BATCH_SIZE, epochs=NUM_EPOCHES, callbacks=[earlyStopping, checkpoint], verbose=1)
model.load_weights(BestModel_Name)
#### classification Report
history_plot(history)
y_pred = model.predict(X_test)
# print(classification_report(y_test, np.argmax(y_pred, axis=1)))
print(classification_report(test_label, np.argmax(y_pred, axis=1), target_names=labels))
scores = model.evaluate(X_test, y_test, verbose=2)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
print( "\n\n\n")
###Output
========================================================================
loss val_loss
###Markdown
LSTM
###Code
# from tensorflow.keras.layers import SpatialDropout1D, GlobalMaxPooling1D, GlobalMaxPooling2D
# def model_Create(FS, NF, EMB, MDL, MNW, PWV = None, optimizer='RMSprop', trainable_switch=True):
# model = Sequential()
# model.add(Embedding(input_dim=MNW, output_dim=EMB, embeddings_initializer='uniform', mask_zero=True, input_length=MDL))
# model.add(Flatten())
# # model.add(GlobalMaxPooling2D()) # downsampling
# # model.add(SpatialDropout1D(0.2))
# model.add(Dense(1024, activation='relu'))
# model.add(Dense(512, activation='relu'))
# model.add(Dense(128, activation='relu'))
# model.add(Dense(64, activation='relu'))
# # model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
# model.add(Dense(20, activation='softmax'))
# model.compile(optimizer=optimizer,
# loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False),
# metrics=[keras.metrics.SparseCategoricalAccuracy()])
# return model
# model = model_Create(FS=FILTER_SIZES, NF=NUM_FILTERS, EMB=EMBEDDING_DIM,
# MDL=MAX_DOC_LEN, MNW=MAX_NB_WORDS+1, PWV=PWV, trainable_switch=trainable_switch)
# visual_textCNN(model)
# EMBEDDING_DIM = 200
# # W2V = processor.w2v_pretrain(EMBEDDING_DIM, min_count=2, seed=1, cbow_mean=1,negative=5, window=20, workers=7) # pretrain w2v by gensim
# # W2V = processor.load_glove_w2v(EMBEDDING_DIM) # download glove
# trainable_switch = True
# W2V = None
# BATCH_SIZE = 64
# NUM_EPOCHES = 10 # patience=20
# patience = 30
# BestModel_Name = 'text_CNN.h5'
# BEST_MODEL_FILEPATH = BestModel_Name
# earlyStopping = EarlyStopping(monitor='val_sparse_categorical_accuracy', patience=patience, verbose=1, mode='max') # patience: number of epochs with no improvement on monitor : val_loss
# checkpoint = ModelCheckpoint(BEST_MODEL_FILEPATH, monitor='val_sparse_categorical_accuracy', verbose=1, save_best_only=True, mode='max')
# history = model.fit(X_train, y_train, validation_split=0.2, batch_size=BATCH_SIZE, epochs=NUM_EPOCHES, callbacks=[earlyStopping, checkpoint], verbose=1)
# model.load_weights(BestModel_Name)
# #### classification Report
# history_plot(history)
# y_pred = model.predict(X_test)
# # print(classification_report(y_test, np.argmax(y_pred, axis=1)))
# print(classification_report(test_label, np.argmax(y_pred, axis=1), target_names=labels))
# scores = model.evaluate(X_test, y_test, verbose=2)
# print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# print( "\n\n\n")
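# NOTE (sketch): this section is titled "LSTM", but the commented-out model above is a plain
# Dense network. A minimal LSTM classifier for the same inputs could look roughly like the
# following (hypothetical, untuned, and not executed here):
#
# from tensorflow.keras.layers import LSTM
# lstm_model = Sequential([
#     Embedding(input_dim=MAX_NB_WORDS + 1, output_dim=EMBEDDING_DIM,
#               mask_zero=True, input_length=MAX_DOC_LEN),
#     LSTM(128, dropout=0.2, recurrent_dropout=0.2),
#     Dense(64, activation='relu'),
#     Dense(len(labels), activation='softmax'),
# ])
# lstm_model.compile(optimizer='adam',
#                    loss=keras.losses.SparseCategoricalCrossentropy(),
#                    metrics=[keras.metrics.SparseCategoricalAccuracy()])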
###Output
_____no_output_____ |
MachineLearning/gpcharts test.ipynb | ###Markdown
GooPyCharts Demo Notebook
Import GooPyCharts
To cut down on syntax, import figure directly.
###Code
from gpcharts import figure
###Output
_____no_output_____
###Markdown
Simple Line Graph
Same graph as described in the readme.
###Code
fig1 = figure()
fig1.plot([8,7,6,5,4])
###Output
_____no_output_____
###Markdown
Line Graph with Two Lines
Another line graph, but with two dependent variables; this example also customizes the plot.
###Code
fig2 = figure(title='Two lines',xlabel='Days',ylabel='Count',height=600,width=600)
xVals = ['Mon','Tues','Wed','Thurs','Fri']
yVals = [[5,4],[8,7],[4,8],[10,10],[3,12]]
fig2.plot(xVals,yVals)
###Output
_____no_output_____
###Markdown
DateTime Graph
A graph with dates and times. Title is assigned afterwards, and data is given header information.
###Code
fig3 = figure()
fig3.title = 'Weather over Days'
fig3.ylabel = 'Temperature'
#modify size of graph
fig3.height = 800
fig3.width = 1000
###Output
_____no_output_____
###Markdown
X datetime data can take either of the following formats: "yyyy-mm-dd HH:MM:SS" or "yyyy-mm-dd", but be consistent.
###Code
#xVals = ['Dates','2016-03-20 00:00:00','2016-03-21 00:00:00','2016-03-25 00:00:00','2016-04-01 00:00:00']
xVals = ['Dates','2016-03-20','2016-03-21','2016-03-25','2016-04-01']
yVals = [['Shakuras','Korhal','Aiur'],[10,30,40],[12,28,41],[15,34,38],[8,33,47]]
fig3.plot(xVals,yVals)
###Output
_____no_output_____
###Markdown
A Log Scale Example
Set "logScale=True" when calling plot (or plot_nb for notebooks) to plot the y axis in log scale.
###Code
fig4 = figure(title='Population Growth',ylabel='Population')
xVals = ['Year',1700,1800,1900,2000]
yVals = [['Gotham City', 'Central City'],[0,10],[100,200],[100000,500000],[5000000,10000000]]
fig4.plot(xVals,yVals,logScale=True)
###Output
_____no_output_____
###Markdown
Scatter Plot
Scatter plot arguments are the same as for normal line graph arguments, but use "scatter" (or "scatter_nb" for notebooks) to plot instead. Scatter plots also support trend lines. Set "trendline=True" in the arguments to get a trendline on your graph. Currently only a trendline for the first dependent variable is supported.
###Code
fig5 = figure('Strong Correlation')
fig5.scatter([1,2,3,4,5],[[1,5],[2,4],[3,3],[4,2],[5,1]],trendline=True)
###Output
_____no_output_____
###Markdown
Bar Graph
Simple horizontal bar graphs are supported. Use function "bar" (or "bar_nb" for notebooks).
###Code
fig6 = figure('Percent Alcohol Consumption')
fig6.bar(['Percentage','Beer','Wine','Liquor'],['Type',40,50,10])
###Output
_____no_output_____
###Markdown
Histogram
Simple histograms are also supported. Histograms take in 1 list of input. Use function "hist" (or "hist_nb" for notebooks).
###Code
fig7 = figure('Distribution',xlabel='value')
fig7.hist([1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,3,3,3,3,4,4,5,6,7,8,8,8,8,8,9,9,9,10,11,12,13,13,13,13,14])
###Output
_____no_output_____ |
doc/notebooks/demo3.ipynb | ###Markdown
Demo 3: HKR classifier on MNIST dataset
[](https://colab.research.google.com/github/deel-ai/deel-lip/blob/master/doc/notebooks/demo3.ipynb)
This notebook will demonstrate learning a binary task on the MNIST 0-8 dataset.
###Code
# pip install deel-lip -qqq
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.python.keras.layers import Input, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import binary_accuracy
from tensorflow.keras.models import Sequential
from deel.lip.layers import (
SpectralConv2D,
SpectralDense,
FrobeniusDense,
ScaledL2NormPooling2D,
)
from deel.lip.activations import MaxMin, GroupSort, GroupSort2, FullSort
from deel.lip.losses import HKR, KR, HingeMargin
###Output
2021-09-08 18:34:34.803681: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0
###Markdown
Data preparation
For this task we will select two classes: 0 and 8. Labels are changed to {-1, 1}, which is compatible with the hinge term used in the loss.
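For reference, the hinge-regularized KR (HKR) loss used below combines a Kantorovich-Rubinstein term with a hinge penalty. Schematically (up to the exact sign and normalization conventions used by `deel-lip`): $KR(f) = \mathbb{E}_{y=1}[f(x)] - \mathbb{E}_{y=-1}[f(x)]$, $\mathrm{Hinge}_m(f) = \mathbb{E}[\max(0, m - y\,f(x))]$, and $HKR_{\alpha,m}(f) = \alpha\,\mathrm{Hinge}_m(f) - KR(f)$; this is consistent with the training logs below, where the reported loss is approximately $\alpha$ times the hinge term minus the KR term.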
###Code
from tensorflow.keras.datasets import mnist
# first we select the two classes
selected_classes = [0, 8] # must be two classes as we perform binary classification
def prepare_data(x, y, class_a=0, class_b=8):
    """
    This function converts the MNIST data to make it suitable for our binary
    classification setup.
    """
    # select items from the two selected classes
    mask = (y == class_a) + (y == class_b)  # mask to select only items from class_a or class_b
    x = x[mask]
    y = y[mask]
    x = x.astype("float32")
    y = y.astype("float32")
    # convert pixel values from int [0, 255] to float32 [0, 1]
    x /= 255
    x = x.reshape((-1, 28, 28, 1))
    # change label to binary classification {-1, 1}
    y[y == class_a] = 1.0
    y[y == class_b] = -1.0
    return x, y
# now we load the dataset
(x_train, y_train_ord), (x_test, y_test_ord) = mnist.load_data()
# prepare the data
x_train, y_train = prepare_data(
x_train, y_train_ord, selected_classes[0], selected_classes[1]
)
x_test, y_test = prepare_data(
x_test, y_test_ord, selected_classes[0], selected_classes[1]
)
# display infos about dataset
print(
"train set size: %i samples, classes proportions: %.3f percent"
% (y_train.shape[0], 100 * y_train[y_train == 1].sum() / y_train.shape[0])
)
print(
"test set size: %i samples, classes proportions: %.3f percent"
% (y_test.shape[0], 100 * y_test[y_test == 1].sum() / y_test.shape[0])
)
###Output
train set size: 11774 samples, classes proportions: 50.306 percent
test set size: 1954 samples, classes proportions: 50.154 percent
###Markdown
Build Lipschitz model
Let's first make the parameters of this experiment explicit.
###Code
# training parameters
epochs = 10
batch_size = 128
# network parameters
activation = GroupSort # ReLU, MaxMin, GroupSort2
# loss parameters
min_margin = 1.0
alpha = 10.0
###Output
_____no_output_____
###Markdown
Now we can build the network. Here the experiment is done with an MLP, but `Deel-lip` also provides state-of-the-art 1-Lipschitz convolutions (an illustrative convolutional sketch follows the next code cell).
###Code
K.clear_session()
# helper function to build the 1-lipschitz MLP
wass = Sequential(
layers=[
Input((28, 28, 1)),
Flatten(),
SpectralDense(32, GroupSort2(), use_bias=True),
SpectralDense(16, GroupSort2(), use_bias=True),
FrobeniusDense(1, activation=None, use_bias=False),
],
name="lipModel",
)
wass.summary()
optimizer = Adam(lr=0.001)
# as the output of our classifier is in the real range [-1, 1], binary accuracy must be redefined
def HKR_binary_accuracy(y_true, y_pred):
    S_true = tf.dtypes.cast(tf.greater_equal(y_true[:, 0], 0), dtype=tf.float32)
    S_pred = tf.dtypes.cast(tf.greater_equal(y_pred[:, 0], 0), dtype=tf.float32)
    return binary_accuracy(S_true, S_pred)
wass.compile(
loss=HKR(
alpha=alpha, min_margin=min_margin
), # HKR stands for the hinge regularized KR loss
metrics=[
KR, # shows the KR term of the loss
HingeMargin(min_margin=min_margin), # shows the hinge term of the loss
HKR_binary_accuracy, # shows the classification accuracy
],
optimizer=optimizer,
)
###Output
_____no_output_____
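###Markdown
As an aside, a convolutional variant could be assembled from the 1-Lipschitz layers already imported above (`SpectralConv2D`, `ScaledL2NormPooling2D`). The sketch below is illustrative only, is not used or trained in this demo, and its layer sizes are arbitrary choices.
###Code
# Illustrative 1-Lipschitz CNN sketch (not trained in this demo)
wass_cnn = Sequential(
    layers=[
        Input((28, 28, 1)),
        SpectralConv2D(16, (3, 3), activation=GroupSort2(), use_bias=True),
        ScaledL2NormPooling2D(pool_size=(2, 2)),
        SpectralConv2D(32, (3, 3), activation=GroupSort2(), use_bias=True),
        ScaledL2NormPooling2D(pool_size=(2, 2)),
        Flatten(),
        SpectralDense(32, GroupSort2(), use_bias=True),
        FrobeniusDense(1, activation=None, use_bias=False),
    ],
    name="lipConvModel",
)
wass_cnn.summary()
###Output
_____no_output_____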
###Markdown
Learn classification on MNIST
Now that the model is built, we can learn the task.
###Code
wass.fit(
x=x_train,
y=y_train,
validation_data=(x_test, y_test),
batch_size=batch_size,
shuffle=True,
epochs=epochs,
verbose=1,
)
###Output
Epoch 1/10
92/92 [==============================] - 2s 10ms/step - loss: -1.6675 - KR: 3.7144 - HingeMargin: 0.2047 - HKR_binary_accuracy: 0.9382 - val_loss: -5.0961 - val_KR: 5.5990 - val_HingeMargin: 0.0519 - val_HKR_binary_accuracy: 0.9786
Epoch 2/10
92/92 [==============================] - 1s 7ms/step - loss: -5.0297 - KR: 5.5716 - HingeMargin: 0.0542 - HKR_binary_accuracy: 0.9793 - val_loss: -5.4469 - val_KR: 5.7710 - val_HingeMargin: 0.0354 - val_HKR_binary_accuracy: 0.9879
Epoch 3/10
92/92 [==============================] - 1s 7ms/step - loss: -5.3788 - KR: 5.7838 - HingeMargin: 0.0405 - HKR_binary_accuracy: 0.9858 - val_loss: -5.6435 - val_KR: 5.9555 - val_HingeMargin: 0.0334 - val_HKR_binary_accuracy: 0.9860
Epoch 4/10
92/92 [==============================] - 1s 8ms/step - loss: -5.6172 - KR: 5.9671 - HingeMargin: 0.0350 - HKR_binary_accuracy: 0.9874 - val_loss: -5.7918 - val_KR: 6.0764 - val_HingeMargin: 0.0308 - val_HKR_binary_accuracy: 0.9879
Epoch 5/10
92/92 [==============================] - 1s 7ms/step - loss: -5.7598 - KR: 6.0676 - HingeMargin: 0.0308 - HKR_binary_accuracy: 0.9891 - val_loss: -5.8711 - val_KR: 6.1062 - val_HingeMargin: 0.0264 - val_HKR_binary_accuracy: 0.9899
Epoch 6/10
92/92 [==============================] - 1s 7ms/step - loss: -5.7647 - KR: 6.0829 - HingeMargin: 0.0318 - HKR_binary_accuracy: 0.9879 - val_loss: -5.8503 - val_KR: 6.1463 - val_HingeMargin: 0.0315 - val_HKR_binary_accuracy: 0.9879
Epoch 7/10
92/92 [==============================] - 1s 7ms/step - loss: -5.8007 - KR: 6.1082 - HingeMargin: 0.0307 - HKR_binary_accuracy: 0.9884 - val_loss: -5.8470 - val_KR: 6.1179 - val_HingeMargin: 0.0296 - val_HKR_binary_accuracy: 0.9879
Epoch 8/10
92/92 [==============================] - 1s 7ms/step - loss: -5.8268 - KR: 6.1185 - HingeMargin: 0.0292 - HKR_binary_accuracy: 0.9897 - val_loss: -5.8439 - val_KR: 6.1153 - val_HingeMargin: 0.0294 - val_HKR_binary_accuracy: 0.9889
Epoch 9/10
92/92 [==============================] - 1s 7ms/step - loss: -5.8865 - KR: 6.1548 - HingeMargin: 0.0268 - HKR_binary_accuracy: 0.9910 - val_loss: -5.8800 - val_KR: 6.1668 - val_HingeMargin: 0.0312 - val_HKR_binary_accuracy: 0.9874
Epoch 10/10
92/92 [==============================] - 1s 7ms/step - loss: -5.8578 - KR: 6.1453 - HingeMargin: 0.0288 - HKR_binary_accuracy: 0.9892 - val_loss: -5.9233 - val_KR: 6.1783 - val_HingeMargin: 0.0282 - val_HKR_binary_accuracy: 0.9889
|
data_512_a2/data_512_a2.ipynb | ###Markdown
A2: Bias in the Data
Xiaolu Qian
In this notebook, I examine bias in the demographic profile of the Crowdflower workers who labeled the Wikipedia toxicity and aggression datasets, and how their labeling behavior relates to those demographics.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
###Output
_____no_output_____
###Markdown
Step 0: Background and EDA
In this section, I will walk you through some visualizations of the demographic distributions of the Crowdflower workers who labeled toxicity and aggression. This simple EDA helped me think about the bias-in-data problem that I want to address. First, load the data related to toxicity and aggression.
###Code
# data for toxicity
demographic_worker = pd.read_csv('toxicity_data/toxicity_worker_demographics.tsv', sep = '\t')
annotated_comments = pd.read_csv('toxicity_data/toxicity_annotated_comments.tsv', sep = '\t')
annotations = pd.read_csv('toxicity_data/toxicity_annotations.tsv', sep = '\t')
# data for aggression
demographic_worker_agg = pd.read_csv('~/Desktop/aggression_data/aggression_worker_demographics.tsv', sep = '\t')
annotated_comments_agg = pd.read_csv('~/Desktop/aggression_data/aggression_annotated_comments.tsv', sep = '\t')
annotations_agg = pd.read_csv('~/Desktop/aggression_data/aggression_annotations.tsv', sep = '\t')
###Output
_____no_output_____
###Markdown
Show percentage of the gender distribution of the workers who labeled toxicity
###Code
demographic_worker.gender.value_counts('female')
###Output
_____no_output_____
###Markdown
Show percentage of the gender distribution of the workers who labeled aggression
###Code
demographic_worker_agg.gender.value_counts('female')
###Output
_____no_output_____
###Markdown
Show the count of workers in each gender for toxicity
###Code
sns.set(rc={'figure.figsize':(10,6)})
sns.countplot(x="gender", hue="gender", data=demographic_worker)
###Output
_____no_output_____
###Markdown
Show percentage of the age group distribution of the workers for toxicity through visualization
###Code
df= demographic_worker.age_group.value_counts('age_group')*100
worker_age_df = pd.DataFrame({'age_group':df.index, 'proportion':df.values})
df
sns.set(rc={'figure.figsize':(10,6)})
sns.barplot(x="age_group", y = "proportion", hue="age_group", data=worker_age_df,
hue_order = ['Under 18','18-30', '30-45', '45-60', 'Over 60'],
order = ['Under 18','18-30', '30-45', '45-60', 'Over 60'])
###Output
_____no_output_____
###Markdown
Show percentage of the age-bucket distribution of the workers who labeled aggression
###Code
agg_df= demographic_worker_agg.age_group.value_counts('age_group')*100
worker_age_df_agg = pd.DataFrame({'age_group':agg_df.index, 'proportion':agg_df.values})
agg_df
###Output
_____no_output_____
###Markdown
Step 1: Analysis
There are two analyses that I would like to do. First, I want to further analyze the demographic information about the Crowdflower workers that is available in the dataset, answering the following questions: How well does the demographic profile of the crowdworkers match that of the general population? What are some potential consequences of a skewed labeler demographic distribution on the behavior of a model trained on this dataset?
The second analysis explores relationships between worker demographics and labeling behavior. I would like to answer the following questions: Are female-identified labelers more or less likely to label comments as aggressive than male-identified labelers? If the labeling behaviors are different, what are some possible causes and consequences of this difference?
I will be using both the toxicity data and the aggression data for my analysis here.
Analysis 1
In the first analysis, I compare the demographic information of the Crowdflower workers who labeled the toxicity and aggression data with the demographic information of the general population. Here is an overview of the steps I took:
- Find the gender and age distributions of the general population from the UN's data
- Choose the year 2015, since the age and gender distributions stay fairly constant over a range of years
- Perform data manipulation to compare the demographic information of the general public with the crowdworkers'
Load the age distribution data downloaded from the UN's website
###Code
xls = pd.ExcelFile('PopulationAgeSex-20201019065126.xlsx')
population_age_df = pd.read_excel(xls, 'Data')
###Output
_____no_output_____
###Markdown
Perform data manipulation so that the UN age groups match the age buckets used in our Crowdflower worker data.
###Code
population_age_df = population_age_df.iloc[[13]]
population_age_df = population_age_df.drop(columns=['ISO 3166-1 numeric code', 'Location', 'Time', 'Sex'])
population_age_df = pd.melt(population_age_df, var_name='age_group', value_name='population')
# define new age group for the UN data
age_group_population = ['Under 18', 'Under 18', 'Under 18', 'Under 18',
'18-30', '18-30', '30-45', '30-45', '30-45',
'45-60', '45-60','45-60',
'Over 60', 'Over 60', 'Over 60', 'Over 60', 'Over 60', 'Over 60', 'Over 60', 'Over 60', 'Over 60']
population_age_df['age_group_population'] = age_group_population
population_age_df
# drop the unused column
new_population_age_df = population_age_df.drop(columns = ['age_group'])
# calculate the distribution of propotion of each age group
world_demographic = new_population_age_df.groupby('age_group_population').agg('sum')['population']/new_population_age_df.sum()['population']*100
world_demographic
# reformat the poportion into a dataframe
world_demographic = pd.DataFrame({'age_group':world_demographic.index, 'world_proportion':world_demographic.values})
world_demographic
###Output
_____no_output_____
###Markdown
We can now compare the age-group distribution of the workers who labeled toxicity with that of the general population.
###Code
worker_vs_world_age_df = world_demographic.merge(worker_age_df, on = 'age_group')
worker_vs_world_age_df
###Output
_____no_output_____
###Markdown
We now repeat the same procedure to compare the age-group distribution of the workers who labeled aggression with that of the general population.
###Code
agg_worker_vs_world_age_df = world_demographic.merge(worker_age_df_agg, on = 'age_group')
agg_worker_vs_world_age_df
###Output
_____no_output_____
###Markdown
Analysis 1 findings
From the UN's data, I obtained the demographic distribution by age group and gender for the year 2015. I did some data manipulation on the age groups to match the age groups in the dataset. For gender (https://population.un.org/wpp/DataQuery/), the male-to-female ratio is 101.7 : 100, i.e. the general population is split roughly evenly. Compared to this, the Crowdflower workers show a very uneven distribution across both gender and age group. Workers who labeled the toxicity data and the aggression data show the same skewed demographics. The demographic profile of the Crowdflower workers does not match that of the general population well at all: there are roughly twice as many male workers as female workers, and the 18-30 age group is heavily over-represented among workers relative to its share of the general population. This bias in the worker demographics is very obvious. We should pay attention to it, since it is possible that male vs. female or younger vs. older workers express different levels of sentiment when they label the wiki comments on a scale of -2 to 2 in both the toxicity and aggression datasets. Thus, in the second analysis, we go further and analyze this bias in the demographic profile of the Crowdflower workers.
Analysis 2
In this analysis, I further explore the bias in the demographic profile of the workers. I want to answer the question: are female-identified labelers more or less likely to label comments as aggressive than male-identified labelers? In order to answer this question, I followed the steps below:
- Merge the annotations with the corresponding worker demographic data, for both the aggression and the toxicity datasets
- Calculate the conditional probabilities P(aggression level | female worker) and P(aggression level | male worker) using Bayes' theorem
- Do the same thing for the toxicity data
Aggression
Merge annotations for aggression with the corresponding worker demographic data
###Code
annotations_agg_demo = annotations_agg.merge(demographic_worker_agg, on = 'worker_id')
annotations_agg_demo
###Output
_____no_output_____
###Markdown
Find the proportion of female and male workers who give an aggression score < 0 in the aggression data
###Code
annotations_agg_new = annotations_agg_demo[annotations_agg_demo['aggression_score'] < 0]
annotations_agg_new.gender.value_counts('female')
###Output
_____no_output_____
###Markdown
Calculate the joint probability for female/male and aggression score < 0
###Code
print('P(aggression < 0 and gender = female): {}'.format(len(annotations_agg_new) * 0.386576 / len(annotations_agg_demo)))
print('P(aggression < 0 and gender = male): {}'.format(len(annotations_agg_new) * 0.613188 / len(annotations_agg_demo)))
###Output
P(aggression < 0 and gender = male): 0.11236172641008796
###Markdown
Find the proportion of female and male workers who give the extreme aggression score of -2
###Code
annotations_agg_neg2 = annotations_agg_demo[annotations_agg_demo['aggression_score'] == -2]
annotations_agg_neg2.gender.value_counts('female')
print('P(aggression = -2 and gender = female): {}'.format(len(annotations_agg_neg2) * 0.364790 / len(annotations_agg_demo)))
print('P(aggression = -2 and gender = male): {}'.format(len(annotations_agg_neg2) * 0.634996 / len(annotations_agg_demo)))
###Output
P(aggression = -2 and gender = male): 0.027779785359444732
###Markdown
Below are the worker gender proportions, P(gender = female/male), in the aggression dataset, which we calculated before.
###Code
annotations_agg_demo.gender.value_counts('female')
###Output
_____no_output_____
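###Markdown
Before walking through Bayes' theorem below, the same conditional probabilities can be sanity-checked directly with a groupby on the merged dataframe (a quick sketch):
###Code
# P(aggression_score < 0 | gender) and P(aggression_score = -2 | gender), computed directly
print(annotations_agg_demo.groupby('gender')['aggression_score'].apply(lambda s: (s < 0).mean()))
print(annotations_agg_demo.groupby('gender')['aggression_score'].apply(lambda s: (s == -2).mean()))
###Output
_____no_output_____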
###Markdown
We use Bayes' theorem to compute the conditional probability of giving a low aggression score, given the worker's gender:
\begin{equation*}P(aggression < 0 | gender = female/male) = \frac{P(aggression < 0 \cap gender = female/male)}{P(gender = female/male)}\end{equation*}
\begin{equation*}P(aggression < 0 | gender = female) = \frac{0.0708369}{0.360138} = 0.19667\end{equation*}
\begin{equation*}P(aggression < 0 | gender = male) = \frac{0.1123617}{0.639765} = 0.1756\end{equation*}
We also calculate the conditional probabilities for the extreme aggression score:
\begin{equation*}P(aggression = -2 | gender = female) = \frac{0.016}{0.360138} = 0.044\end{equation*}
\begin{equation*}P(aggression = -2 | gender = male) = \frac{0.028}{0.639765} = 0.044\end{equation*}
Toxicity
We now do the same thing for the toxicity data.
###Code
annotations_demo = annotations.merge(demographic_worker, on = 'worker_id')
###Output
_____no_output_____
###Markdown
Find the proportion of female and male workers who give a toxicity score < 0
###Code
annotations_new = annotations_demo[annotations_demo['toxicity_score'] < 0]
annotations_new.gender.value_counts('female')
###Output
_____no_output_____
###Markdown
Calculate the joint probability for female/male and toxicity score < 0
###Code
print('P(toxicity < 0 and gender = female): {}'.format(len(annotations_new) * 0.366003 / len(annotations_demo)))
print('P(toxicity < 0 and gender = male): {}'.format(len(annotations_new) * 0.633697 / len(annotations_demo)))
###Output
P(toxicity < 0 and gender = male): 0.09235207367952158
###Markdown
Find the proportion of female and male workers who give the extreme toxicity score of -2
###Code
annotations_neg2 = annotations_demo[annotations_demo['toxicity_score'] == -2]
annotations_neg2.gender.value_counts('female')
print('P(toxicity = -2 and gender = female): {}'.format(len(annotations_neg2) * 0.391500 / len(annotations_demo)))
print('P(toxicity = -2 and gender = male): {}'.format(len(annotations_neg2) * 0.608083 / len(annotations_demo)))
###Output
P(toxicity = -2 and gender = male): 0.01623143771098515
###Markdown
Below are the worker gender proportions, P(gender = female/male), in the toxicity dataset, which we calculated before.
###Code
annotations_demo.gender.value_counts('female')
###Output
_____no_output_____ |
docs/tutorials/nb_distribution_statistics.ipynb | ###Markdown
Univariate Distribution Similarity
[](https://colab.research.google.com/github/ing-bank/probatus/blob/master/docs/tutorials/nb_distribution_statistics.ipynb)
There are many situations when you want to perform univariate distribution comparison of a given feature, e.g. stability of the feature over different months. In order to do that, you can use statistical tests. In this tutorial we present how to easily do this using the `DistributionStatistics` class, and with the statistical tests directly.
The available statistical tests in `probatus.stat_tests` are:
- Epps-Singleton ('ES')
- Kolmogorov-Smirnov statistic ('KS')
- Population Stability Index ('PSI')
- Shapiro-Wilk based difference statistic ('SW')
- Anderson-Darling TS ('AD')
You can perform all these tests using a convenient wrapper class called `DistributionStatistics`. In this tutorial we will focus on how to perform two useful tests: Population Stability Index (widely applied in banking industry) and Kolmogorov-Smirnov.
Setup
###Code
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from probatus.binning import AgglomerativeBucketer, SimpleBucketer, QuantileBucketer
from probatus.stat_tests import DistributionStatistics, psi, ks
###Output
_____no_output_____
###Markdown
Let's define some test distributions and visualize them. For these examples, we will use a normal distribution and a shifted version of the same distribution.
###Code
counts = 1000
np.random.seed(0)
d1 = pd.Series(np.random.normal(size=counts), name='feature_1')
d2 = pd.Series(np.random.normal(loc=0.5, size=counts), name='feature_1')
from probatus.utils.plots import plot_distributions_of_feature
feature_distributions = [d1, d2]
sample_names = ['expected', 'actual']
plot_distributions_of_feature(feature_distributions, sample_names=sample_names, plot_perc_outliers_removed=0.01)
###Output
_____no_output_____
###Markdown
Binning - QuantileBucketer
To visualize the data, we will bin the data using a quantile bucketer, available in the `probatus.binning` module. Binning is used by all the `stats_tests` in order to group observations.
###Code
bins = 10
myBucketer = QuantileBucketer(bins)
d1_bincounts = myBucketer.fit_compute(d1)
d2_bincounts = myBucketer.compute(d2)
print("Bincounts for d1 and d2:")
print(d1_bincounts)
print(d2_bincounts)
###Output
Bincounts for d1 and d2:
[100 100 100 100 100 100 100 100 100 100]
[ 25 62 50 68 76 90 84 169 149 217]
###Markdown
Let's plot the distribution for which we will calculate the statistics.
###Code
plt.figure(figsize=(20,5))
plt.bar(range(0, len(d1_bincounts)), d1_bincounts, label='d1: expected')
plt.bar(range(0, len(d2_bincounts)), d2_bincounts, label='d2: actual', alpha=0.5)
plt.title('PSI (bucketed)', fontsize=16, fontweight='bold')
plt.legend(fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
By visualizing the bins, we can already notice that the distributions are different. Let's use the statistical test to prove that.
PSI - Population Stability Index
The population stability index ([Karakoulas, 2004](https://cms.rmau.org/uploadedFiles/Credit_Risk/Library/RMA_Journal/Other_Topics_(1998_to_present)/Empirical%20Validation%20of%20Retail%20Credit-Scoring%20Models.pdf)) has long been used to evaluate distribution similarity in the banking industry while developing credit decision models.
In `probatus` we have implemented the PSI according to [Yurdakul 2018](https://scholarworks.wmich.edu/cgi/viewcontent.cgi?article=4249&context=dissertations), which derives a p-value based on the hard-to-interpret PSI statistic. Using the p-value is a more reliable choice, because the banking industry-standard PSI critical values of 0.1 and 0.25 are unreliable heuristics: there is a strong dependency on sample sizes and number of bins. Aside from these heuristics, the PSI value is not easily interpretable in the context of common statistical frameworks (like a p-value or confidence levels).
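For reference, the PSI statistic itself is computed from the binned proportions: $PSI = \sum_{i=1}^{B} (a_i - e_i)\,\ln(a_i / e_i)$, where $e_i$ and $a_i$ are the fractions of the expected and actual samples falling into bin $i$.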
###Code
psi_value, p_value = psi(d1_bincounts, d2_bincounts, verbose=True)
###Output
PSI = 0.32743036141828374
PSI: Critical values defined according to de facto industry standard:
PSI > 0.25: Significant distribution change; investigate.
PSI: Critical values defined according to Yurdakul (2018):
99.9% confident distributions have changed.
###Markdown
Based on the above test, the distributions of the two samples differ significantly. Not only is the PSI statistic above the commonly used critical value, but the p-value also shows very high confidence.
PSI with DistributionStatistics
Using the `DistributionStatistics` class, one can apply the above test without the need to manually perform the binning. We initialize a `DistributionStatistics` instance with the desired test, binning_strategy (or choose `"default"` to choose the test's most appropriate binning strategy) and the number of bins. Then we start the test with the unbinned values as input.
###Code
distribution_test = DistributionStatistics("psi", binning_strategy="default", bin_count=10)
psi_value, p_value = distribution_test.compute(d1, d2, verbose=True)
###Output
PSI = 0.32743036141828374
PSI: Critical values defined according to de facto industry standard:
PSI > 0.25: Significant distribution change; investigate.
PSI: Critical values defined according to Yurdakul (2018):
99.9% confident distributions have changed.
###Markdown
KS: Kolmogorov-Smirnov with DistributionStatistics
The Kolmogorov-Smirnov test compares two distributions by calculating the maximum difference of the two samples' distribution functions, as illustrated by the black arrow in the following figure. The KS test is available in `probatus.stat_tests.ks`.
The main advantage of this method is its sensitivity to differences in both location and shape of the empirical cumulative distribution functions of the two samples.
The main disadvantages are that it only works for continuous distributions (unless modified, e.g. see [Jeng 2006](https://bmcmedresmethodol.biomedcentral.com/track/pdf/10.1186/1471-2288-6-45)); that in large samples, small and unimportant differences can be statistically significant ([Taplin & Hunt 2019](https://www.mdpi.com/2227-9091/7/2/53/pdf)); and that in small samples, large and important differences can be statistically insignificant ([Taplin & Hunt 2019](https://www.mdpi.com/2227-9091/7/2/53/pdf)).
As before, using the test requires you to perform the binning beforehand.
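For reference, the KS statistic is the largest vertical gap between the two empirical cumulative distribution functions: $D = \sup_x \left| F_{expected}(x) - F_{actual}(x) \right|$.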
###Code
k_value, p_value = ks(d1, d2, verbose=True)
###Output
KS: pvalue = 2.104700973377179e-27
KS: Null hypothesis rejected with 99% confidence. Distributions very different.
###Markdown
Again, we can also choose to combine the binning and the statistical test using the `DistributionStatistics` class.
###Code
distribution_test = DistributionStatistics("ks", binning_strategy=None)
ks_value, p_value = distribution_test.compute(d1, d2, verbose=True)
###Output
KS: pvalue = 2.104700973377179e-27
KS: Null hypothesis rejected with 99% confidence. Distributions very different.
###Markdown
AutoDist
###Code
from probatus.stat_tests import AutoDist
###Output
_____no_output_____
###Markdown
Multiple statistics can automatically be calculated using `AutoDist`. To show this, let's create two new dataframes with two features each.
###Code
size, n_features = 100, 2
df1 = pd.DataFrame(np.random.normal(size=(size, n_features)), columns=[f'feat_{x}' for x in range(n_features)])
df2 = pd.DataFrame(np.random.normal(size=(size, n_features)), columns=[f'feat_{x}' for x in range(n_features)])
###Output
_____no_output_____
###Markdown
We can now specify the statistical tests we want to perform and the binning strategies to perform. We can also set both of these variables to `'all'` or binning strategies to `'default'` to use the default binning strategy for every chosen statistical test.
###Code
statistical_tests = ["KS", "PSI"]
binning_strategies = "default"
###Output
_____no_output_____
###Markdown
Let's compute the statistics and their p_values:
###Code
myAutoDist = AutoDist(statistical_tests=statistical_tests, binning_strategies=binning_strategies, bin_count=10)
myAutoDist.compute(df1, df2)
###Output
100%|██████████| 4/4 [00:00<00:00, 140.75it/s]
###Markdown
Univariate Distribution Similarity
[](https://colab.research.google.com/github/ing-bank/probatus/blob/master/docs/tutorials/nb_distribution_statistics.ipynb)
There are many situations when you want to perform univariate distribution comparison of a given feature, e.g. stability of the feature over different months. In order to do that, you can use statistical tests. In this tutorial we present how to easily do this using the `DistributionStatistics` class, and with the statistical tests directly.
Available tests:
- `'ES'`: Epps-Singleton
- `'KS'`: Kolmogorov-Smirnov
- `'PSI'`: Population Stability Index
- `'SW'`: Shapiro-Wilk
- `'AD'`: Anderson-Darling
Details on the available tests can be found [here](https://ing-bank.github.io/probatus/api/stat_tests.html#available-tests). You can perform all these tests using a convenient wrapper class called `DistributionStatistics`. In this tutorial we will focus on how to perform two useful tests: the Population Stability Index (widely applied in the banking industry) and Kolmogorov-Smirnov.
Setup
###Code
%%capture
!pip install probatus
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from probatus.binning import AgglomerativeBucketer, SimpleBucketer, QuantileBucketer
from probatus.stat_tests import DistributionStatistics, psi, ks
###Output
_____no_output_____
###Markdown
Let's define some test distributions and visualize them. For these examples, we will use a normal distribution and a shifted version of the same distribution.
###Code
counts = 1000
np.random.seed(0)
d1 = pd.Series(np.random.normal(size=counts), name='feature_1')
d2 = pd.Series(np.random.normal(loc=0.5, size=counts), name='feature_1')
from probatus.utils.plots import plot_distributions_of_feature
feature_distributions = [d1, d2]
sample_names = ['expected', 'actual']
plot_distributions_of_feature(feature_distributions, sample_names=sample_names, plot_perc_outliers_removed=0.01)
###Output
_____no_output_____
###Markdown
Binning - QuantileBucketer
To visualize the data, we will bin the data using a quantile bucketer, available in the `probatus.binning` module. Binning is used by all the `stats_tests` in order to group observations.
###Code
bins = 10
myBucketer = QuantileBucketer(bins)
d1_bincounts = myBucketer.fit_compute(d1)
d2_bincounts = myBucketer.compute(d2)
print("Bincounts for d1 and d2:")
print(d1_bincounts)
print(d2_bincounts)
###Output
Bincounts for d1 and d2:
[100 100 100 100 100 100 100 100 100 100]
[ 25 62 50 68 76 90 84 169 149 227]
###Markdown
Let's plot the distribution for which we will calculate the statistics.
###Code
plt.figure(figsize=(20,5))
plt.bar(range(0, len(d1_bincounts)), d1_bincounts, label='d1: expected')
plt.bar(range(0, len(d2_bincounts)), d2_bincounts, label='d2: actual', alpha=0.5)
plt.title('PSI (bucketed)', fontsize=16, fontweight='bold')
plt.legend(fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
By visualizing the bins, we can already notice that the distributions are different. Let's use the statistical test to prove that.
PSI - Population Stability Index
The population stability index ([Karakoulas, 2004](https://cms.rmau.org/uploadedFiles/Credit_Risk/Library/RMA_Journal/Other_Topics_(1998_to_present)/Empirical%20Validation%20of%20Retail%20Credit-Scoring%20Models.pdf)) has long been used to evaluate distribution similarity in the banking industry, while developing credit decision models.
In `probatus` we have implemented the PSI according to [Yurdakul 2018](https://scholarworks.wmich.edu/cgi/viewcontent.cgi?article=4249&context=dissertations), which derives a p-value, based on the hard to interpret PSI statistic. Using the p-value is a more reliable choice, because the banking industry-standard PSI critical values of 0.1 and 0.25 are unreliable heuristics as there is a strong dependency on sample sizes and number of bins. Aside from these heuristics, the PSI value is not easily interpretable in the context of common statistical frameworks (like a p-value or confidence levels).
###Code
psi_value, p_value = psi(d1_bincounts, d2_bincounts, verbose=True)
###Output
PSI = 0.33942407655561885
PSI: Critical values defined according to de facto industry standard:
PSI > 0.25: Significant distribution change; investigate.
PSI: Critical values defined according to Yurdakul (2018):
99.9% confident distributions have changed.
###Markdown
Based on the above test, the distributions of the two samples differ significantly. Not only is the PSI statistic above the commonly used critical value, but the p-value also shows very high confidence.
PSI with DistributionStatistics
Using the `DistributionStatistics` class, one can apply the above test without the need to manually perform the binning. We initialize a `DistributionStatistics` instance with the desired test, binning_strategy (or choose `"default"` to choose the test's most appropriate binning strategy) and the number of bins. Then we start the test with the unbinned values as input.
###Code
distribution_test = DistributionStatistics("psi", binning_strategy="default", bin_count=10)
psi_value, p_value = distribution_test.compute(d1, d2, verbose=True)
###Output
PSI = 0.33942407655561885
PSI: Critical values defined according to de facto industry standard:
PSI > 0.25: Significant distribution change; investigate.
PSI: Critical values defined according to Yurdakul (2018):
99.9% confident distributions have changed.
###Markdown
KS: Kolmogorov-Smirnov with DistributionStatistics
The Kolmogorov-Smirnov test compares two distributions by calculating the maximum difference of the two samples' distribution functions, as illustrated by the black arrow in the following figure. The KS test is available in `probatus.stat_tests.ks`.
The main advantage of this method is its sensitivity to differences in both location and shape of the empirical cumulative distribution functions of the two samples.
The main disadvantages are that: it works for continuous distributions (unless modified, e.g. see ([Jeng 2006](https://bmcmedresmethodol.biomedcentral.com/track/pdf/10.1186/1471-2288-6-45))); in large samples, small and unimportant differences can be statistically significant ([Taplin & Hunt 2019](https://www.mdpi.com/2227-9091/7/2/53/pdf)); and finally in small samples, large and important differences can be statistically insignificant ([Taplin & Hunt 2019](https://www.mdpi.com/2227-9091/7/2/53/pdf)).
As before, using the test requires you to perform the binning beforehand.
###Code
k_value, p_value = ks(d1, d2, verbose=True)
###Output
KS: pvalue = 2.104700973377179e-27
KS: Null hypothesis rejected with 99% confidence. Distributions very different.
###Markdown
Again, we can also choose to combine the binning and the statistical test using the `DistributionStatistics` class.
###Code
distribution_test = DistributionStatistics("ks", binning_strategy=None)
ks_value, p_value = distribution_test.compute(d1, d2, verbose=True)
###Output
KS: pvalue = 2.104700973377179e-27
KS: Null hypothesis rejected with 99% confidence. Distributions very different.
###Markdown
AutoDist
###Code
from probatus.stat_tests import AutoDist
###Output
_____no_output_____
###Markdown
Multiple statistics can automatically be calculated using `AutoDist`. To show this, let's create two new dataframes with two features each.
###Code
size, n_features = 100, 2
df1 = pd.DataFrame(np.random.normal(size=(size, n_features)), columns=[f'feat_{x}' for x in range(n_features)])
df2 = pd.DataFrame(np.random.normal(size=(size, n_features)), columns=[f'feat_{x}' for x in range(n_features)])
###Output
_____no_output_____
###Markdown
We can now specify the statistical tests we want to perform and the binning strategies to perform. We can also set both of these variables to `'all'` or binning strategies to `'default'` to use the default binning strategy for every chosen statistical test.
###Code
statistical_tests = ["KS", "PSI"]
binning_strategies = "default"
###Output
_____no_output_____
###Markdown
Let's compute the statistics and their p_values:
###Code
myAutoDist = AutoDist(statistical_tests=statistical_tests, binning_strategies=binning_strategies, bin_count=10)
myAutoDist.compute(df1, df2)
###Output
100%|██████████| 2/2 [00:00<00:00, 141.92it/s]
100%|██████████| 2/2 [00:00<00:00, 139.13it/s]
###Markdown
Univariate Distribution Similarity
[](https://colab.research.google.com/github/ing-bank/probatus/blob/master/docs/tutorials/nb_distribution_statistics.ipynb)
There are many situations when you want to perform univariate distribution comparison of a given feature, e.g. stability of the feature over different months. In order to do that, you can use statistical tests. In this tutorial we present how to easily do this using the `DistributionStatistics` class, and with the statistical tests directly.
The available statistical tests in `probatus.stat_tests` are:
- Epps-Singleton ('ES')
- Kolmogorov-Smirnov statistic ('KS')
- Population Stability Index ('PSI')
- Shapiro-Wilk based difference statistic ('SW')
- Anderson-Darling TS ('AD')
You can perform all these tests using a convenient wrapper class called `DistributionStatistics`. In this tutorial we will focus on how to perform two useful tests: Population Stability Index (widely applied in banking industry) and Kolmogorov-Smirnov.
Setup
###Code
%%capture
!pip install probatus
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from probatus.binning import AgglomerativeBucketer, SimpleBucketer, QuantileBucketer
from probatus.stat_tests import DistributionStatistics, psi, ks
###Output
_____no_output_____
###Markdown
Let's define some test distributions and visualize them. For these examples, we will use a normal distribution and a shifted version of the same distribution.
###Code
counts = 1000
np.random.seed(0)
d1 = pd.Series(np.random.normal(size=counts), name='feature_1')
d2 = pd.Series(np.random.normal(loc=0.5, size=counts), name='feature_1')
from probatus.utils.plots import plot_distributions_of_feature
feature_distributions = [d1, d2]
sample_names = ['expected', 'actual']
plot_distributions_of_feature(feature_distributions, sample_names=sample_names, plot_perc_outliers_removed=0.01)
###Output
_____no_output_____
###Markdown
Binning - QuantileBucketerTo visualize the data, we will bin the data using a quantile bucketer, available in the `probatus.binning` module.Binning is used by all the `stats_tests` in order to group observations.
###Code
bins = 10
myBucketer = QuantileBucketer(bins)
d1_bincounts = myBucketer.fit_compute(d1)
d2_bincounts = myBucketer.compute(d2)
print("Bincounts for d1 and d2:")
print(d1_bincounts)
print(d2_bincounts)
###Output
Bincounts for d1 and d2:
[100 100 100 100 100 100 100 100 100 100]
[ 25 62 50 68 76 90 84 169 149 227]
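###Markdown
To make the quantile binning explicit, the decile edges of the expected sample `d1` can be rebuilt with plain NumPy and both samples counted against them. This is only an illustrative sketch of what the bucketer does; the exact handling of observations at the bin edges may differ from the `QuantileBucketer` implementation, so the counts can deviate slightly.
###Code
# illustrative sketch: decile edges from d1, with open-ended outer bins
edges = np.quantile(d1, np.linspace(0, 1, bins + 1))

def count_per_bin(values, edges):
    # assign each value to a bin index; values beyond the outer edges
    # are pushed into the first / last bin
    idx = np.clip(np.searchsorted(edges, values, side='right') - 1, 0, len(edges) - 2)
    return np.bincount(idx, minlength=len(edges) - 1)

print(count_per_bin(d1, edges))
print(count_per_bin(d2, edges))
###Output
_____no_output_____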
###Markdown
Let's plot the distribution for which we will calculate the statistics.
###Code
plt.figure(figsize=(20,5))
plt.bar(range(0, len(d1_bincounts)), d1_bincounts, label='d1: expected')
plt.bar(range(0, len(d2_bincounts)), d2_bincounts, label='d2: actual', alpha=0.5)
plt.title('PSI (bucketed)', fontsize=16, fontweight='bold')
plt.legend(fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
By visualizing the bins, we can already notice that the distributions are different.Let's use the statistical test to prove that. PSI - Population Stability IndexThe population stability index ([Karakoulas, 2004](https://cms.rmau.org/uploadedFiles/Credit_Risk/Library/RMA_Journal/Other_Topics_(1998_to_present)/Empirical%20Validation%20of%20Retail%20Credit-Scoring%20Models.pdf)) has long been used to evaluate distribution similarity in the banking industry, while developing credit decision models.In `probatus` we have implemented the PSI according to [Yurdakul 2018](https://scholarworks.wmich.edu/cgi/viewcontent.cgi?article=4249&context=dissertations), which derives a p-value, based on the hard to interpret PSI statistic. Using the p-value is a more reliable choice, because the banking industry-standard PSI critical values of 0.1 and 0.25 are unreliable heuristics as there is a strong dependency on sample sizes and number of bins. Aside from these heuristics, the PSI value is not easily interpretable in the context of common statistical frameworks (like a p-value or confidence levels).
###Code
psi_value, p_value = psi(d1_bincounts, d2_bincounts, verbose=True)
###Output
PSI = 0.33942407655561885
PSI: Critical values defined according to de facto industry standard:
PSI > 0.25: Significant distribution change; investigate.
PSI: Critical values defined according to Yurdakul (2018):
99.9% confident distributions have changed.
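###Markdown
For reference, the PSI statistic reported above can be recomputed by hand from the two bin-count arrays using the standard PSI definition, i.e. the sum over bins of (actual% - expected%) * ln(actual% / expected%). The sketch below is only an illustration (it assumes no empty bins and does not derive the p-value, which requires the distributional results from Yurdakul 2018); it should reproduce the PSI value printed above up to floating-point precision.
###Code
# hand-rolled PSI from the bin counts (illustrative sketch, assumes no empty bins)
expected_perc = d1_bincounts / np.sum(d1_bincounts)
actual_perc = d2_bincounts / np.sum(d2_bincounts)
psi_by_hand = np.sum((actual_perc - expected_perc) * np.log(actual_perc / expected_perc))
print(psi_by_hand)
###Output
_____no_output_____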
###Markdown
Based on the above test, the distributions of the two samples differ significantly. Not only is the PSI statistic above the commonly used critical value, but the p-value also shows a very high confidence. PSI with DistributionStatistics Using the `DistributionStatistics` class one can apply the above test without the need to manually perform the binning. We initialize a `DistributionStatistics` instance with the desired test, binning_strategy (or choose `"default"` to choose the test's most appropriate binning strategy) and the number of bins. Then we start the test with the unbinned values as input.
###Code
distribution_test = DistributionStatistics("psi", binning_strategy="default", bin_count=10)
psi_value, p_value = distribution_test.compute(d1, d2, verbose=True)
###Output
PSI = 0.33942407655561885
PSI: Critical values defined according to de facto industry standard:
PSI > 0.25: Significant distribution change; investigate.
PSI: Critical values defined according to Yurdakul (2018):
99.9% confident distributions have changed.
###Markdown
KS: Kolmogorov-Smirnov with DistributionStatisticsThe Kolmogorov-Smirnov test compares two distributions by calculating the maximum difference of the two samples' distribution functions, as illustrated by the black arrow in the following figure. The KS test is available in `probatus.stat_tests.ks`.The main advantage of this method is its sensitivity to differences in both location and shape of the empirical cumulative distribution functions of the two samples.The main disadvantages are that: it works for continuous distributions (unless modified, e.g. see ([Jeng 2006](https://bmcmedresmethodol.biomedcentral.com/track/pdf/10.1186/1471-2288-6-45))); in large samples, small and unimportant differences can be statistically significant ([Taplin & Hunt 2019](https://www.mdpi.com/2227-9091/7/2/53/pdf)); and finally in small samples, large and important differences can be statistically insignificant ([Taplin & Hunt 2019](https://www.mdpi.com/2227-9091/7/2/53/pdf)). As before, using the test requires you to perform the binning beforehand
###Code
k_value, p_value = ks(d1, d2, verbose=True)
###Output
KS: pvalue = 2.104700973377179e-27
KS: Null hypothesis rejected with 99% confidence. Distributions very different.
###Markdown
Again, we can also choose to combine the binning and the statistical test using the `DistributionStatistics` class.
###Code
distribution_test = DistributionStatistics("ks", binning_strategy=None)
ks_value, p_value = distribution_test.compute(d1, d2, verbose=True)
###Output
KS: pvalue = 2.104700973377179e-27
KS: Null hypothesis rejected with 99% confidence. Distributions very different.
###Markdown
AutoDist
###Code
from probatus.stat_tests import AutoDist
###Output
_____no_output_____
###Markdown
Multiple statistics can automatically be calculated using `AutoDist`. To show this, let's create two new dataframes with two features each.
###Code
size, n_features = 100, 2
df1 = pd.DataFrame(np.random.normal(size=(size, n_features)), columns=[f'feat_{x}' for x in range(n_features)])
df2 = pd.DataFrame(np.random.normal(size=(size, n_features)), columns=[f'feat_{x}' for x in range(n_features)])
###Output
_____no_output_____
###Markdown
We can now specify the statistical tests we want to perform and the binning strategies to perform. We can also set both of these variables to `'all'` or binning strategies to `'default'` to use the default binning strategy for every chosen statistical test.
###Code
statistical_tests = ["KS", "PSI"]
binning_strategies = "default"
###Output
_____no_output_____
###Markdown
Let's compute the statistics and their p_values:
###Code
myAutoDist = AutoDist(statistical_tests=statistical_tests, binning_strategies=binning_strategies, bin_count=10)
myAutoDist.compute(df1, df2)
###Output
100%|██████████| 2/2 [00:00<00:00, 141.92it/s]
100%|██████████| 2/2 [00:00<00:00, 139.13it/s]
###Markdown
Univariate Distribution Similarity[](https://colab.research.google.com/github/ing-bank/probatus/blob/master/docs/tutorials/nb_distribution_statistics.ipynb) There are many situations when you want to perform univariate distribution comparison of a given feature, e.g. stability of the feature over different months.In order to do that, you can use statistical tests. In this tutorial we present how to easily do this using the `DistributionStatistics` class, and with the statistical tests directly.The available statistical tests in `probatus.stat_tests` are: - Epps-Singleton ('ES')- Kolmogorov-Smirnov statistic ('KS')- Population Stability Index ('PSI')- Shapiro-Wilk based difference statistic ('SW')- Anderson-Darling TS ('AD')You can perform all these tests using a convenient wrapper class called `DistributionStatistics`.In this tutorial we will focus on how to perform two useful tests: Population Stability Index (widely applied in banking industry) and Kolmogorov-Smirnov. Setup
###Code
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from probatus.binning import AgglomerativeBucketer, SimpleBucketer, QuantileBucketer
from probatus.stat_tests import DistributionStatistics, psi, ks
###Output
_____no_output_____
###Markdown
Let's define some test distributions and visualize them. For these examples, we will use a normal distribution and a shifted version of the same distribution.
###Code
counts = 1000
np.random.seed(0)
d1 = pd.Series(np.random.normal(size=counts), name='feature_1')
d2 = pd.Series(np.random.normal(loc=0.5, size=counts), name='feature_1')
from probatus.utils.plots import plot_distributions_of_feature
feature_distributions = [d1, d2]
sample_names = ['expected', 'actual']
plot_distributions_of_feature(feature_distributions, sample_names=sample_names, plot_perc_outliers_removed=0.01)
###Output
_____no_output_____
###Markdown
Binning - QuantileBucketerTo visualize the data, we will bin the data using a quantile bucketer, available in the `probatus.binning` module.Binning is used by all the `stats_tests` in order to group observations.
###Code
bins = 10
myBucketer = QuantileBucketer(bins)
d1_bincounts = myBucketer.fit_compute(d1)
d2_bincounts = myBucketer.compute(d2)
print("Bincounts for d1 and d2:")
print(d1_bincounts)
print(d2_bincounts)
###Output
Bincounts for d1 and d2:
[100 100 100 100 100 100 100 100 100 100]
[ 25 62 50 68 76 90 84 169 149 217]
###Markdown
Let's plot the distribution for which we will calculate the statistics.
###Code
plt.figure(figsize=(20,5))
plt.bar(range(0, len(d1_bincounts)), d1_bincounts, label='d1: expected')
plt.bar(range(0, len(d2_bincounts)), d2_bincounts, label='d2: actual', alpha=0.5)
plt.title('PSI (bucketed)', fontsize=16, fontweight='bold')
plt.legend(fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
By visualizing the bins, we can already notice that the distributions are different.Let's use the statistical test to prove that. PSI - Population Stability IndexThe population stability index ([Karakoulas, 2004](https://cms.rmau.org/uploadedFiles/Credit_Risk/Library/RMA_Journal/Other_Topics_(1998_to_present)/Empirical%20Validation%20of%20Retail%20Credit-Scoring%20Models.pdf)) has long been used to evaluate distribution similarity in the banking industry, while developing credit decision models.In `probatus` we have implemented the PSI according to [Yurdakul 2018](https://scholarworks.wmich.edu/cgi/viewcontent.cgi?article=4249&context=dissertations), which derives a p-value, based on the hard to interpret PSI statistic. Using the p-value is a more reliable choice, because the banking industry-standard PSI critical values of 0.1 and 0.25 are unreliable heuristics, as there is a strong dependency on sample sizes and number of bins. Aside from these heuristics, the PSI value is not easily interpretable in the context of common statistical frameworks (like a p-value or confidence levels).
###Code
psi_value, p_value = psi(d1_bincounts, d2_bincounts, verbose=True)
###Output
PSI = 0.32743036141828374
PSI: Critical values defined according to de facto industry standard:
PSI > 0.25: Significant distribution change; investigate.
PSI: Critical values defined according to Yurdakul (2018):
99.9% confident distributions have changed.
###Markdown
Based on the above test, the distributions of the two samples differ significantly. Not only is the PSI statistic above the commonly used critical value, but the p-value also shows a very high confidence. PSI with DistributionStatistics Using the `DistributionStatistics` class one can apply the above test without the need to manually perform the binning. We initialize a `DistributionStatistics` instance with the desired test, binning_strategy (or choose `"default"` to choose the test's most appropriate binning strategy) and the number of bins. Then we start the test with the unbinned values as input.
###Code
distribution_test = DistributionStatistics("psi", binning_strategy="default", bin_count=10)
psi_value, p_value = distribution_test.compute(d1, d2, verbose=True)
###Output
PSI = 0.32743036141828374
PSI: Critical values defined according to de facto industry standard:
PSI > 0.25: Significant distribution change; investigate.
PSI: Critical values defined according to Yurdakul (2018):
99.9% confident distributions have changed.
###Markdown
KS: Kolmogorov-Smirnov with DistributionStatisticsThe Kolmogorov-Smirnov test compares two distributions by calculating the maximum difference of the two samples' distribution functions, as illustrated by the black arrow in the following figure. The KS test is available in `probatus.stat_tests.ks`.The main advantage of this method is its sensitivity to differences in both location and shape of the empirical cumulative distribution functions of the two samples.The main disadvantages are that: it works for continuous distributions (unless modified, e.g. see ([Jeng 2006](https://bmcmedresmethodol.biomedcentral.com/track/pdf/10.1186/1471-2288-6-45))); in large samples, small and unimportant differences can be statistically significant ([Taplin & Hunt 2019](https://www.mdpi.com/2227-9091/7/2/53/pdf)); and finally in small samples, large and important differences can be statistically insignificant ([Taplin & Hunt 2019](https://www.mdpi.com/2227-9091/7/2/53/pdf)). As before, using the test requires you to perform the binning beforehand.
###Code
k_value, p_value = ks(d1, d2, verbose=True)
###Output
KS: pvalue = 2.104700973377179e-27
KS: Null hypothesis rejected with 99% confidence. Distributions very different.
###Markdown
Again, we can also choose to combine the binning and the statistical test using the `DistributionStatistics` class.
###Code
distribution_test = DistributionStatistics("ks", binning_strategy=None)
ks_value, p_value = distribution_test.compute(d1, d2, verbose=True)
###Output
KS: pvalue = 2.104700973377179e-27
KS: Null hypothesis rejected with 99% confidence. Distributions very different.
###Markdown
AutoDist
###Code
from probatus.stat_tests import AutoDist
###Output
_____no_output_____
###Markdown
Multiple statistics can automatically be calculated using `AutoDist`. To show this, let's create two new dataframes with two features each.
###Code
size, n_features = 100, 2
df1 = pd.DataFrame(np.random.normal(size=(size, n_features)), columns=[f'feat_{x}' for x in range(n_features)])
df2 = pd.DataFrame(np.random.normal(size=(size, n_features)), columns=[f'feat_{x}' for x in range(n_features)])
###Output
_____no_output_____
###Markdown
We can now specify the statistical tests we want to perform and the binning strategies to perform. We can also set both of these variables to `'all'` or binning strategies to `'default'` to use the default binning strategy for every chosen statistical test.
###Code
statistical_tests = ["KS", "PSI"]
binning_strategies = "default"
###Output
_____no_output_____
###Markdown
Let's compute the statistics and their p_values:
###Code
myAutoDist = AutoDist(statistical_tests=statistical_tests, binning_strategies=binning_strategies, bin_count=10)
myAutoDist.compute(df1, df2)
###Output
100%|██████████| 4/4 [00:00<00:00, 140.75it/s]
###Markdown
Univariate Distribution Similarity[](https://colab.research.google.com/github/ing-bank/probatus/blob/master/docs/tutorials/nb_distribution_statistics.ipynb) There are many situations when you want to perform univariate distribution comparison of a given feature, e.g. stability of the feature over different months.In order to do that, you can use statistical tests. In this tutorial we present how to easily do this using the `DistributionStatistics` class, and with the statistical tests directly.The available statistical tests in `probatus.stat_tests` are: - Epps-Singleton ('ES')- Kolmogorov-Smirnov statistic ('KS')- Population Stability Index ('PSI')- Shapiro-Wilk based difference statistic ('SW')- Anderson-Darling TS ('AD')You can perform all these tests using a convenient wrapper class called `DistributionStatistics`.In this tutorial we will focus on how to perform two useful tests: Population Stability Index (widely applied in banking industry) and Kolmogorov-Smirnov. Setup
###Code
%%capture
!pip install probatus
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from probatus.binning import AgglomerativeBucketer, SimpleBucketer, QuantileBucketer
from probatus.stat_tests import DistributionStatistics, psi, ks
###Output
_____no_output_____
###Markdown
Let's define some test distributions and visualize them. For these examples, we will use a normal distribution and a shifted version of the same distribution.
###Code
counts = 1000
np.random.seed(0)
d1 = pd.Series(np.random.normal(size=counts), name='feature_1')
d2 = pd.Series(np.random.normal(loc=0.5, size=counts), name='feature_1')
from probatus.utils.plots import plot_distributions_of_feature
feature_distributions = [d1, d2]
sample_names = ['expected', 'actual']
plot_distributions_of_feature(feature_distributions, sample_names=sample_names, plot_perc_outliers_removed=0.01)
###Output
_____no_output_____
###Markdown
Binning - QuantileBucketerTo visualize the data, we will bin the data using a quantile bucketer, available in the `probatus.binning` module.Binning is used by all the `stats_tests` in order to group observations.
###Code
bins = 10
myBucketer = QuantileBucketer(bins)
d1_bincounts = myBucketer.fit_compute(d1)
d2_bincounts = myBucketer.compute(d2)
print("Bincounts for d1 and d2:")
print(d1_bincounts)
print(d2_bincounts)
###Output
Bincounts for d1 and d2:
[100 100 100 100 100 100 100 100 100 100]
[ 25 62 50 68 76 90 84 169 149 217]
###Markdown
Let's plot the distribution for which we will calculate the statistics.
###Code
plt.figure(figsize=(20,5))
plt.bar(range(0, len(d1_bincounts)), d1_bincounts, label='d1: expected')
plt.bar(range(0, len(d2_bincounts)), d2_bincounts, label='d2: actual', alpha=0.5)
plt.title('PSI (bucketed)', fontsize=16, fontweight='bold')
plt.legend(fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
By visualizing the bins, we can already notice that the distributions are different.Let's use the statistical test to prove that. PSI - Population Stability IndexThe population stability index ([Karakoulas, 2004](https://cms.rmau.org/uploadedFiles/Credit_Risk/Library/RMA_Journal/Other_Topics_(1998_to_present)/Empirical%20Validation%20of%20Retail%20Credit-Scoring%20Models.pdf)) has long been used to evaluate distribution similarity in the banking industry, while developing credit decision models.In `probatus` we have implemented the PSI according to [Yurdakul 2018](https://scholarworks.wmich.edu/cgi/viewcontent.cgi?article=4249&context=dissertations), which derives a p-value, based on the hard to interpret PSI statistic. Using the p-value is a more reliable choice, because the banking industry-standard PSI critical values of 0.1 and 0.25 are unreliable heuristics, as there is a strong dependency on sample sizes and number of bins. Aside from these heuristics, the PSI value is not easily interpretable in the context of common statistical frameworks (like a p-value or confidence levels).
###Code
psi_value, p_value = psi(d1_bincounts, d2_bincounts, verbose=True)
###Output
PSI = 0.32743036141828374
PSI: Critical values defined according to de facto industry standard:
PSI > 0.25: Significant distribution change; investigate.
PSI: Critical values defined according to Yurdakul (2018):
99.9% confident distributions have changed.
###Markdown
Based on the above test, the distributions of the two samples differ significantly. Not only is the PSI statistic above the commonly used critical value, but the p-value also shows a very high confidence. PSI with DistributionStatistics Using the `DistributionStatistics` class one can apply the above test without the need to manually perform the binning. We initialize a `DistributionStatistics` instance with the desired test, binning_strategy (or choose `"default"` to choose the test's most appropriate binning strategy) and the number of bins. Then we start the test with the unbinned values as input.
###Code
distribution_test = DistributionStatistics("psi", binning_strategy="default", bin_count=10)
psi_value, p_value = distribution_test.compute(d1, d2, verbose=True)
###Output
PSI = 0.32743036141828374
PSI: Critical values defined according to de facto industry standard:
PSI > 0.25: Significant distribution change; investigate.
PSI: Critical values defined according to Yurdakul (2018):
99.9% confident distributions have changed.
###Markdown
KS: Kolmogorov-Smirnov with DistributionStatisticsThe Kolmogorov-Smirnov test compares two distributions by calculating the maximum difference of the two samples' distribution functions, as illustrated by the black arrow in the following figure. The KS test is available in `probatus.stat_tests.ks`.The main advantage of this method is its sensitivity to differences in both location and shape of the empirical cumulative distribution functions of the two samples.The main disadvantages are that: it works for continuous distributions (unless modified, e.g. see ([Jeng 2006](https://bmcmedresmethodol.biomedcentral.com/track/pdf/10.1186/1471-2288-6-45))); in large samples, small and unimportant differences can be statistically significant ([Taplin & Hunt 2019](https://www.mdpi.com/2227-9091/7/2/53/pdf)); and finally in small samples, large and important differences can be statistically insignificant ([Taplin & Hunt 2019](https://www.mdpi.com/2227-9091/7/2/53/pdf)). As before, using the test requires you to perform the binning beforehand.
###Code
k_value, p_value = ks(d1, d2, verbose=True)
###Output
KS: pvalue = 2.104700973377179e-27
KS: Null hypothesis rejected with 99% confidence. Distributions very different.
###Markdown
Again, we can also choose to combine the binning and the statistical test using the `DistributionStatistics` class.
###Code
distribution_test = DistributionStatistics("ks", binning_strategy=None)
ks_value, p_value = distribution_test.compute(d1, d2, verbose=True)
###Output
KS: pvalue = 2.104700973377179e-27
KS: Null hypothesis rejected with 99% confidence. Distributions very different.
###Markdown
AutoDist
###Code
from probatus.stat_tests import AutoDist
###Output
_____no_output_____
###Markdown
Multiple statistics can automatically be calculated using `AutoDist`. To show this, let's create two new dataframes with two features each.
###Code
size, n_features = 100, 2
df1 = pd.DataFrame(np.random.normal(size=(size, n_features)), columns=[f'feat_{x}' for x in range(n_features)])
df2 = pd.DataFrame(np.random.normal(size=(size, n_features)), columns=[f'feat_{x}' for x in range(n_features)])
###Output
_____no_output_____
###Markdown
We can now specify the statistical tests we want to perform and the binning strategies to perform. We can also set both of these variables to `'all'` or binning strategies to `'default'` to use the default binning strategy for every chosen statistical test.
###Code
statistical_tests = ["KS", "PSI"]
binning_strategies = "default"
###Output
_____no_output_____
###Markdown
Let's compute the statistics and their p_values:
###Code
myAutoDist = AutoDist(statistical_tests=statistical_tests, binning_strategies=binning_strategies, bin_count=10)
myAutoDist.compute(df1, df2)
###Output
100%|██████████| 4/4 [00:00<00:00, 140.75it/s]
###Markdown
Univariate Distribution Similarity[](https://colab.research.google.com/github/ing-bank/probatus/blob/master/docs/tutorials/nb_distribution_statistics.ipynb) There are many situations when you want to perform univariate distribution comparison of a given feature, e.g. stability of the feature over different months.In order to do that, you can use statistical tests. In this tutorial we present how to easily do this using the `DistributionStatistics` class, and with the statistical tests directly.The available statistical tests in `probatus.stat_tests` are: - Epps-Singleton ('ES')- Kolmogorov-Smirnov statistic ('KS')- Population Stability Index ('PSI')- Shapiro-Wilk based difference statistic ('SW')- Anderson-Darling TS ('AD')You can perform all these tests using a convenient wrapper class called `DistributionStatistics`.In this tutorial we will focus on how to perform two useful tests: Population Stability Index (widely applied in banking industry) and Kolmogorov-Smirnov. Setup
###Code
%%capture
!pip install probatus
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from probatus.binning import AgglomerativeBucketer, SimpleBucketer, QuantileBucketer
from probatus.stat_tests import DistributionStatistics, psi, ks
###Output
_____no_output_____
###Markdown
Let's define some test distributions and visualize them. For these examples, we will use a normal distribution and a shifted version of the same distribution.
###Code
counts = 1000
np.random.seed(0)
d1 = pd.Series(np.random.normal(size=counts), name='feature_1')
d2 = pd.Series(np.random.normal(loc=0.5, size=counts), name='feature_1')
from probatus.utils.plots import plot_distributions_of_feature
feature_distributions = [d1, d2]
sample_names = ['expected', 'actual']
plot_distributions_of_feature(feature_distributions, sample_names=sample_names, plot_perc_outliers_removed=0.01)
###Output
_____no_output_____
###Markdown
Binning - QuantileBucketerTo visualize the data, we will bin the data using a quantile bucketer, available in the `probatus.binning` module.Binning is used by all the `stats_tests` in order to group observations.
###Code
bins = 10
myBucketer = QuantileBucketer(bins)
d1_bincounts = myBucketer.fit_compute(d1)
d2_bincounts = myBucketer.compute(d2)
print("Bincounts for d1 and d2:")
print(d1_bincounts)
print(d2_bincounts)
###Output
Bincounts for d1 and d2:
[100 100 100 100 100 100 100 100 100 100]
[ 25 62 50 68 76 90 84 169 149 217]
###Markdown
Let's plot the distribution for which we will calculate the statistics.
###Code
plt.figure(figsize=(20,5))
plt.bar(range(0, len(d1_bincounts)), d1_bincounts, label='d1: expected')
plt.bar(range(0, len(d2_bincounts)), d2_bincounts, label='d2: actual', alpha=0.5)
plt.title('PSI (bucketed)', fontsize=16, fontweight='bold')
plt.legend(fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
By visualizing the bins, we can already notice that the distributions are different.Let's use the statistical test to prove that. PSI - Population Stability IndexThe population stability index ([Karakoulas, 2004](https://cms.rmau.org/uploadedFiles/Credit_Risk/Library/RMA_Journal/Other_Topics_(1998_to_present)/Empirical%20Validation%20of%20Retail%20Credit-Scoring%20Models.pdf)) has long been used to evaluate distribution similarity in the banking industry, while developing credit decision models.In `probatus` we have implemented the PSI according to [Yurdakul 2018](https://scholarworks.wmich.edu/cgi/viewcontent.cgi?article=4249&context=dissertations), which derives a p-value, based on the hard to interpret PSI statistic. Using the p-value is a more reliable choice, because the banking industry-standard PSI critical values of 0.1 and 0.25 are unreliable heuristics as there is a strong dependency on sample sizes and number of bins. Aside from these heuristics, the PSI value is not easily interpretable in the context of common statistical frameworks (like a p-value or confidence levels).
###Code
psi_value, p_value = psi(d1_bincounts, d2_bincounts, verbose=True)
###Output
PSI = 0.32743036141828374
PSI: Critical values defined according to de facto industry standard:
PSI > 0.25: Significant distribution change; investigate.
PSI: Critical values defined according to Yurdakul (2018):
99.9% confident distributions have changed.
###Markdown
Based on the above test, the distributions of the two samples differ significantly. Not only is the PSI statistic above the commonly used critical value, but the p-value also shows a very high confidence. PSI with DistributionStatistics Using the `DistributionStatistics` class one can apply the above test without the need to manually perform the binning. We initialize a `DistributionStatistics` instance with the desired test, binning_strategy (or choose `"default"` to choose the test's most appropriate binning strategy) and the number of bins. Then we start the test with the unbinned values as input.
###Code
distribution_test = DistributionStatistics("psi", binning_strategy="default", bin_count=10)
psi_value, p_value = distribution_test.compute(d1, d2, verbose=True)
###Output
PSI = 0.32743036141828374
PSI: Critical values defined according to de facto industry standard:
PSI > 0.25: Significant distribution change; investigate.
PSI: Critical values defined according to Yurdakul (2018):
99.9% confident distributions have changed.
###Markdown
KS: Kolmogorov-Smirnov with DistributionStatisticsThe Kolmogorov-Smirnov test compares two distributions by calculating the maximum difference of the two samples' distribution functions, as illustrated by the black arrow in the following figure. The KS test is available in `probatus.stat_tests.ks`.The main advantage of this method is its sensitivity to differences in both location and shape of the empirical cumulative distribution functions of the two samples.The main disadvantages are that: it works for continuous distributions (unless modified, e.g. see ([Jeng 2006](https://bmcmedresmethodol.biomedcentral.com/track/pdf/10.1186/1471-2288-6-45))); in large samples, small and unimportant differences can be statistically significant ([Taplin & Hunt 2019](https://www.mdpi.com/2227-9091/7/2/53/pdf)); and finally in small samples, large and important differences can be statistically insignificant ([Taplin & Hunt 2019](https://www.mdpi.com/2227-9091/7/2/53/pdf)). As before, using the test requires you to perform the binning beforehand
###Code
k_value, p_value = ks(d1, d2, verbose=True)
###Output
KS: pvalue = 2.104700973377179e-27
KS: Null hypothesis rejected with 99% confidence. Distributions very different.
###Markdown
Again, we can also choose to combine the binning and the statistical test using the `DistributionStatistics` class.
###Code
distribution_test = DistributionStatistics("ks", binning_strategy=None)
ks_value, p_value = distribution_test.compute(d1, d2, verbose=True)
###Output
KS: pvalue = 2.104700973377179e-27
KS: Null hypothesis rejected with 99% confidence. Distributions very different.
###Markdown
AutoDist
###Code
from probatus.stat_tests import AutoDist
###Output
_____no_output_____
###Markdown
Multiple statistics can automatically be calculated using `AutoDist`. To show this, let's create two new dataframes with two features each.
###Code
size, n_features = 100, 2
df1 = pd.DataFrame(np.random.normal(size=(size, n_features)), columns=[f'feat_{x}' for x in range(n_features)])
df2 = pd.DataFrame(np.random.normal(size=(size, n_features)), columns=[f'feat_{x}' for x in range(n_features)])
###Output
_____no_output_____
###Markdown
We can now specify the statistical tests we want to perform and the binning strategies to perform. We can also set both of these variables to `'all'` or binning strategies to `'default'` to use the default binning strategy for every chosen statistical test.
###Code
statistical_tests = ["KS", "PSI"]
binning_strategies = "default"
###Output
_____no_output_____
###Markdown
Let's compute the statistics and their p_values:
###Code
myAutoDist = AutoDist(statistical_tests=statistical_tests, binning_strategies=binning_strategies, bin_count=10)
myAutoDist.compute(df1, df2)
###Output
100%|██████████| 4/4 [00:00<00:00, 140.75it/s]
###Markdown
Univariate Distribution Similarity There are many situations when you want to perform univariate distribution comparison of a given feature, e.g. stability of the feature over different months.In order to do that, you can use statistical tests. In this tutorial we present how to easily do this using the `DistributionStatistics` class, and with the statistical tests directly.The available statistical tests in `probatus.stat_tests` are: - Epps-Singleton ('ES')- Kolmogorov-Smirnov statistic ('KS')- Population Stability Index ('PSI')- Shapiro-Wilk based difference statistic ('SW')- Anderson-Darling TS ('AD')You can perform all these tests using a convenient wrapper class called `DistributionStatistics`.In this tutorial we will focus on how to perform two useful tests: Population Stability Index (widely applied in banking industry) and Kolmogorov-Smirnov. Setup
###Code
%load_ext autoreload
%autoreload 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from probatus.binning import AgglomerativeBucketer, SimpleBucketer, QuantileBucketer
from probatus.stat_tests import DistributionStatistics, psi, ks
###Output
_____no_output_____
###Markdown
Let's define some test distributions and visualize them. For these examples, we will use a normal distribution and a shifted version of the same distribution.
###Code
counts = 1000
np.random.seed(0)
d1 = pd.Series(np.random.normal(size=counts), name='feature_1')
d2 = pd.Series(np.random.normal(loc=0.5, size=counts), name='feature_1')
from probatus.utils.plots import plot_distributions_of_feature
feature_distributions = [d1, d2]
sample_names = ['expected', 'actual']
plot_distributions_of_feature(feature_distributions, sample_names=sample_names, plot_perc_outliers_removed=0.01)
###Output
_____no_output_____
###Markdown
Binning - QuantileBucketerTo visualize the data, we will bin the data using a quantile bucketer, available in the `probatus.binning` module.Binning is used by all the `stats_tests` in order to group observations.
###Code
bins = 10
myBucketer = QuantileBucketer(bins)
d1_bincounts = myBucketer.fit_compute(d1)
d2_bincounts = myBucketer.compute(d2)
print("Bincounts for d1 and d2:")
print(d1_bincounts)
print(d2_bincounts)
###Output
Bincounts for d1 and d2:
[100 100 100 100 100 100 100 100 100 100]
[ 25 62 50 68 76 90 84 169 149 217]
###Markdown
Let's plot the distribution for which we will calculate the statistics.
###Code
plt.figure(figsize=(20,5))
plt.bar(range(0, len(d1_bincounts)), d1_bincounts, label='d1: expected')
plt.bar(range(0, len(d2_bincounts)), d2_bincounts, label='d2: actual', alpha=0.5)
plt.title('PSI (bucketed)', fontsize=16, fontweight='bold')
plt.legend(fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
By visualizing the bins, we can already notice that the distributions are different.Let's use the statistical test to prove that. PSI - Population Stability IndexThe population stability index ([Karakoulas, 2004](https://cms.rmau.org/uploadedFiles/Credit_Risk/Library/RMA_Journal/Other_Topics_(1998_to_present)/Empirical%20Validation%20of%20Retail%20Credit-Scoring%20Models.pdf)) has long been used to evaluate distribution similarity in the banking industry, while developing credit decision models.In `probatus` we have implemented the PSI according to [Yurdakul 2018](https://scholarworks.wmich.edu/cgi/viewcontent.cgi?article=4249&context=dissertations), which derives a p-value, based on the hard to interpret PSI statistic. Using the p-value is a more reliable choice, because the banking industry-standard PSI critical values of 0.1 and 0.25 are unreliable heuristics, as there is a strong dependency on sample sizes and number of bins. Aside from these heuristics, the PSI value is not easily interpretable in the context of common statistical frameworks (like a p-value or confidence levels).
###Code
psi_value, p_value = psi(d1_bincounts, d2_bincounts, verbose=True)
###Output
PSI = 0.32743036141828374
PSI: Critical values defined according to de facto industry standard:
PSI > 0.25: Significant distribution change; investigate.
PSI: Critical values defined according to Yurdakul (2018):
99.9% confident distributions have changed.
###Markdown
Based on the above test, the distributions of the two samples differ significantly. Not only is the PSI statistic above the commonly used critical value, but the p-value also shows a very high confidence. PSI with DistributionStatistics Using the `DistributionStatistics` class one can apply the above test without the need to manually perform the binning. We initialize a `DistributionStatistics` instance with the desired test, binning_strategy (or choose `"default"` to choose the test's most appropriate binning strategy) and the number of bins. Then we start the test with the unbinned values as input.
###Code
distribution_test = DistributionStatistics("psi", binning_strategy="default", bin_count=10)
psi_value, p_value = distribution_test.compute(d1, d2, verbose=True)
###Output
PSI = 0.32743036141828374
PSI: Critical values defined according to de facto industry standard:
PSI > 0.25: Significant distribution change; investigate.
PSI: Critical values defined according to Yurdakul (2018):
99.9% confident distributions have changed.
###Markdown
KS: Kolmogorov-Smirnov with DistributionStatisticsThe Kolmogorov-Smirnov test compares two distributions by calculating the maximum difference of the two samples' distribution functions, as illustrated by the black arrow in the following figure. The KS test is available in `probatus.stat_tests.ks`.The main advantage of this method is its sensitivity to differences in both location and shape of the empirical cumulative distribution functions of the two samples.The main disadvantages are that: it works for continuous distributions (unless modified, e.g. see ([Jeng 2006](https://bmcmedresmethodol.biomedcentral.com/track/pdf/10.1186/1471-2288-6-45))); in large samples, small and unimportant differences can be statistically significant ([Taplin & Hunt 2019](https://www.mdpi.com/2227-9091/7/2/53/pdf)); and finally in small samples, large and important differences can be statistically insignificant ([Taplin & Hunt 2019](https://www.mdpi.com/2227-9091/7/2/53/pdf)). As before, using the test requires you to perform the binning beforehand.
###Code
k_value, p_value = ks(d1, d2, verbose=True)
###Output
KS: pvalue = 2.104700973377179e-27
KS: Null hypothesis rejected with 99% confidence. Distributions very different.
###Markdown
Again, we can also choose to combine the binning and the statistical test using the `DistributionStatistics` class.
###Code
distribution_test = DistributionStatistics("ks", binning_strategy=None)
ks_value, p_value = distribution_test.compute(d1, d2, verbose=True)
###Output
KS: pvalue = 2.104700973377179e-27
KS: Null hypothesis rejected with 99% confidence. Distributions very different.
###Markdown
AutoDist
###Code
from probatus.stat_tests import AutoDist
###Output
_____no_output_____
###Markdown
Multiple statistics can automatically be calculated using `AutoDist`. To show this, let's create two new dataframes with two features each.
###Code
size, n_features = 100, 2
df1 = pd.DataFrame(np.random.normal(size=(size, n_features)), columns=[f'feat_{x}' for x in range(n_features)])
df2 = pd.DataFrame(np.random.normal(size=(size, n_features)), columns=[f'feat_{x}' for x in range(n_features)])
###Output
_____no_output_____
###Markdown
We can now specify the statistical tests we want to perform and the binning strategies to perform. We can also set both of these variables to `'all'` or binning strategies to `'default'` to use the default binning strategy for every chosen statistical test.
###Code
statistical_tests = ["KS", "PSI"]
binning_strategies = "default"
###Output
_____no_output_____
###Markdown
Let's compute the statistics and their p_values:
###Code
myAutoDist = AutoDist(statistical_tests=statistical_tests, binning_strategies=binning_strategies, bin_count=10)
myAutoDist.compute(df1, df2)
###Output
100%|██████████| 4/4 [00:00<00:00, 140.75it/s]
|
Module2/02b - Data and Features Lab.ipynb | ###Markdown
Assignment 2This time, you're going to attempt to load your first csv dataset! Open up the starter code located in Module2/assignment2.py. Read through it and follow the directions to: Load up Module2/Datasets/tutorial.csv Print the entire dataframe, using print df Use the .describe() method on the dataset Slice the dataset using [2:4, 'col3']
###Code
# %load 'assignment2.py'
import pandas as pd
# TODO: Load up the 'tutorial.csv' dataset
#
# .. your code here ..
# TODO: Print the results of the .describe() method
#
# .. your code here ..
# TODO: Figure out which indexing method you need to
# use in order to index your dataframe with: [2:4,'col3']
# And print the results
#
# .. your code here ..
import pandas as pd
data = pd.read_csv('Datasets/tutorial.csv')
###Output
_____no_output_____
###Markdown
Question 1 When you print the results of calling .describe() on your dataframe, what is the value displayed in the bottom right corner (col3 max)?
###Code
data.describe()
###Output
_____no_output_____
###Markdown
Question 2 Which of the many indexing methods did you use to get [2:4,'col3'] working? Question 3 How many values are returned when you print the results of the [2:4,'col3'] indexing operation?
###Code
data.loc[2:4,'col3']
###Output
_____no_output_____
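###Markdown
A short note on the indexing choice above: `.loc` is label-based and its slices include both endpoints, so with the default integer index `[2:4, 'col3']` returns the three rows labelled 2, 3 and 4. The position-based `.iloc` behaves differently; an illustrative comparison sketch (assuming the default index produced by `read_csv`):
###Code
# label-based: rows labelled 2, 3 and 4 (both ends included)
print(data.loc[2:4, 'col3'])
# position-based: rows at positions 2 and 3 only, column addressed by position
print(data.iloc[2:4, data.columns.get_loc('col3')])
###Output
_____no_output_____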
###Markdown
Assignment 3 MIT's Karl Ulrich donated a dataset titled Servo Data Set to the UCI Machine Learning Repository in the 1980's. The dataset has been described as "an interesting collection of data that covers an extremely non-linear phenomenon - predicting the rise time of a servomechanism in terms of two (continuous) gain settings and two (discrete) choices of mechanical linkages." As noted on the dataset website above, the column names are defined in order as: ['motor', 'screw', 'pgain', 'vgain', 'class'] Your mission, should you choose to accept, is to figure out a few stats about this dataset, which has been conveniently copied to your Module2/Datasets/servo.data. You can get started by opening up the assignment starter code, saved to Module2/assignment3.py.
###Code
# %load 'assignment3.py'
import pandas as pd
# TODO: Load up the dataset
# Ensuring you set the appropriate header column names
#
# .. your code here ..
# TODO: Create a slice that contains all entries
# having a vgain equal to 5. Then print the
# length of (# of samples in) that slice:
#
# .. your code here ..
# TODO: Create a slice that contains all entries
# having a motor equal to E and screw equal
# to E. Then print the length of (# of
# samples in) that slice:
#
# .. your code here ..
# TODO: Create a slice that contains all entries
# having a pgain equal to 4. Use one of the
# various methods of finding the mean vgain
# value for the samples in that slice. Once
# you've found it, print it:
#
# .. your code here ..
# TODO: (Bonus) See what happens when you run
# the .dtypes method on your dataframe!
import pandas as pd
data = pd.read_csv('Datasets/servo.data', header = None)
headers = ['motor', 'screw', 'pgain', 'vgain', 'class']
data.columns = headers
data.head()
###Output
_____no_output_____
###Markdown
Question 1a How many samples in this dataset have a vgain feature value equal to 5?
###Code
t = data[data["vgain"] == 5]
len(t)
###Output
_____no_output_____
###Markdown
Question 1b How many samples in this dataset contain the value E for both motor and screw features? Be sure to validate you've correctly loaded your data before answering!
###Code
t = data[(data["motor"] == "E") & (data["screw"] == "E")]
t.head()
len(t)
###Output
_____no_output_____
###Markdown
Question 1c What is the mean vgain value of those samples that have a pgain feature value equal to 4?
###Code
t = data[data["pgain"] == 4]
t.describe()
t.head()
###Output
_____no_output_____
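###Markdown
The answer to Question 1c can also be read off directly rather than scanning the `.describe()` table; a small follow-up sketch:
###Code
# mean vgain of the samples whose pgain equals 4
t["vgain"].mean()
###Output
_____no_output_____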
###Markdown
Assignment 4 Navigate over to ESPN's website for NHL Historic Player Points Statistics, for the years 2014-2015. This page has a table on it with a few stats we're interested in obtaining. But it's a bit messy! Clean it up for us, using the appropriate commands to: Load up the table on just this page into a Pandas dataframe. No need to worry about the other pages! Rename the columns so that they match the column definitions on the website. Get rid of (drop) any erroneous rows that have at least 4 NANs in them. Get rid of the RK column. Ensure there are no nan "holes" in your index. Check the dtypes of all columns, and ensure those that should be numeric are numeric.
###Code
# %load 'assignment4.py'
import pandas as pd
# TODO: Load up the table, and extract the dataset
# out of it. If you're having issues with this, look
# carefully at the sample code provided in the reading
#
# .. your code here ..
# TODO: Rename the columns so that they are similar to the
# column definitions provided to you on the website.
# Be careful and don't accidentially use any names twice.
#
# .. your code here ..
# TODO: Get rid of any row that has at least 4 NANs in it,
# e.g. that do not contain player points statistics
#
# .. your code here ..
# TODO: At this point, look through your dataset by printing
# it. There probably still are some erroneous rows in there.
# What indexing command(s) can you use to select all rows
# EXCEPT those rows?
#
# .. your code here ..
# TODO: Get rid of the 'RK' column
#
# .. your code here ..
# TODO: Ensure there are no holes in your index by resetting
# it. By the way, don't store the original index
#
# .. your code here ..
# TODO: Check the data type of all columns, and ensure those
# that should be numeric are numeric
#
# .. your code here ..
# TODO: Your dataframe is now ready! Use the appropriate
# commands to answer the questions on the course lab page.
#
# .. your code here ..
import pandas as pd
###Output
_____no_output_____
###Markdown
Question 1 After completing the 6 steps above, how many rows remain in this dataset? (Not to be confused with the index!)
###Code
data = pd.read_html?
data = pd.read_html
url = "http://www.espn.com/nhl/statistics/player/_/stat/points/sort/points/year/2015/seasontype/2"
dataTables = pd.read_html(url, skiprows = 2)
len(dataTables)
data = dataTables[0]
data.head()
data.columns = ["x_x", "player", "team", "gp", "g", "a", "pts", "plusminus", "pim", "pts_g", "sog", "pct", "gwg",
"pp_g", "pp_a", "sh_g", "sh_a"]
data = data.drop(labels = ["x_x"], axis = 1)
data.head()
print(len(data))
data = data.dropna(axis = 0, thresh = 4)
print(len(data))
data = data.reset_index(drop= True)
print(data.dtypes)
for i in list(data.columns)[2:]:
    data[i] = pd.to_numeric(data[i], errors="coerce")
print(data.dtypes)
print(len(data))
data = data.dropna(axis = 0, thresh = 4)
print(len(data))
data = data.reset_index(drop= True)
###Output
43
40
###Markdown
Question 2 How many unique PCT values exist in the table?
###Code
len(set(data["pct"]))
###Output
_____no_output_____
###Markdown
Question 3 What is the value you get by adding the GP values at indices 15 and 16 of this table?
###Code
print(type(data.loc[15:16, 'gp']))
data.loc[15:16, 'gp']
sum(data.loc[15:16, 'gp'])
###Output
_____no_output_____
###Markdown
Assignment 5 Barry Becker extracted a reasonably clean subset of the 1994, U.S. Census database, with a goal of running predictions to determine whether a person makes over 50K a year. The dataset is hosted on the University of California, Irvine's Machine Learning Repository and includes features such as the person's age, occupation, and hours worked per week, etc. As clean as the data is, it still isn't quite ready for analysis by SciKit-Learn! Using what you've learned in this chapter, clean up the various columns by encoding them properly using the best practices so that they're ready to be examined. We've included a modified subset of the dataset at Module2/Datasets/census.data and also have some starter code to get you going located at Module2/assignment5.py. Load up the dataset and set header label names to: ['education', 'age', 'capital-gain', 'race', 'capital-loss', 'hours-per-week', 'sex', 'classification'] Ensure you use the right command to do this, as there is more than one command! To verify you used the correct one, open the dataset in a text editor like SublimeText or Notepad, and double check your df.head() to ensure the first values match up. Make sure any value that needs to be replaced with a NAN is set as such. There are at least three ways to do this. One is much easier than the other two. Look through the dataset and ensure all of your columns have appropriate data types. Numeric columns should be float64 or int64, and textual columns should be object. Properly encode any ordinal features using the method discussed in the chapter. Properly encode any nominal features by exploding them out into new, separate, boolean features.
###Code
# %load 'assignment5.py'
import pandas as pd
import numpy as np
#
# TODO:
# Load up the dataset, setting correct header labels.
#
# .. your code here ..
#
# TODO:
# Use basic pandas commands to look through the dataset... get a
# feel for it before proceeding! Do the data-types of each column
# reflect the values you see when you look through the data using
# a text editor / spread sheet program? If you see 'object' where
# you expect to see 'int32' / 'float64', that is a good indicator
# that there is probably a string or missing value in a column.
# use `your_data_frame['your_column'].unique()` to see the unique
# values of each column and identify the rogue values. If these
# should be represented as nans, you can convert them using
# na_values when loading the dataframe.
#
# .. your code here ..
#
# TODO:
# Look through your data and identify any potential categorical
# features. Ensure you properly encode any ordinal and nominal
# types using the methods discussed in the chapter.
#
# Be careful! Some features can be represented as either categorical
# or continuous (numerical). If you ever get confused, think to yourself
# what makes more sense generally---to represent such features with a
# continuous numeric type... or a series of categories?
#
# .. your code here ..
#
# TODO:
# Print out your dataframe
#
# .. your code here ..
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
%matplotlib inline
data = pd.read_csv('Datasets/census.data', header = None, usecols = list(range(1,9)))
data.head()
len(data)
cols = ['education', 'age', 'capital-gain', 'race', 'capital-loss', 'hours-per-week', 'sex', 'classification']
data.columns = cols
data.head()
data.dtypes
#for i in ['education', 'race', 'sex', 'classification']:
#data[i].value_counts().plot(kind = 'bar')
data['education'].value_counts().plot(kind = 'bar')
data['capital-gain'] = pd.to_numeric(data['capital-gain'], errors = 'coerce')
data.dtypes
data = pd.get_dummies(data, columns = ['race', 'sex'])
data.head()
for i in ['education', 'hours-per-week']:
    for x in list(set(data[i])):
        print(x)
    print('')
orderedEducation = ["Preschool", "1st-4th", "5th-6th", "7th-8th", "9th", "10th", "11th", "12th", "HS-grad",
"Some-college", "Bachelors", "Masters", "Doctorate"]
data['education'] = data['education'].astype("category",
ordered = True, categories = orderedEducation).cat.codes
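# NOTE: the categories= / ordered= keywords of .astype("category", ...) used above were
# deprecated and later removed in newer pandas releases; an equivalent form there would be
# (sketch, shown as a comment only):
# data['education'] = pd.Categorical(data['education'],
#                                    categories=orderedEducation, ordered=True).codes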
data.head()
###Output
_____no_output_____ |
English/8. Recursion.ipynb | ###Markdown
1.1.3. Recursion Learning Objectives* [Divide-and-conquer or decrease-and-conquer](divide)* [Examples of real-life problems that are recursive in nature](real)* [Recursive steps vs base case](step)* [Recursion vs Iteration](rec-iter) Divide-and-conquer or decrease-and-conquer Algorithmically: a way to design solutions to problems by **divide-and-conquer** or **decrease-and-conquer**. “Divide and conquer” algorithms solve a hard problem by breaking it into a set of subproblems such that: * sub-problems are easier to solve than the original * solutions of the sub-problems can be combined to solve the originalSemantically: a programming technique where a function calls itself* in programming, the goal is to NOT have infinite recursion * must have 1 or more base cases that are easy to solve * must solve the same problem on some other input with the goal of simplifying the larger problem input Have you ever done this before? Open your browser right now and type "recursion" on Google. Did you notice the **“Did you mean: recursion”** message? Uhm yes, but what does it mean really? Go further, click on that message. It will appear again. Click again. There it is again. Click… ENOUGH! **Recursion** is the process of repeating items in a self-similar way. * A recursive function is a function that calls itself within its definition.* This can be hard to get your head around at first, but think of it as breaking a big problem down into doing a small problem many times over.* This means that a complex problem can be made increasingly simpler by repeatedly doing a simpler and simpler and simpler form of the same problem with each repetition.* However, we must provide a 'simplest form' of the function where the function stops, otherwise it will repeat forever and throw an error.* We call this 'simplest form' a base case.* This is best illustrated with an example:
###Code
# Function that takes in as input the starting number to countdown from
def countdown(n):
# base case: this is where the function will eventually stop
if n == 0:
print(0)
# here we reduce the problem into a simpler version
else:
# we print the countdown number
print(n)
# we repeat the function with the next smallest number
countdown(n-1)
countdown(5)
###Output
5
4
3
2
1
0
###Markdown
Examples of real-life problems that are recursive in nature Here are some examples from our daily life:**DNA** ([Source](https://qph.fs.quoracdn.net/main-qimg-905203aa42ecfa447e613c1dee2e3b4e-c))**Romanesco broccoli**: its pattern has been modeled as a recursive helical arrangement of cones. ([Source](https://qph.fs.quoracdn.net/main-qimg-2d3fccb284d0e185d9d20b8d0268bb32-c))**Russian dolls** ([Source](http://pythonpracticeprojects.com/real-world-recursion.html)) Recursive steps vs base case **recursive step** * think how to reduce problem to a simpler/smaller version of same problem **base case*** keep reducing problem until reach a simple case that can be solved directly * when b = 1, a*b = a You can see recursive step and base case part in the multiplication example shown below: ([Source](https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-0001-introduction-to-computer-science-and-programming-in-python-fall-2016/lecture-slides-code/MIT6_0001F16_Lec6.pdf))* In a base case, we compute the result immediately given the inputs to the function call.* In a recursive step, we compute the result with the help of one or more recursive calls to this same function, but with the inputs somehow reduced in size or complexity, closer to a base case. As a code break, let's see if you can write your very own, first recursive function to take one input to the power of the other. Remembering that:- $a^{b} = a \times a \times a \times ... \times a $: b times
###Code
## It's coding time!!!
###Output
_____no_output_____
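###Markdown
One possible solution sketch for the exercise above, assuming the exponent b is a non-negative integer:
###Code
def power(a, b):
    # base case: anything raised to the power 0 is 1
    if b == 0:
        return 1
    # recursive step: a**b = a * a**(b - 1)
    return a * power(a, b - 1)

power(2, 5)  # -> 32
###Output
_____no_output_____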
###Markdown
Recursion vs Iteration * looping constructs (while and for loops) lead to iterative algorithms * can capture computation in a set of state variables that update on each iteration through loopA program is called __recursive__ when an entity calls itself. A program is called __iterative__ when there is a loop (or repetition). Example: Program to find the factorial of a number. Remember that the factorial of a number $x$, denoted as $x!$, is given by:- $x!$ = $x \times (x-1) \times (x-2) \times ... \times 2 \times 1 = x \times (x-1)!$- e.g. - $3! = 3 \times 2 \times 1 = 6$ - $4! = 4 \times 3 \times 2 \times 1 = 4 \times 3! = 24$
###Code
# ----- Recursion -----
# method to find factorial of given number
def factorialUsingRecursion(n):
# base case
if (n == 0):
return 1;
# recursion call
return n * factorialUsingRecursion(n - 1);
# ----- Iteration -----
# Method to find the factorial of a given number
def factorialUsingIteration(n):
res = 1;
# using iteration
for i in range(2, n + 1):
res *= i;
return res;
# Driver method
num = 5;
print("Factorial of",num,"using Recursion is:",
factorialUsingRecursion(5));
print("Factorial of",num,"using Iteration is:",
factorialUsingIteration(5));
# This code is contributed by mits
###Output
_____no_output_____ |
MachineLearning_code_examples/Sklearn_pipelines.ipynb | ###Markdown
SKLEARN PIPELINES---Materials prepared, or collected and modified by: __Pawel Rosikiewicz__, www.SimpleAI.ch CONTENT* My Infopages * Code examples SOURCE MATERIALS links to source materials, and additional readings, were added to text or code in each section directly.   CODE EXAMPLES:---
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl
from sklearn.datasets import load_breast_cancer
from sklearn import neighbors, preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.datasets import make_classification # creates a simple example dataset
###Output
_____no_output_____
###Markdown
import my helper functions
###Code
from src.utils.ml_model_metrics import plot_confusion_matrix # creates annotated heatmap for confusion matrix
###Output
_____no_output_____
###Markdown
Step 1. create example data
###Code
X, y = make_classification(
n_samples=1000,
n_features=20,
n_informative=10,
n_redundant=2,
n_repeated=0,
n_classes=4,
n_clusters_per_class=2
)
print(f'X shape: {X.shape}')
print(f'y shape: {y.shape}')
# are classes balanced
print("target:\n",pd.Series(y).value_counts())
###Output
X shape: (1000, 20)
y shape: (1000,)
target:
3 251
0 250
2 250
1 249
dtype: int64
###Markdown
create a simple classification model
###Code
# split to train/test
X_tr, X_te, y_tr, y_te = train_test_split(
X, y, train_size=0.7, random_state=0)
# scale input data
scaler = preprocessing.StandardScaler().fit(X_tr)
X_tr = scaler.transform(X_tr)
X_te = scaler.transform(X_te)
# Create classifier & train it
clf = neighbors.KNeighborsClassifier(n_neighbors=2)
clf.fit(X_tr, y_tr)
# predict test values and check summary
y_pred = clf.predict(X_te)
# Function, ..............................
def show_results(model, y_tr, y_te):
'helper function to examine classification results'
print(f'train_acc: {accuracy_score(y_tr, model.predict(X_tr))}')
print(f'test_acc: {accuracy_score(y_te, model.predict(X_te))}')
plot_confusion_matrix(X_te, y_te, model,
with_perc=True, cmap="coolwarm", figsize=(5,4))
# create confusion matrix, with % of predicted classes in each row
show_results(clf, y_tr, y_te)
###Output
train_acc: 0.8128571428571428
test_acc: 0.5833333333333334
###Markdown
Use the Pipeline function to find the best k-value__Pipeline__ is used to encapsulate multiple processing steps (e.g. scaling and the classifier) into a single estimator object: https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html
###Code
from sklearn.pipeline import Pipeline
# each step requires a name, and a function
pipe = Pipeline([
('scaler', preprocessing.StandardScaler()),
('knn',neighbors.KNeighborsClassifier(n_neighbors=5))
])
# test different k values
results = []
for k in list(range(2,30)):
# set k in the pipeline
pipe.set_params(knn__n_neighbors=k)
pipe.fit(X_tr, y_tr)
# collect the results
results.append({
'k':k,
'train_acc': accuracy_score(y_tr, pipe.predict(X_tr)),
'test_acc': accuracy_score(y_te, pipe.predict(X_te))
})
# convert the results to pd.dataframe & plot them
gs = pd.DataFrame(results)
# plot results
plt.style.use("ggplot")
plt.plot(gs.loc[:,'k'], gs.loc[:,'train_acc'], label="train acc")
plt.plot(gs.loc[:,'k'], gs.loc[:,'test_acc'], label="test acc")
plt.xlabel("k")
plt.ylabel("accuracy")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
retrain the model with the best k-value and plot the results on the test data
###Code
# find best k, and show confusion matrix
best_k = gs.sort_values(by="test_acc", ascending=False).k.iloc[0]
# retrain the model with best_k
pipe.set_params(knn__n_neighbors=best_k)
pipe.fit(X_tr, y_tr)
# create confusion matrix, with % of predicted classes in each row
show_results(pipe, y_tr, y_te)
###Output
train_acc: 0.8128571428571428
test_acc: 0.5833333333333334
###Markdown
check parameters in pipeline
###Code
# to see parameters at each step
pipe.get_params()
# returns: {'memory': None,
#. 'steps': [( ...
# basic text info
pipe.named_steps
# returns: {'scaler': StandardScaler(), 'knn': KNeighborsClassifier()}
# visualize the pipeline
from sklearn import set_config
set_config(display="diagram"); pipe
# beautiful visualizations (HTML)
###Output
_____no_output_____
###Markdown
disable the step, remove, etc..
###Code
# disable the scaler step and retrain
pipe.set_params(scaler=None)
pipe.fit(X_tr, y_tr)
# create confusion matrix, with % of predicted classes in each row
show_results(pipe, y_tr, y_te)
pipe.get_params()
###Output
_____no_output_____
###Markdown
Use ParameterGrid to search for the optimal combination of hyperparameters---
###Code
from sklearn.model_selection import ParameterGrid
# create a basic grid and tun it.
''' important,
if you use None, or e.g. StandardScaler(),
the object must be provided in the list
'''
grid = ParameterGrid({
'scaler':[None, preprocessing.StandardScaler()],
'knn__n_neighbors': list(range(2,8))
})
# you may also create unique combinations,
# ...i.e. groups of hyperparameters that are not mixed with each other
'''each group must be in a separate dict'''
grid = (
{'scaler':[None], # even one param must be in a list
'knn__n_neighbors': list(range(2,4))
},
{'scaler':[preprocessing.StandardScaler()],
'knn__n_neighbors': list(range(5,6))
}
)
list(ParameterGrid(grid))
###Output
_____no_output_____
###Markdown
we can access all elements in a parameter grid as in a list. Here is an example of a simple function that I am using to run the search and store the results:
###Code
def my_grid_search(pipe, grid):
# test different k values
results = []
for params in grid:
# set k in the pipeline
pipe.set_params(**params)
pipe.fit(X_tr, y_tr)
# collect the results
results.append({
**params,
'train_acc': accuracy_score(y_tr, pipe.predict(X_tr)),
'test_acc': accuracy_score(y_te, pipe.predict(X_te))
})
# convert the results to pd.dataframe & list top 5
gs = pd.DataFrame(results)
print(gs.sort_values(by='test_acc', ascending=False).head())
###Output
_____no_output_____
###Markdown
Finally run it on an example:
###Code
# create sklearn pipeline with the classifier
pipe = Pipeline([
    ('scaler', None), # the step name must exist in the pipeline so it can be set/replaced via the parameter grid,
('knn',neighbors.KNeighborsClassifier(n_neighbors=5))
])
# define parameter grid
grid = ParameterGrid({
'scaler':[None, preprocessing.StandardScaler()],
'knn__n_neighbors': list(range(2,8))
})
# find best hyperparameters
my_grid_search(pipe, grid)
###Output
knn__n_neighbors scaler train_acc test_acc
6 5 None 0.787143 0.670000
7 5 StandardScaler() 0.787143 0.670000
4 4 None 0.801429 0.663333
5 4 StandardScaler() 0.801429 0.663333
2 3 None 0.835714 0.640000
###Markdown
make_pipeline vs Pipeline functions & applying custom transformers* make_pipeline * creates the same type of object as the Pipeline() function * names each step automatically, * very useful for preprocessing steps, * I often use it to create smaller pipelines for data transformers, * e.g. I create a transformer and later provide it to the final pipeline created with the Pipeline() function  example
###Code
from sklearn.preprocessing import FunctionTransformer # creates custom transfomers
from sklearn.pipeline import make_pipeline # like pipeline function, but give step names automatically,
from sklearn.preprocessing import OneHotEncoder, StandardScaler, KBinsDiscretizer # skleanr transformers,
from sklearn.compose import ColumnTransformer # allows using different transformers to different columns
# create custom transformer
log_scale_transformer = make_pipeline(
FunctionTransformer(np.abs, validate=False), # see below
FunctionTransformer(np.log1p, validate=False), # creates runtime warning if negative data are used
StandardScaler()
)
# use ColumnTransformer to create data preprocessor
'''we can apply different transformers to different columns
- give a unique name to each transformer
- "passthrough" - keyword; nothing is done with that column
- column indices/names are always provided as a LIST
'''
data_preprocessor = ColumnTransformer(
transformers=[
("passthrough_numeric", "passthrough", list(range(1,10))),
("log_scaled_numeric", log_scale_transformer, [0]),
("binned_numeric", KBinsDiscretizer(n_bins=10, encode="ordinal"), [11, 12]), # is encode="onehot", all bins are in different columns, Caution, it does not drop 1 column 1
],
remainder="drop", # what to do with other columns? TWO OPTION {‘drop’, ‘passthrough’}, if drop, these are removed.
)
transformed_data = data_preprocessor.fit_transform(X_tr)
transformed_data.shape
###Output
_____no_output_____
###Markdown
now let's try to run it
###Code
# create sklearn pipeline with the classifier
pipe = Pipeline([
('data_preprocessor', data_preprocessor), # you must add that step, otherwise it may be a problem,
('knn',neighbors.KNeighborsClassifier(n_neighbors=5))
])
pipe.fit(X_tr, y_tr)
# visualize the pipeline
from sklearn import set_config
set_config(display="diagram"); pipe
# just to see how this changed the accuracy
# define parameter grid
grid = ParameterGrid({
'knn__n_neighbors': list(range(2,8))
})
# find best hyperparameters
my_grid_search(pipe, grid)
###Output
knn__n_neighbors train_acc test_acc
4 6 0.691429 0.540000
2 4 0.721429 0.536667
3 5 0.698571 0.533333
5 7 0.690000 0.533333
1 3 0.744286 0.500000
###Markdown
cross-validation with Scikit-learn---__INTRODUCTION__* Scikit-learn allows implementing several strategies for cross-validation https://scikit-learn.org/stable/modules/cross_validation.html* Important: * it is important not to use the test data for learning; this applies not only to the estimator, but also to scaling, feature selection, etc... * sklearn functions such as Pipeline, make_pipeline, or cross_validate help you with that __KEY FUNCTIONS__ * train_test_split * fast method to generate one test/train set split, * with random shuffle of rows in the df, * not very useful for tuning hyperparameters (e.g. alpha and C), because the particular split of data into train/test sets may affect the results, * __cross_validate__ - used in code examples below, - allows specifying multiple metrics for evaluation. - allows using different cross-validation iterators - returns a dict containing * fit-times, * scores for test data, * optionally: training scores and fitted estimators - The multiple metrics can be specified either as a list, tuple or set of predefined scorer names; > from sklearn.metrics import recall_score > scoring = ['precision_macro', 'recall_macro'] > clf = svm.SVC(kernel='linear', C=1, random_state=0) > scores = cross_validate(clf, X, y, scoring=scoring) > sorted(scores.keys()) > ['fit_time', 'score_time', 'test_precision_macro', 'test_recall_macro'] > scores['test_recall_macro'] > array([0.96..., 1. ..., 0.96..., 0.96..., 1. ]) __CV Iterators__ * __KFold__ - divides all the samples into k groups of samples, called folds - if k=n, this is equivalent to the Leave One Out strategy - The prediction function is learned using k-1 folds, and the fold left out is used for testing. > from sklearn.model_selection import KFold > kfold = KFold(n_splits=3, shuffle=True, random_state=0) * __ShuffleSplit__ - creates n different train/test sets by shuffling the data, - equivalent to applying train_test_split() n times - Samples are first shuffled and then split into a pair of train and test sets - It is possible to control the randomness for reproducibility of the results by explicitly seeding the random_state pseudo random number generator__STRATIFIED CV__ * __Stratified k-fold__ - default iterator in the cross_validate function, - type of k-fold which returns stratified folds: * i.e. each set contains approximately the same percentage of samples of each target class as the complete set. > from sklearn.model_selection import StratifiedKFold > skf = StratifiedKFold(n_splits=3) * __StratifiedShuffleSplit__ - type of ShuffleSplit, which returns stratified splits, __Other types__* __RepeatedKFold__ - repeats K-Fold n times, producing different splits in each repetition > from sklearn.model_selection import RepeatedKFold > random_state = 12883823 > rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=random_state)* __Leave One Out (LOO)__ - Each learning set is created by taking all the samples except one - the test set being the sample left out, - __Pros:__ does not waste much data, almost all samples are used for training, - __Cons:__ * test results have high variance in accuracy, * models constructed from almost all the data are virtually identical, * As a general rule, most authors, and empirical evidence, suggest that 5- or 10- fold cross validation should be preferred to LOO. > from sklearn.model_selection import LeaveOneOut > loo = LeaveOneOut() * __Leave P Out (LPO)__ - creates all the possible training/test sets by removing p samples from the complete set. 
> from sklearn.model_selection import LeavePOut > lpo = LeavePOut(p=2) example 1. Cross-validation with KFold, & cross_validate
###Code
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_validate
# Create k-fold object
''' remember to shuffle the data,
by default shuffle = False
'''
kfold = KFold(n_splits=3, shuffle=True, random_state=0)
# create simple pipeline
pipe = make_pipeline(
StandardScaler(),
neighbors.KNeighborsClassifier()
)
# Apply cross-validation to find optimal hyperparameters,
# Option 1 - Use custom kfold obj
scores = cross_validate(
pipe, # model,
X, y, # inpiut data,
    cv=kfold, # cross validation object,
return_train_score=True
)
scores
# {'fit_time': array([0.00321221, 0.00180292, 0.0014317 ]),
# 'score_time': array([0.02344394, 0.01779819, 0.01449227]),
# 'test_score': array([0.64371257, 0.66366366, 0.65165165]),
# 'train_score': array([0.78528529, 0.76161919, 0.7856072 ])}
"""
'fit_time', - time to fit the estimator,
'score_time', - time to evaluate the estimator,
'test_score', - performance on the held-out validation fold,
'train_score' - performance on the (k-1) folds used to train the estimator
"""
# .. (option 2) Use built-in k-fold cross validation,
''' by default it uses stratified k-fold strategy,
which ensures that the target classes are equally mixed and represented in each fold
'''
scores = cross_validate(
pipe, # model,
X, y, # inpiut data,
    cv=3, # JUST GIVE AN INT WITH THE NUMBER OF FOLDS,
    # . most often used variants are 3, 5 and 10!
return_train_score=True
)
"""
Important Issue;
large difference in accuracy between the train and test sets
- by default, k-fold splits the data without shuffling,
so if the rows are ordered by class, part of the data
may effectively never be seen by the model during training, and test accuracy
may be much lower than the accuracy of the train sets,
""";
# evaluate results
print('Tr mean: {:.3f} std: {:.3f}'.format(
np.mean(scores['train_score']), np.std(scores['train_score'])))
print('Te mean: {:.3f} std: {:.3f}'.format(
np.mean(scores['test_score']), np.std(scores['test_score'])))
###Output
Tr mean: 0.788 std: 0.011
Te mean: 0.625 std: 0.035
###Markdown
you can add custom set of metrics to cross_validate functionhttps://scikit-learn.org/stable/modules/grid_search.htmlmultimetric-grid-search
###Code
from sklearn.metrics import recall_score
scoring = ['precision_macro', 'recall_macro', 'accuracy']
scores = cross_validate(
pipe, # model,
X, y, # inpiut data,
    cv=kfold, # cross validation object,
return_train_score=True,
scoring=scoring
)
scores
###Output
_____no_output_____
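###Markdown
 With a list of scorers, each metric appears in the returned dict under its own `test_<name>` (and, with return_train_score=True, `train_<name>`) key; a small illustrative loop:
###Code
for name in scoring:
    print('mean test {}: {:.3f}'.format(name, scores['test_' + name].mean()))
###Output
_____no_output_____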
###Markdown
other types of CV strategy - examples
###Code
"""
ShuffleSplit
--------------
- creates n different train/test sets by shuffling the data,
- equivalent to applying train_test_split() n times
"""
from sklearn.model_selection import ShuffleSplit
cv_type = ShuffleSplit(
n_splits=10,
test_size=20,
random_state=0
)
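# for example (illustrative), this iterator can be passed directly to cross_validate via cv=
scores = cross_validate(
    pipe, # the pipeline defined above
    X, y, # input data
    cv=cv_type, # the ShuffleSplit iterator
    return_train_score=True
)
print('Te mean: {:.3f}'.format(np.mean(scores['test_score'])))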
###Output
_____no_output_____
###Markdown
GridSearchCV---- used similarly to model.fit(X,y)- the fitted object holds both the best model and the full search results- https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html#sklearn.model_selection.GridSearchCV
###Code
from sklearn.model_selection import GridSearchCV
# create pipeline
pipe = Pipeline([
('scaler', None), # you must add that step, otherwise it may be a problem,
('knn',neighbors.KNeighborsClassifier(n_neighbors=5))
])
# define parameter grid; dct with lists
grid = {'knn__n_neighbors': list(range(2,8))}
# Create k-fold object
kfold = KFold(n_splits=3, shuffle=True, random_state=0)
# set scoring methods
scoring = ['precision_macro', 'recall_macro', 'accuracy']
# create GridSearchCV object
grid_cv = GridSearchCV(
estimator=pipe, param_grid=grid, cv=kfold,
return_train_score=True, n_jobs=-1
)
# fit, the model & tune parameters,
grid_cv.fit(X,y)
# see the model, as pipe
grid_cv
###Output
_____no_output_____
###Markdown
check the best model
###Code
# youi may make prediciotns
y_te_predicted = grid_cv.predict(X_te)
# get the "best" model
best_model = grid_cv.best_estimator_
y_te_predicted_best = best_model.predict(X_te)
# examine hyperparameters and score of the best model
best_score = grid_cv.best_score_
best_model_params = grid_cv.best_params_
print(best_score)
print(best_model_params)
###Output
0.7560075045104986
{'knn__n_neighbors': 5}
###Markdown
examine the process of gridsearch
###Code
# you may see available scores here, per split,
sorted(grid_cv.cv_results_.keys())
#. ['mean_fit_time',
#. 'mean_score_time',
#. 'mean_test_score',
#. 'mean_train_score' ...
# you may call all of them,
'''caution: params are stored as dicts,
and if saved as a txt file, they will be hard to reload
'''
df_res = pd.DataFrame(grid_cv.cv_results_)
df_res.head(3)
# here is what you can do to add params as separate columns
df_res = pd.concat([
df_res,
pd.DataFrame(df_res.params.to_list())
], axis=1)
df_res.head(2)
# k values were added as the last column called knn__n_neighbors
# other parameters will be treated in the same way
# Function, ........................................................
def plot_model_results(ax, params, means, sd, label=""):
'''creates a lne plot, and fill between lines on provided axis object
. params; used as labels on x-axis
. means; y-axis values
. sd; values used to create fillin area, ±1/2sd abouve each mean
. label; labels for a line plotted
'''
# create the plot
x_axis = np.arange(params.size)
ax.plot(x_axis,means,linewidth=3, label=label)
ax.fill_between( x_axis, means-sd/2, means+sd/2, alpha=0.4)
# add x-tick labels
ax.set_xticks(x_axis)
ax.set_xticklabels(params.tolist(), rotation=0, fontsize=10)
# set default style and create a figure
mpl.rcParams.update(mpl.rcParamsDefault)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(3,3))
# plot test scores
plot_model_results(ax=ax,params=df_res.knn__n_neighbors,
means=df_res.mean_test_score, sd=df_res.std_test_score, label="test"
)
# plot train scores
plot_model_results(ax=ax,params=df_res.knn__n_neighbors,
means=df_res.mean_train_score, sd=df_res.std_train_score, label="train"
)
# add labels
ax.set(xlabel="k", ylabel="accuracy")
ax.legend()
# print info and show the plot
print(f"best score: {best_score}")
print(f"best model params: {best_model_params}")
plt.show();
###Output
best score: 0.7560075045104986
best model params: {'knn__n_neighbors': 5}
|
docs/basic_use.ipynb | ###Markdown
Getting startedThe main interface to PySM is the `pysm3.Sky` class, the simplest way is to specify the required resolution as $N_{side}$ *HEALPix* parameter and the requested models as a list of strings, for example the simplest models for galactic dust and synchrotron `["d1", "s1"]`
###Code
import pysm3
import pysm3.units as u
import healpy as hp
import numpy as np
import warnings
warnings.filterwarnings("ignore")
sky = pysm3.Sky(nside=128, preset_strings=["d1", "s1"])
###Output
_____no_output_____
###Markdown
PySM initializes the requested component objects (generally load the input templates maps with `astropy.utils.data` and cache them locally in `~/.astropy`) and stores them in the `components` attribute (a list):
###Code
sky.components
###Output
_____no_output_____
###Markdown
PySM 3 uses `astropy.units`: http://docs.astropy.org/en/stable/units/ each input needs to have a unit attached to it, the unit just needs to be compatible, e.g. you can use either `u.GHz` or `u.MHz`.
###Code
map_100GHz = sky.get_emission(100 * u.GHz)
###Output
_____no_output_____
###Markdown
The output of the `get_emission` method is a 2D `numpy` array in the usual `healpy` convention, `[I,Q,U]`, by default in $\mu K_{RJ}$:
###Code
map_100GHz[0, :3]
###Output
_____no_output_____
###Markdown
Optionally convert to another unit using `astropy.units`
###Code
map_100GHz = map_100GHz.to(u.uK_CMB, equivalencies=u.cmb_equivalencies(100*u.GHz))
import matplotlib.pyplot as plt
%matplotlib inline
hp.mollview(map_100GHz[0], min=0, max=1e2, title="I map", unit=map_100GHz.unit)
hp.mollview(np.sqrt(map_100GHz[1]**2 + map_100GHz[2]**2), title="P map", min=0, max=1e1, unit=map_100GHz.unit)
###Output
_____no_output_____
###Markdown
Getting startedThe main interface to PySM is the `pysm.Sky` class, the simplest way is to specify the required resolution as $N_{side}$ *HEALPix* parameter and the requested models as a list of strings, for example the simplest models for galactic dust and synchrotron `["d1", "s1"]`
###Code
import pysm
import pysm.units as u
import healpy as hp
import numpy as np
sky = pysm.Sky(nside=128, preset_strings=["d1", "s1"])
###Output
_____no_output_____
###Markdown
PySM initializes the requested component objects (generally load the input templates maps with `astropy.utils.data` and cache them locally in `~/.astropy`) and stores them in the `components` attribute (a list):
###Code
sky.components
###Output
_____no_output_____
###Markdown
PySM 3 uses `astropy.units`: http://docs.astropy.org/en/stable/units/ each input needs to have a unit attached to it, the unit just needs to be compatible, e.g. you can use either `u.GHz` or `u.MHz`.
###Code
map_100GHz = sky.get_emission(100 * u.GHz)
###Output
_____no_output_____
###Markdown
The output of the `get_emission` method is a 2D `numpy` array in the usual `healpy` convention, `[I,Q,U]`, by default in $\mu K_{RJ}$:
###Code
map_100GHz[0, :3]
###Output
_____no_output_____
###Markdown
Optionally convert to another unit using `astropy.units`
###Code
map_100GHz = map_100GHz.to(u.uK_CMB, equivalencies=u.cmb_equivalencies(100*u.GHz))
import matplotlib.pyplot as plt
%matplotlib inline
hp.mollview(map_100GHz[0], min=0, max=1e2, title="I map", unit=map_100GHz.unit)
hp.mollview(np.sqrt(map_100GHz[1]**2 + map_100GHz[2]**2), title="P map", min=0, max=1e1, unit=map_100GHz.unit)
###Output
_____no_output_____ |
S04PythonCrashCourse/L01PythonCrashCourse.ipynb | ###Markdown
PYTHON CRASH COURSE Nov 23, 2021 Author: Chaithra Kopparam Cheluvaiah STRING - formatting - immutable- slicing
###Code
num = 12
name= 'Chaithra'
'my num is {} and my name is {}'.format(num, name)
'my num is {one} and my name is {two}, more {one}'.format(one=num, two=name) # more suggested way
# no need to worry about formatting being in exact same order
var = 'chaithra kopparam cheluvaiah'
var[5] # indexing the string
var[0:] # all the characters starting from index 0
var[3:5] # all the characters starting from index 3 - 4
var[:] # all the characters
var[0] = 'z' # strings are immutable
var[::-1] # reversing the string
var[::-2] # keeps subtracting step size 2 from the end index
# var = 'chaithra kopparam cheluvaiah'
var[::3] # keeps adding step size 3 to the start index
###Output
_____no_output_____
###Markdown
LIST- slicing- mutable- nested list
###Code
my_list = ['a','b','c','d','e']
my_list[1:4] # list slicing
my_list[1:5] # last index does not exist but it doesnt give any error
my_list[0] = 'New' # list is mutable
my_list
nested_list = [1,2,[3,4]]
nested_list[2]
nested_list[2][0] # in numpy, this syntax is used for 2d array selection
deep_nested = [1,2,[3,4,['target']]]
deep_nested[2]
deep_nested[2][2] # note that output is still a list with single element
deep_nested[2][2][0]
###Output
_____no_output_____
###Markdown
DICTIONARIES - nested dicts- keys are immutable
###Code
d = {'key1':'v1', 'k2':'v2', 'k3':123}
# indexing dictionary
d['key1']
d['k3']
# dictionaries can take in any items as their values
d = {'k1':[1,2,3]}
d['k1']
d['k1'][0]
my_list = d['k1'] # better coding
my_list[0]
# nested dictionaries
d = {'k1':{'innerkey':[1,2,3]}}
# lets say, we want to access 2 from the list
d['k1']['innerkey'][1]
###Output
_____no_output_____
###Markdown
TUPLE - Immutable
###Code
# tuple
t = (1,2,3)
# indexing tuple
t[0]
t[0] = 'NEW'
###Output
_____no_output_____
###Markdown
SET- creating set from list- add()
###Code
{1,2,3,1,1,1,1,1,2,3,4} # keeps unique values
set([1,2,3,1,1,1,1,1,2,3,4]) # passing list to set constructor to grab unique elements
# add items to set
s = {1,2,3}
s.add(5)
s
s.add(5) # adding a duplicate won't raise an error
s # it just keeps unique elements
###Output
_____no_output_____
###Markdown
Logical Operators
###Code
1 < 2
1 >= 2
1 == 1
1 == 2
1 != 3
'hi' == 'bye'
'hi' != 'bye'
(1 < 2) and (2 < 3) # paranthesis makes it more readable
(1 > 2) and (2 < 3)
###Output
_____no_output_____
###Markdown
CONDITIONAL STATEMENTS
###Code
if 1 < 5:
print('yep!')
if 1 == 2:
print('First')
elif 3 == 3:
print('Middle')
else:
print('Last')
if 1 == 2:
print('First')
elif 4 == 4:
print('second') # it is going to execute only this block and exit
#even though other statements might be true below
elif 3 == 3:
print('Middle')
else:
print('Last')
###Output
second
###Markdown
LOOPS- for- while
###Code
seq = [1,2,3,4,5]
for item in seq:
print(item)
i = 1
while i < 5:
print('i is {}'.format(i))
i = i + 1
###Output
i is 1
i is 2
i is 3
i is 4
###Markdown
RANGE
###Code
range(0,5) # returns range object
list(range(5))
list(range(0,5)) # 0 is redundant
for num in range(7,5): # no error even though end < start. for loop will not run
print(num)
for num in range(7,10):
print(num)
###Output
7
8
9
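###Markdown
 range also accepts an optional step argument: range(start, stop, step)
###Code
list(range(0, 10, 2)) # every second number: [0, 2, 4, 6, 8]
###Output
_____no_output_____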
###Markdown
LIST COMPREHENSION - FOR LOOP BUT BACKWARDS
###Code
x = [1,2,3,4,5]
out = []
for num in x:
out.append(num **2)
out
[num**2 for num in x] # for loop but backwards - reduced lines of code : can be done using map() also
out = [num**2 for num in x]
out
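# as noted above, the same result can be produced with map() (see the MAP & FILTER section below)
list(map(lambda num: num**2, x))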
###Output
_____no_output_____
###Markdown
FUNCTIONS
###Code
def my_func(name): # function name starts with lower case letters
print('Hello, '+name)
my_func('Chaithra')
def my_func(name='Default Name'): # if you want default value to one of the parameters
print('Hello, '+name)
my_func()
my_func(name='Chai') # you can fully explain what you are passing to function
my_func # function object will be returned
# function returning a value
def square(num):
return num ** 2
output = square(2)
output
# functions have a documentation string (docstring) written with triple enclosing quotes
# triple enclosing quotes basically allow you to put in a multi-line string
def square(num):
"""
This is a docstring.
can go multiple lines.
This function squares a number
"""
return num ** 2
square # shift + Tab to see the signature and docstring
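print(square.__doc__) # the docstring is also available programmatically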
###Output
_____no_output_____
###Markdown
MAP & FILTER
###Code
def times2(num): return num * 2
# converting to lambda ( anonymous functions)
# remove redudant keywords - def, fucntion name, return
t = lambda num: num*2
t(12)
times2(6)
seq = [1,2,3,4,5]
map(times2, seq)
list(map(times2, seq)) # casting to list
list(map(lambda num: num*2, seq))
# filter for even numbers in the seq
filter(lambda num: num%2==0, seq)
list(filter(lambda num: num%2==0, seq))
###Output
_____no_output_____
###Markdown
METHODS STRING METHODS- upper- lower- split
###Code
s = 'hello my name is chaithra'
s.lower()
s.upper()
s.split() # useful for text analysis
# default separator is any whitespace.
txt = "welc\nome\n to the jungle" # delimiter is newline and space
x = txt.split()
print(x)
tweet = 'Go Sports! #Sports'
tweet.split('#')
###Output
_____no_output_____
###Markdown
DICTIONARY METHODS- keys- values- items
###Code
d = {'k1':1, 'k2':2}
d.keys()
d.items()
d.values()
vals = d.values() # dict_values object cannot be indexed
vals[0] # cannot be indexed
vals = list(d.values())
vals[0]
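# items() pairs nicely with tuple unpacking (covered later) for looping over key/value pairs
for k, v in d.items():
    print(k, v)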
###Output
_____no_output_____
###Markdown
LIST METHODS- pop- pop with index- append
###Code
lst = [1,2,3]
lst.pop() # change is permanent
lst
lst = [1,2,3,4,5]
item = lst.pop()
item
first = lst.pop(0) # pop with index
print(lst)
print(first)
lst.append('NEW') # append new element to end of the list
lst
###Output
_____no_output_____
###Markdown
IN
###Code
'x' in [1,2,3,4,5]
'x' in ['x','y','z']
'dog' in 'martha is a dog!'
###Output
_____no_output_____
###Markdown
TUPLE UNPACKING
###Code
x = [(1,2),(3,4),(5,6)]
x[0]
x[0][1]
for item in x:
print(item)
# tuple unpacking works when iterating over list of tuples
for (a,b) in x:
print(a)
for a,b in x: # paranthesis are optional
print(b)
###Output
2
4
6
###Markdown
STRING FORMATTING IN JPMC INTERVIEW
###Code
# when we want to have specific decimal places
print('%.2f'%39.5)
print('%.2f'%39)
# {[argument_index_or_keyword]:[width][.precision][type]}
'{:.2f}'.format(38.9)
'{:.1f}'.format(38.28) # rounds off
'{num:.2f}'.format(num=38.9)
'{num:10.2f}'.format(num=38.9)
###Output
_____no_output_____ |
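###Markdown
 f-strings (Python 3.6+) support the same width/precision format specifiers:
###Code
num = 38.9
print(f'{num:.2f}') # 38.90
print(f'{num:10.2f}') # right-aligned in a field of width 10
###Output
_____no_output_____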
Tutorial-ScalarWave.ipynb | ###Markdown
window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); Generating C Code for the Scalar Wave Equation in Cartesian Coordinates Authors: Zach Etienne & Thiago Assumpção Formatting improvements courtesy Brandon Clark This module generates the C Code for the Scalarwave in Cartesian coordinates and sets up either monochromatic plane wave or spherical Gaussian [Initial Data](https://en.wikipedia.org/wiki/Initial_value_problem). **Notebook Status:** Validated **Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented below ([right-hand-side expressions](code_validation1); [initial data expressions](code_validation2)). In addition, all expressions have been validated against a trusted code (the [original SENR/NRPy+ code](https://bitbucket.org/zach_etienne/nrpy)). NRPy+ Source Code for this module: * [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py)* [ScalarWave/InitialData.py](../edit/ScalarWave/InitialData.py) Introduction: Problem StatementWe wish to numerically solve the scalar wave equation as an [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem) in Cartesian coordinates:$$\partial_t^2 u = c^2 \nabla^2 u \text{,}$$where $u$ (the amplitude of the wave) is a function of time and space: $u = u(t,x,y,...)$ (spatial dimension as-yet unspecified) and $c$ is the wave speed, subject to some initial condition$$u(0,x,y,...) = f(x,y,...)$$and suitable spatial boundary conditions.As described in the next section, we will find it quite useful to define$$v(t,x,y,...) = \partial_t u(t,x,y,...).$$In this way, the second-order PDE is reduced to a set of two coupled first-order PDEs\begin{align}\partial_t u &= v \\\partial_t v &= c^2 \nabla^2 u.\end{align}We will use NRPy+ to generate efficient C codes capable of generating both initial data $u(0,x,y,...) = f(x,y,...)$; $v(0,x,y,...)=g(x,y,...)$, as well as finite-difference expressions for the right-hand sides of the above expressions. These expressions are needed within the *Method of Lines* to "integrate" the solution forward in time. The Method of LinesOnce we have initial data, we "evolve it forward in time", using the [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html). In short, the Method of Lines enables us to handle 1. the **spatial derivatives** of an initial value problem PDE using **standard finite difference approaches**, and2. the **temporal derivatives** of an initial value problem PDE using **standard strategies for solving ordinary differential equations (ODEs)**, so long as the initial value problem PDE can be written in the form$$\partial_t \vec{f} = \mathbf{M}\ \vec{f},$$where $\mathbf{M}$ is an $N\times N$ matrix filled with differential operators that act on the $N$-element column vector $\vec{f}$. $\mathbf{M}$ may not contain $t$ or time derivatives explicitly; only *spatial* partial derivatives are allowed to appear inside $\mathbf{M}$. The scalar wave equation as written in the [previous module](Tutorial-ScalarWave.ipynb)\begin{equation}\partial_t \begin{bmatrix}u \\v \end{bmatrix}=\begin{bmatrix}0 & 1 \\c^2 \nabla^2 & 0 \end{bmatrix}\begin{bmatrix}u \\v \end{bmatrix}\end{equation}satisfies this requirement. 
Thus we can treat the spatial derivatives $\nabla^2 u$ of the scalar wave equation using **standard finite-difference approaches**, and the temporal derivatives $\partial_t u$ and $\partial_t v$ using **standard approaches for solving ODEs**. In [the next module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will apply the highly robust [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4), used widely for numerically solving ODEs, to "march" (integrate) the solution vector $\vec{f}$ forward in time from its initial value ("initial data"). Basic AlgorithmThe basic algorithm for solving the scalar wave equation [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem), based on the Method of Lines (see section above) is outlined below, with NRPy+-based components highlighted in green. We will review how NRPy+ generates these core components in this module.1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. Set gridfunction values to initial data.1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following: 1. Evaluate scalar wave RHS expressions. 1. Apply boundary conditions.**We refer to the right-hand side of the equation $\partial_t \vec{f} = \mathbf{M}\ \vec{f}$ as the RHS. In this case, we refer to the $\mathbf{M}\ \vec{f}$ as the "scalar wave RHSs".** In the following sections we will 1. Use NRPy+ to cast the scalar wave RHS expressions -- in finite difference form -- into highly efficient C code, 1. first in one spatial dimension with fourth-order finite differences, 1. and then in three spatial dimensions with tenth-order finite differences.1. Use NRPy+ to generate monochromatic plane-wave initial data for the scalar wave equation, where the wave propagates in an arbitrary direction.As for the $\nabla^2 u$ term, spatial derivatives are handled in NRPy+ via [finite differencing](https://en.wikipedia.org/wiki/Finite_difference).We will sample the solution $\{u,v\}$ at discrete, uniformly-sampled points in space and time. For simplicity, let's assume that we consider the wave equation in one spatial dimension. Then the solution at any sampled point in space and time is given by$$u^n_i = u(t_n,x_i) = u(t_0 + n \Delta t, x_0 + i \Delta x),$$where $\Delta t$ and $\Delta x$ represent the temporal and spatial resolution, respectively. $v^n_i$ is sampled at the same points in space and time. Table of Contents$$\label{toc}$$1. [Step 1](initializenrpy): Initialize core NRPy+ modules1. [Step 2](rhss1d): Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing1. [Step 3](rhss3d): Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing 1. [Step 3.a](code_validation1): Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module1. [Step 4](id): Setting up Initial Data for the Scalar Wave Equation 1. [Step 4.a](planewave): The Monochromatic Plane-Wave Solution 1. [Step 4.b](sphericalgaussian): The Spherical Gaussian Solution (*Courtesy Thiago Assumpção*)1. [Step 5](code_validation2): Code Validation against `ScalarWave.InitialData` NRPy+ module1. [Step 6](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Initialize core NRPy+ modules \[Back to [top](toc)\]$$\label{initializenrpy}$$Let's start by importing all the needed modules from NRPy+:
###Code
# Step P1: Import needed NRPy+ core modules:
import NRPy_param_funcs as par # NRPy+: Parameter interface
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import grid as gri # NRPy+: Functions having to do with numerical grids
import finite_difference as fin # NRPy+: Finite difference C code generation module
from outputC import lhrh # NRPy+: Core C code output module
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
###Output
_____no_output_____
###Markdown
Step 2: Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing \[Back to [top](toc)\]$$\label{rhss1d}$$To minimize complication, we will first restrict ourselves to solving the wave equation in one spatial dimension, so$$\nabla^2 u = \partial_x^2 u.$$Extension of this operator to higher spatial dimensions is straightforward, particularly when using NRPy+.As was discussed in [the finite difference section of the tutorial](Tutorial-Finite_Difference_Derivatives.ipynb), NRPy+ approximates derivatives using [finite difference methods](), the second-order derivative $\partial_x^2$ accurate to fourth-order in uniform grid spacing $\Delta x$ (from fitting the unique 4th-degree polynomial to 5 sample points of $u$) is given by\begin{equation}\left[\partial_x^2 u(t,x)\right]_j = \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right)+ \mathcal{O}\left((\Delta x)^4\right).\end{equation}
###Code
# Step P2: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
thismodule = "ScalarWave"
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 1: Set the spatial dimension parameter, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",1)
DIM = par.parval_from_str("grid::DIM")
# Step 2: Set the finite differencing order to 4.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4)
# Step 3: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 4: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 5: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
vv_rhs = sp.simplify(vv_rhs)
# Step 6: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)])
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expression:
* "const double uu_dDD00 = invdx0**2*(-5*uu/2 + 4*uu_i0m1/3 - uu_i0m2/12 + 4*uu_i0p1/3 - uu_i0p2/12)"
*/
const double uu_i0m2 = in_gfs[IDX2(UUGF, i0-2)];
const double uu_i0m1 = in_gfs[IDX2(UUGF, i0-1)];
const double uu = in_gfs[IDX2(UUGF, i0)];
const double uu_i0p1 = in_gfs[IDX2(UUGF, i0+1)];
const double uu_i0p2 = in_gfs[IDX2(UUGF, i0+2)];
const double vv = in_gfs[IDX2(VVGF, i0)];
const double FDPart1_Rational_5_2 = 5.0/2.0;
const double FDPart1_Rational_1_12 = 1.0/12.0;
const double FDPart1_Rational_4_3 = 4.0/3.0;
const double uu_dDD00 = ((invdx0)*(invdx0))*(FDPart1_Rational_1_12*(-uu_i0m2 - uu_i0p2) + FDPart1_Rational_4_3*(uu_i0m1 + uu_i0p1) - FDPart1_Rational_5_2*uu);
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[rhs_gfs[IDX2(UUGF, i0)] = vv,
* rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*wavespeed**2]"
*/
rhs_gfs[IDX2(UUGF, i0)] = vv;
rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*((wavespeed)*(wavespeed));
}
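###Markdown
 As a quick, independent sanity check (a minimal sketch, not generated by NRPy+), the fourth-order stencil above can be applied numerically to a function with a known second derivative, e.g. $u(x)=\sin(x)$ at $x_0=1$, where $\partial_x^2 u = -\sin(1) \approx -0.8415$:
###Code
import numpy as np
dx = 0.1
u = np.sin(1.0 + dx*np.arange(-2, 3)) # samples u_{j-2}, ..., u_{j+2}
# fourth-order-accurate second derivative from the stencil above
uxx_fd = (-(u[0] + u[4])/12.0 + 4.0*(u[1] + u[3])/3.0 - 5.0*u[2]/2.0) / dx**2
print(uxx_fd, -np.sin(1.0)) # finite-difference result vs exact value
###Output
_____no_output_____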
###Markdown
**Success!** Notice that indeed NRPy+ was able to compute the spatial derivative operator,\begin{equation}\left[\partial_x^2 u(t,x)\right]_j \approx \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right),\end{equation}correctly (easier to read in the "Original SymPy expressions" comment block at the top of the C output. Note that `invdx0`$=1/\Delta x_0$, where $\Delta x_0$ is the (uniform) grid spacing in the zeroth, or $x_0$ direction. Step 3: Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing \[Back to [top](toc)\]$$\label{rhss3d}$$Let's next repeat the same process, only this time at **10th** finite difference order, for the **3-spatial-dimension** scalar wave equation, with SIMD enabled:
###Code
# Step 1: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 2: Set the spatial dimension parameter
# to *FOUR* this time, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")
# Step 3: Set the finite differencing order to 10.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",10)
# Step 4a: Reset gridfunctions registered in 1D case above,
# to avoid NRPy+ throwing an error about double-
# registering gridfunctions, which is not allowed.
gri.glb_gridfcs_list = []
# Step 4b: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 5: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 6: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
# Step 7: Simplify the expression for c^2 \nabla^2 u (a.k.a., vv_rhs):
vv_rhs = sp.simplify(vv_rhs)
# Step 8: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)],params="SIMD_enable=True")
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY uu_dDD00 = invdx0**2*(-5269*uu/1800 + 5*uu_i0m1_i1_i2/3 - 5*uu_i0m2_i1_i2/21 + 5*uu_i0m3_i1_i2/126 - 5*uu_i0m4_i1_i2/1008 + uu_i0m5_i1_i2/3150 + 5*uu_i0p1_i1_i2/3 - 5*uu_i0p2_i1_i2/21 + 5*uu_i0p3_i1_i2/126 - 5*uu_i0p4_i1_i2/1008 + uu_i0p5_i1_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD11 = invdx1**2*(-5269*uu/1800 + 5*uu_i0_i1m1_i2/3 - 5*uu_i0_i1m2_i2/21 + 5*uu_i0_i1m3_i2/126 - 5*uu_i0_i1m4_i2/1008 + uu_i0_i1m5_i2/3150 + 5*uu_i0_i1p1_i2/3 - 5*uu_i0_i1p2_i2/21 + 5*uu_i0_i1p3_i2/126 - 5*uu_i0_i1p4_i2/1008 + uu_i0_i1p5_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD22 = invdx2**2*(-5269*uu/1800 + 5*uu_i0_i1_i2m1/3 - 5*uu_i0_i1_i2m2/21 + 5*uu_i0_i1_i2m3/126 - 5*uu_i0_i1_i2m4/1008 + uu_i0_i1_i2m5/3150 + 5*uu_i0_i1_i2p1/3 - 5*uu_i0_i1_i2p2/21 + 5*uu_i0_i1_i2p3/126 - 5*uu_i0_i1_i2p4/1008 + uu_i0_i1_i2p5/3150)]"
*/
const REAL_SIMD_ARRAY uu_i0_i1_i2m5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-5)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-1)]);
const REAL_SIMD_ARRAY uu_i0_i1m5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-1,i2)]);
const REAL_SIMD_ARRAY uu_i0m5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-1,i1,i2)]);
const REAL_SIMD_ARRAY uu = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+1,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+1)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+5)]);
const REAL_SIMD_ARRAY vv = ReadSIMD(&in_gfs[IDX4(VVGF, i0,i1,i2)]);
const double tmpFDPart1_NegativeOne_ = -1.0;
const REAL_SIMD_ARRAY FDPart1_NegativeOne_ = ConstSIMD(tmpFDPart1_NegativeOne_);
const double tmpFDPart1_Rational_1_3150 = 1.0/3150.0;
const REAL_SIMD_ARRAY FDPart1_Rational_1_3150 = ConstSIMD(tmpFDPart1_Rational_1_3150);
const double tmpFDPart1_Rational_5269_1800 = 5269.0/1800.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5269_1800 = ConstSIMD(tmpFDPart1_Rational_5269_1800);
const double tmpFDPart1_Rational_5_1008 = 5.0/1008.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_1008 = ConstSIMD(tmpFDPart1_Rational_5_1008);
const double tmpFDPart1_Rational_5_126 = 5.0/126.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_126 = ConstSIMD(tmpFDPart1_Rational_5_126);
const double tmpFDPart1_Rational_5_21 = 5.0/21.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_21 = ConstSIMD(tmpFDPart1_Rational_5_21);
const double tmpFDPart1_Rational_5_3 = 5.0/3.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_3 = ConstSIMD(tmpFDPart1_Rational_5_3);
const REAL_SIMD_ARRAY FDPart1_0 = MulSIMD(FDPart1_Rational_5269_1800, uu);
const REAL_SIMD_ARRAY uu_dDD00 = MulSIMD(MulSIMD(invdx0, invdx0), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0m3_i1_i2, uu_i0p3_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0m1_i1_i2, uu_i0p1_i1_i2), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0m5_i1_i2, uu_i0p5_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0m4_i1_i2, uu_i0p4_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0m2_i1_i2, uu_i0p2_i1_i2), FDPart1_0))))));
const REAL_SIMD_ARRAY uu_dDD11 = MulSIMD(MulSIMD(invdx1, invdx1), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0_i1m3_i2, uu_i0_i1p3_i2), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0_i1m1_i2, uu_i0_i1p1_i2), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0_i1m5_i2, uu_i0_i1p5_i2), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0_i1m4_i2, uu_i0_i1p4_i2), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0_i1m2_i2, uu_i0_i1p2_i2), FDPart1_0))))));
const REAL_SIMD_ARRAY uu_dDD22 = MulSIMD(MulSIMD(invdx2, invdx2), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0_i1_i2m3, uu_i0_i1_i2p3), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0_i1_i2m1, uu_i0_i1_i2p1), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0_i1_i2m5, uu_i0_i1_i2p5), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0_i1_i2m4, uu_i0_i1_i2p4), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0_i1_i2m2, uu_i0_i1_i2p2), FDPart1_0))))));
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY __RHS_exp_0 = vv,
* const REAL_SIMD_ARRAY __RHS_exp_1 = wavespeed**2*(uu_dDD00 + uu_dDD11 + uu_dDD22)]"
*/
const REAL_SIMD_ARRAY __RHS_exp_0 = vv;
const REAL_SIMD_ARRAY __RHS_exp_1 = MulSIMD(MulSIMD(wavespeed, wavespeed), AddSIMD(uu_dDD00, AddSIMD(uu_dDD11, uu_dDD22)));
WriteSIMD(&rhs_gfs[IDX4(UUGF, i0, i1, i2)], __RHS_exp_0);
WriteSIMD(&rhs_gfs[IDX4(VVGF, i0, i1, i2)], __RHS_exp_1);
}
###Markdown
Step 3.a: Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation1}$$Here, as a code validation check, we verify agreement in the SymPy expressions for the RHSs of the three-spatial-dimension Scalar Wave equation (i.e., `uu_rhs` and `vv_rhs`) between1. this tutorial and 2. the [NRPy+ ScalarWave.ScalarWave_RHSs](../edit/ScalarWave/ScalarWave_RHSs.py) module.
###Code
# Step 10: We already have SymPy expressions for uu_rhs and vv_rhs in
# terms of other SymPy variables. Even if we reset the list
# of NRPy+ gridfunctions, these *SymPy* expressions for
# uu_rhs and vv_rhs *will remain unaffected*.
#
# Here, we will use the above-defined uu_rhs and vv_rhs to
# validate against the same expressions in the
# ScalarWave/ScalarWave_RHSs.py module,
# to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the ScalarWave_RHSs.py module itself.
#
# Reset the list of gridfunctions, as registering a gridfunction
# twice will spawn an error.
gri.glb_gridfcs_list = []
# Step 11: Call the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module,
# which should do exactly the same as in Steps 1-10 above.
import ScalarWave.ScalarWave_RHSs as swrhs
swrhs.ScalarWave_RHSs()
# Step 12: Consistency check between the tutorial notebook above
# and the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_rhs - swrhs.uu_rhs = "+str(sp.simplify(uu_rhs - swrhs.uu_rhs))+"\t\t (should be zero)")
print("vv_rhs - swrhs.vv_rhs = "+str(sp.simplify(vv_rhs - swrhs.vv_rhs))+"\t\t (should be zero)")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_rhs - swrhs.uu_rhs = 0 (should be zero)
vv_rhs - swrhs.vv_rhs = 0 (should be zero)
###Markdown
Step 4: Setting up Initial Data for the Scalar Wave Equation \[Back to [top](toc)\]$$\label{id}$$ Step 4.a: The Monochromatic Plane-Wave Solution \[Back to [top](toc)\]$$\label{planewave}$$The solution to the scalar wave equation for a monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction is$$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$where $\hat{k}$ is a unit vector. We choose $f(\hat{k}\cdot\vec{x} - c t)$ to take the form$$f(\hat{k}\cdot\vec{x} - c t) = \sin\left(\hat{k}\cdot\vec{x} - c t\right) + 2,$$where we add the $+2$ to ensure that the exact solution never crosses through zero. In places where the exact solution passes through zero, the relative error (i.e., the measure of error to compare numerical with exact results) is undefined. Also, $f(\hat{k}\cdot\vec{x} - c t)$ plus a constant is still a solution to the wave equation.
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx # Sets the Cartesian coordinates xx[0]=x; xx[1]=y; xx[2]=z
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
kk = par.Cparameters("REAL", thismodule, ["kk0", "kk1", "kk2"],[1.0,1.0,1.0])
# Step 3: Normalize the k vector
kk_norm = sp.sqrt(kk[0]**2 + kk[1]**2 + kk[2]**2)
# Step 4: Compute k.x
dot_product = sp.sympify(0)
for i in range(DIM):
dot_product += xx[i]*kk[i]
dot_product /= kk_norm
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID_PlaneWave = sp.sin(dot_product - wavespeed*time)+2
vv_ID_PlaneWave = sp.diff(uu_ID_PlaneWave, time)
###Output
_____no_output_____
###Markdown
Next we verify that $f(\hat{k}\cdot\vec{x} - c t)$ satisfies the wave equation, by computing$$\left(c^2 \nabla^2 - \partial_t^2 \right)\ f\left(\hat{k}\cdot\vec{x} - c t\right),$$and confirming the result is exactly zero.
###Code
sp.simplify(wavespeed**2*(sp.diff(uu_ID_PlaneWave,xx[0],2) +
sp.diff(uu_ID_PlaneWave,xx[1],2) +
sp.diff(uu_ID_PlaneWave,xx[2],2))
- sp.diff(uu_ID_PlaneWave,time,2))
###Output
_____no_output_____
###Markdown
Step 4.b: The Spherical Gaussian Solution \[Back to [top](toc)\]$$\label{sphericalgaussian}$$Here we will implement the spherical Gaussian solution, consists of ingoing and outgoing wave fronts:\begin{align}u(r,t) &= u_{\rm out}(r,t) + u_{\rm in}(r,t),\ \ \text{where}\\u_{\rm out}(r,t) &=\frac{r-ct}{r} \exp\left[\frac{-(r-ct)^2}{2 \sigma^2}\right] \\u_{\rm in}(r,t) &=\frac{r+ct}{r} \exp\left[\frac{-(r+ct)^2}{2 \sigma^2}\right] \\\end{align}where $c$ is the wavespeed, and $\sigma$ is the width of the Gaussian (i.e., the "standard deviation").
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx # Sets the Cartesian coordinates xx[0]=x; xx[1]=y; xx[2]=z
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
sigma = par.Cparameters("REAL", thismodule, "sigma",3.0)
# Step 4: Compute r
r = sp.sympify(0)
for i in range(DIM):
r += xx[i]**2
r = sp.sqrt(r)
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID_SphericalGaussianOUT = +(r - wavespeed*time)/r * sp.exp( -(r - wavespeed*time)**2 / (2*sigma**2) )
uu_ID_SphericalGaussianIN = +(r + wavespeed*time)/r * sp.exp( -(r + wavespeed*time)**2 / (2*sigma**2) )
uu_ID_SphericalGaussian = uu_ID_SphericalGaussianOUT + uu_ID_SphericalGaussianIN
vv_ID_SphericalGaussian = sp.diff(uu_ID_SphericalGaussian, time)
###Output
_____no_output_____
###Markdown
Since the wave equation is linear, both the leftgoing and rightgoing waves must satisfy the wave equation, which implies that their sum also satisfies the wave equation. Next we verify that $u(r,t)$ satisfies the wave equation, by computing$$\left(c^2 \nabla^2 - \partial_t^2 \right)\left\{u_{\rm R}(r,t)\right\},$$and$$\left(c^2 \nabla^2 - \partial_t^2 \right)\left\{u_{\rm L}(r,t)\right\},$$are separately zero. We do this because SymPy has difficulty simplifying the combined expression.
###Code
print(sp.simplify(wavespeed**2*(sp.diff(uu_ID_SphericalGaussianOUT,xx[0],2) +
sp.diff(uu_ID_SphericalGaussianOUT,xx[1],2) +
sp.diff(uu_ID_SphericalGaussianOUT,xx[2],2))
- sp.diff(uu_ID_SphericalGaussianOUT,time,2)) )
print(sp.simplify(wavespeed**2*(sp.diff(uu_ID_SphericalGaussianIN,xx[0],2) +
sp.diff(uu_ID_SphericalGaussianIN,xx[1],2) +
sp.diff(uu_ID_SphericalGaussianIN,xx[2],2))
- sp.diff(uu_ID_SphericalGaussianIN,time,2)))
###Output
0
0
###Markdown
Step 5: Code Validation against `ScalarWave.InitialData` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation2}$$As a code validation check, we will verify agreement in the SymPy expressions for plane-wave initial data for the Scalar Wave equation between1. this tutorial and 2. the NRPy+ [ScalarWave.InitialData](../edit/ScalarWave/InitialData.py) module.
###Code
# We just defined SymPy expressions for uu_ID and vv_ID in
# terms of other SymPy variables. Here, we will use the
# above-defined uu_ID and vv_ID to validate against the
# same expressions in the ScalarWave/InitialData.py
# module, to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the PlaneWave ID module itself.
#
# Step 6: Call the InitialData(Type="PlaneWave") function from within the
# ScalarWave/InitialData.py module,
# which should do exactly the same as in Steps 1-5 above.
import ScalarWave.InitialData as swid
swid.InitialData(Type="PlaneWave")
# Step 7: Consistency check between the tutorial notebook above
# and the PlaneWave option from within the
# ScalarWave/InitialData.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module: PlaneWave Case")
if sp.simplify(uu_ID_PlaneWave - swid.uu_ID) != 0:
print("TEST FAILED: uu_ID_PlaneWave - swid.uu_ID = "+str(sp.simplify(uu_ID_PlaneWave - swid.uu_ID))+"\t\t (should be zero)")
sys.exit(1)
if sp.simplify(vv_ID_PlaneWave - swid.vv_ID) != 0:
print("TEST FAILED: vv_ID_PlaneWave - swid.vv_ID = "+str(sp.simplify(vv_ID_PlaneWave - swid.vv_ID))+"\t\t (should be zero)")
sys.exit(1)
print("TESTS PASSED!")
# Step 8: Consistency check between the tutorial notebook above
# and the SphericalGaussian option from within the
# ScalarWave/InitialData.py module.
swid.InitialData(Type="SphericalGaussian")
print("Consistency check between ScalarWave tutorial and NRPy+ module: SphericalGaussian Case")
if sp.simplify(uu_ID_SphericalGaussian - swid.uu_ID) != 0:
print("TEST FAILED: uu_ID_SphericalGaussian - swid.uu_ID = "+str(sp.simplify(uu_ID_SphericalGaussian - swid.uu_ID))+"\t\t (should be zero)")
sys.exit(1)
if sp.simplify(vv_ID_SphericalGaussian - swid.vv_ID) != 0:
print("TEST FAILED: vv_ID_SphericalGaussian - swid.vv_ID = "+str(sp.simplify(vv_ID_SphericalGaussian - swid.vv_ID))+"\t\t (should be zero)")
sys.exit(1)
print("TESTS PASSED!")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module: PlaneWave Case
TESTS PASSED!
Consistency check between ScalarWave tutorial and NRPy+ module: SphericalGaussian Case
TESTS PASSED!
###Markdown
Step 6: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-ScalarWave.pdf](Tutorial-ScalarWave.pdf). (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-ScalarWave")
###Output
Created Tutorial-ScalarWave.tex, and compiled LaTeX file to PDF file
Tutorial-ScalarWave.pdf
###Markdown
Generating C Code for the Scalar Wave Equation in Cartesian Coordinates Authors: Zach Etienne & Thiago Assumpção Formatting improvements courtesy Brandon Clark This module generates the C Code for the Scalarwave in Cartesian coordinates and sets up either monochromatic plane wave or spherical Gaussian [Initial Data](https://en.wikipedia.org/wiki/Initial_value_problem). **Notebook Status:** Validated **Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented below ([right-hand-side expressions](code_validation1); [initial data expressions](code_validation2)). In addition, all expressions have been validated against a trusted code (the [original SENR/NRPy+ code](https://bitbucket.org/zach_etienne/nrpy)). NRPy+ Source Code for this module: * [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py)* [ScalarWave/InitialData.py](../edit/ScalarWave/InitialData.py) Introduction: Problem StatementWe wish to numerically solve the scalar wave equation as an [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem) in Cartesian coordinates:$$\partial_t^2 u = c^2 \nabla^2 u \text{,}$$where $u$ (the amplitude of the wave) is a function of time and space: $u = u(t,x,y,...)$ (spatial dimension as-yet unspecified) and $c$ is the wave speed, subject to some initial condition$$u(0,x,y,...) = f(x,y,...)$$and suitable spatial boundary conditions.As described in the next section, we will find it quite useful to define$$v(t,x,y,...) = \partial_t u(t,x,y,...).$$In this way, the second-order PDE is reduced to a set of two coupled first-order PDEs\begin{align}\partial_t u &= v \\\partial_t v &= c^2 \nabla^2 u.\end{align}We will use NRPy+ to generate efficient C codes capable of generating both initial data $u(0,x,y,...) = f(x,y,...)$; $v(0,x,y,...)=g(x,y,...)$, as well as finite-difference expressions for the right-hand sides of the above expressions. These expressions are needed within the *Method of Lines* to "integrate" the solution forward in time. The Method of LinesOnce we have initial data, we "evolve it forward in time", using the [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html). In short, the Method of Lines enables us to handle 1. the **spatial derivatives** of an initial value problem PDE using **standard finite difference approaches**, and2. the **temporal derivatives** of an initial value problem PDE using **standard strategies for solving ordinary differential equations (ODEs)**, so long as the initial value problem PDE can be written in the form$$\partial_t \vec{f} = \mathbf{M}\ \vec{f},$$where $\mathbf{M}$ is an $N\times N$ matrix filled with differential operators that act on the $N$-element column vector $\vec{f}$. $\mathbf{M}$ may not contain $t$ or time derivatives explicitly; only *spatial* partial derivatives are allowed to appear inside $\mathbf{M}$. The scalar wave equation as written in the [previous module](Tutorial-ScalarWave.ipynb)\begin{equation}\partial_t \begin{bmatrix}u \\v \end{bmatrix}=\begin{bmatrix}0 & 1 \\c^2 \nabla^2 & 0 \end{bmatrix}\begin{bmatrix}u \\v \end{bmatrix}\end{equation}satisfies this requirement. 
Thus we can treat the spatial derivatives $\nabla^2 u$ of the scalar wave equation using **standard finite-difference approaches**, and the temporal derivatives $\partial_t u$ and $\partial_t v$ using **standard approaches for solving ODEs**. In [the next module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will apply the highly robust [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4), used widely for numerically solving ODEs, to "march" (integrate) the solution vector $\vec{f}$ forward in time from its initial value ("initial data"). Basic AlgorithmThe basic algorithm for solving the scalar wave equation [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem), based on the Method of Lines (see section above) is outlined below, with NRPy+-based components highlighted in green. We will review how NRPy+ generates these core components in this module.1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. Set gridfunction values to initial data.1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following: 1. Evaluate scalar wave RHS expressions. 1. Apply boundary conditions.**We refer to the right-hand side of the equation $\partial_t \vec{f} = \mathbf{M}\ \vec{f}$ as the RHS. In this case, we refer to the $\mathbf{M}\ \vec{f}$ as the "scalar wave RHSs".** In the following sections we will 1. Use NRPy+ to cast the scalar wave RHS expressions -- in finite difference form -- into highly efficient C code, 1. first in one spatial dimension with fourth-order finite differences, 1. and then in three spatial dimensions with tenth-order finite differences.1. Use NRPy+ to generate monochromatic plane-wave initial data for the scalar wave equation, where the wave propagates in an arbitrary direction.As for the $\nabla^2 u$ term, spatial derivatives are handled in NRPy+ via [finite differencing](https://en.wikipedia.org/wiki/Finite_difference).We will sample the solution $\{u,v\}$ at discrete, uniformly-sampled points in space and time. For simplicity, let's assume that we consider the wave equation in one spatial dimension. Then the solution at any sampled point in space and time is given by$$u^n_i = u(t_n,x_i) = u(t_0 + n \Delta t, x_0 + i \Delta x),$$where $\Delta t$ and $\Delta x$ represent the temporal and spatial resolution, respectively. $v^n_i$ is sampled at the same points in space and time. Table of Contents$$\label{toc}$$1. [Step 1](initializenrpy): Initialize core NRPy+ modules1. [Step 2](rhss1d): Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing1. [Step 3](rhss3d): Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing 1. [Step 3.a](code_validation1): Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module1. [Step 4](id): Setting up Initial Data for the Scalar Wave Equation 1. [Step 4.a](planewave): The Monochromatic Plane-Wave Solution 1. [Step 4.b](sphericalgaussian): The Spherical Gaussian Solution (*Courtesy Thiago Assumpção*)1. [Step 5](code_validation2): Code Validation against `ScalarWave.InitialData` NRPy+ module1. [Step 6](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Initialize core NRPy+ modules \[Back to [top](toc)\]$$\label{initializenrpy}$$Let's start by importing all the needed modules from NRPy+:
###Code
# Step P1: Import needed NRPy+ core modules:
import NRPy_param_funcs as par # NRPy+: Parameter interface
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import grid as gri # NRPy+: Functions having to do with numerical grids
import finite_difference as fin # NRPy+: Finite difference C code generation module
from outputC import lhrh # NRPy+: Core C code output module
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
###Output
_____no_output_____
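###Markdown
Before moving on to NRPy+ code generation, it can help to see the Basic Algorithm above in one self-contained sketch. The following cell is purely illustrative (plain NumPy, not generated by NRPy+, with arbitrary choices of grid size, CFL factor, and step count): it evolves a right-moving sine wave on a periodic 1D grid using fourth-order centered differences in space (the same $-1/12,\ 4/3,\ -5/2$ stencil derived in Step 2 below) and the classic RK4 time integrator.
###Code
import numpy as np

# Illustrative Method-of-Lines demo: u_tt = c^2 u_xx on a periodic 1D grid.
c_wave  = 1.0                    # wave speed
N       = 200                    # number of grid points
L       = 10.0                   # periodic domain size
dx      = L / N
x       = np.arange(N) * dx
dt      = 0.25 * dx / c_wave     # conservatively CFL-limited RK4 step
n_steps = 400

def laplacian_4th(u):
    # 4th-order centered second derivative; same coefficients as the
    # kernel generated in Step 2: (-1/12, 4/3, -5/2, 4/3, -1/12)/dx^2
    return (-(np.roll(u, 2) + np.roll(u, -2))/12.0
            + 4.0*(np.roll(u, 1) + np.roll(u, -1))/3.0
            - 2.5*u) / dx**2

def rhs(state):
    # state[0] = u, state[1] = v = du/dt  (the M f right-hand side above)
    u, v = state
    return np.array([v, c_wave**2 * laplacian_4th(u)])

# Initial data: right-moving wave u = sin(k(x - c t)) + 2 evaluated at t = 0
k = 2.0*np.pi / L
state = np.array([np.sin(k*x) + 2.0, -c_wave*k*np.cos(k*x)])

for _ in range(n_steps):         # classic RK4 time stepping
    k1 = rhs(state)
    k2 = rhs(state + 0.5*dt*k1)
    k3 = rhs(state + 0.5*dt*k2)
    k4 = rhs(state + dt*k3)
    state = state + dt*(k1 + 2.0*k2 + 2.0*k3 + k4)/6.0

t_final = n_steps*dt
u_exact = np.sin(k*(x - c_wave*t_final)) + 2.0
print("max |u_numerical - u_exact| =", np.abs(state[0] - u_exact).max())
# expect a value many orders of magnitude below the O(1) wave amplitude
###Output
_____no_output_____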
###Markdown
Step 2: Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing \[Back to [top](toc)\]$$\label{rhss1d}$$To minimize complication, we will first restrict ourselves to solving the wave equation in one spatial dimension, so$$\nabla^2 u = \partial_x^2 u.$$Extension of this operator to higher spatial dimensions is straightforward, particularly when using NRPy+. As was discussed in [the finite difference section of the tutorial](Tutorial-Finite_Difference_Derivatives.ipynb), NRPy+ approximates derivatives using [finite difference methods](https://en.wikipedia.org/wiki/Finite_difference). The second-order derivative $\partial_x^2$, accurate to fourth order in the uniform grid spacing $\Delta x$ (obtained by fitting the unique 4th-degree polynomial to 5 sample points of $u$), is given by\begin{equation}\left[\partial_x^2 u(t,x)\right]_j = \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right)+ \mathcal{O}\left((\Delta x)^4\right).\end{equation}
###Code
# Step P2: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
thismodule = "ScalarWave"
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 1: Set the spatial dimension parameter, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",1)
DIM = par.parval_from_str("grid::DIM")
# Step 2: Set the finite differencing order to 4.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4)
# Step 3: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 4: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 5: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
vv_rhs = sp.simplify(vv_rhs)
# Step 6: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)])
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expression:
* "const double uu_dDD00 = invdx0**2*(-5*uu/2 + 4*uu_i0m1/3 - uu_i0m2/12 + 4*uu_i0p1/3 - uu_i0p2/12)"
*/
const double uu_i0m2 = in_gfs[IDX2(UUGF, i0-2)];
const double uu_i0m1 = in_gfs[IDX2(UUGF, i0-1)];
const double uu = in_gfs[IDX2(UUGF, i0)];
const double uu_i0p1 = in_gfs[IDX2(UUGF, i0+1)];
const double uu_i0p2 = in_gfs[IDX2(UUGF, i0+2)];
const double vv = in_gfs[IDX2(VVGF, i0)];
const double FDPart1_Rational_5_2 = 5.0/2.0;
const double FDPart1_Rational_1_12 = 1.0/12.0;
const double FDPart1_Rational_4_3 = 4.0/3.0;
const double uu_dDD00 = ((invdx0)*(invdx0))*(FDPart1_Rational_1_12*(-uu_i0m2 - uu_i0p2) + FDPart1_Rational_4_3*(uu_i0m1 + uu_i0p1) - FDPart1_Rational_5_2*uu);
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[rhs_gfs[IDX2(UUGF, i0)] = vv,
* rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*wavespeed**2]"
*/
rhs_gfs[IDX2(UUGF, i0)] = vv;
rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*((wavespeed)*(wavespeed));
}
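###Markdown
As a quick optional cross-check (a standalone SymPy sketch, not part of the NRPy+ workflow), the stencil coefficients in the generated kernel above can be re-derived directly with SymPy's `finite_diff_weights` utility:
###Code
import sympy as sp

# Weights of the centered, 4th-order-accurate approximation to d^2/dx^2
# on the five points x0 + {-2, -1, 0, +1, +2}*dx (in units of 1/dx^2).
offsets = [-2, -1, 0, 1, 2]
weights = sp.finite_diff_weights(2, offsets, 0)[2][-1]
print(weights)   # expect [-1/12, 4/3, -5/2, 4/3, -1/12], matching the kernel above
###Output
_____no_output_____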
###Markdown
**Success!** Notice that indeed NRPy+ was able to compute the spatial derivative operator,\begin{equation}\left[\partial_x^2 u(t,x)\right]_j \approx \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right),\end{equation}correctly (this is easiest to read in the "Original SymPy expressions" comment block at the top of the C output). Note that `invdx0`$=1/\Delta x_0$, where $\Delta x_0$ is the (uniform) grid spacing in the zeroth, or $x_0$, direction. Step 3: Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing \[Back to [top](toc)\]$$\label{rhss3d}$$Let's next repeat the same process, only this time at **10th** finite-difference order, for the **3-spatial-dimension** scalar wave equation, with SIMD enabled:
###Code
# Step 1: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 2: Set the spatial dimension parameter
#            to *THREE* this time, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")
# Step 3: Set the finite differencing order to 10.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",10)
# Step 4a: Reset gridfunctions registered in 1D case above,
# to avoid NRPy+ throwing an error about double-
# registering gridfunctions, which is not allowed.
gri.glb_gridfcs_list = []
# Step 4b: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 5: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 6: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
# Step 7: Simplify the expression for c^2 \nabla^2 u (a.k.a., vv_rhs):
vv_rhs = sp.simplify(vv_rhs)
# Step 8: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)],params="SIMD_enable=True")
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY uu_dDD00 = invdx0**2*(-5269*uu/1800 + 5*uu_i0m1_i1_i2/3 - 5*uu_i0m2_i1_i2/21 + 5*uu_i0m3_i1_i2/126 - 5*uu_i0m4_i1_i2/1008 + uu_i0m5_i1_i2/3150 + 5*uu_i0p1_i1_i2/3 - 5*uu_i0p2_i1_i2/21 + 5*uu_i0p3_i1_i2/126 - 5*uu_i0p4_i1_i2/1008 + uu_i0p5_i1_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD11 = invdx1**2*(-5269*uu/1800 + 5*uu_i0_i1m1_i2/3 - 5*uu_i0_i1m2_i2/21 + 5*uu_i0_i1m3_i2/126 - 5*uu_i0_i1m4_i2/1008 + uu_i0_i1m5_i2/3150 + 5*uu_i0_i1p1_i2/3 - 5*uu_i0_i1p2_i2/21 + 5*uu_i0_i1p3_i2/126 - 5*uu_i0_i1p4_i2/1008 + uu_i0_i1p5_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD22 = invdx2**2*(-5269*uu/1800 + 5*uu_i0_i1_i2m1/3 - 5*uu_i0_i1_i2m2/21 + 5*uu_i0_i1_i2m3/126 - 5*uu_i0_i1_i2m4/1008 + uu_i0_i1_i2m5/3150 + 5*uu_i0_i1_i2p1/3 - 5*uu_i0_i1_i2p2/21 + 5*uu_i0_i1_i2p3/126 - 5*uu_i0_i1_i2p4/1008 + uu_i0_i1_i2p5/3150)]"
*/
const REAL_SIMD_ARRAY uu_i0_i1_i2m5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-5)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-1)]);
const REAL_SIMD_ARRAY uu_i0_i1m5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-1,i2)]);
const REAL_SIMD_ARRAY uu_i0m5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-1,i1,i2)]);
const REAL_SIMD_ARRAY uu = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+1,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+1)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+5)]);
const REAL_SIMD_ARRAY vv = ReadSIMD(&in_gfs[IDX4(VVGF, i0,i1,i2)]);
const double tmpFDPart1_NegativeOne_ = -1.0;
const REAL_SIMD_ARRAY FDPart1_NegativeOne_ = ConstSIMD(tmpFDPart1_NegativeOne_);
const double tmpFDPart1_Rational_1_3150 = 1.0/3150.0;
const REAL_SIMD_ARRAY FDPart1_Rational_1_3150 = ConstSIMD(tmpFDPart1_Rational_1_3150);
const double tmpFDPart1_Rational_5269_1800 = 5269.0/1800.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5269_1800 = ConstSIMD(tmpFDPart1_Rational_5269_1800);
const double tmpFDPart1_Rational_5_1008 = 5.0/1008.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_1008 = ConstSIMD(tmpFDPart1_Rational_5_1008);
const double tmpFDPart1_Rational_5_126 = 5.0/126.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_126 = ConstSIMD(tmpFDPart1_Rational_5_126);
const double tmpFDPart1_Rational_5_21 = 5.0/21.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_21 = ConstSIMD(tmpFDPart1_Rational_5_21);
const double tmpFDPart1_Rational_5_3 = 5.0/3.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_3 = ConstSIMD(tmpFDPart1_Rational_5_3);
const REAL_SIMD_ARRAY FDPart1_0 = MulSIMD(FDPart1_Rational_5269_1800, uu);
const REAL_SIMD_ARRAY uu_dDD00 = MulSIMD(MulSIMD(invdx0, invdx0), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0m3_i1_i2, uu_i0p3_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0m1_i1_i2, uu_i0p1_i1_i2), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0m5_i1_i2, uu_i0p5_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0m4_i1_i2, uu_i0p4_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0m2_i1_i2, uu_i0p2_i1_i2), FDPart1_0))))));
const REAL_SIMD_ARRAY uu_dDD11 = MulSIMD(MulSIMD(invdx1, invdx1), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0_i1m3_i2, uu_i0_i1p3_i2), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0_i1m1_i2, uu_i0_i1p1_i2), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0_i1m5_i2, uu_i0_i1p5_i2), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0_i1m4_i2, uu_i0_i1p4_i2), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0_i1m2_i2, uu_i0_i1p2_i2), FDPart1_0))))));
const REAL_SIMD_ARRAY uu_dDD22 = MulSIMD(MulSIMD(invdx2, invdx2), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0_i1_i2m3, uu_i0_i1_i2p3), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0_i1_i2m1, uu_i0_i1_i2p1), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0_i1_i2m5, uu_i0_i1_i2p5), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0_i1_i2m4, uu_i0_i1_i2p4), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0_i1_i2m2, uu_i0_i1_i2p2), FDPart1_0))))));
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY __RHS_exp_0 = vv,
* const REAL_SIMD_ARRAY __RHS_exp_1 = wavespeed**2*(uu_dDD00 + uu_dDD11 + uu_dDD22)]"
*/
const REAL_SIMD_ARRAY __RHS_exp_0 = vv;
const REAL_SIMD_ARRAY __RHS_exp_1 = MulSIMD(MulSIMD(wavespeed, wavespeed), AddSIMD(uu_dDD00, AddSIMD(uu_dDD11, uu_dDD22)));
WriteSIMD(&rhs_gfs[IDX4(UUGF, i0, i1, i2)], __RHS_exp_0);
WriteSIMD(&rhs_gfs[IDX4(VVGF, i0, i1, i2)], __RHS_exp_1);
}
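###Markdown
To see what the jump from 4th to 10th order buys in practice, here is a small, self-contained NumPy experiment (illustrative only, not produced by NRPy+): both stencils, with coefficients copied from the two generated kernels above, are applied to $\sin(x)$ on a periodic grid at two resolutions, and the measured error ratios should come out near $2^4$ and $2^{10}$ respectively.
###Code
import numpy as np

def d2_4th(u, dx):
    # coefficients from the 4th-order kernel generated in Step 2
    return (-(np.roll(u, 2) + np.roll(u, -2))/12.0
            + 4.0*(np.roll(u, 1) + np.roll(u, -1))/3.0 - 2.5*u) / dx**2

def d2_10th(u, dx):
    # coefficients from the 10th-order kernel generated above
    coeffs = (5.0/3.0, -5.0/21.0, 5.0/126.0, -5.0/1008.0, 1.0/3150.0)
    acc = -5269.0/1800.0 * u
    for m, cm in enumerate(coeffs, start=1):
        acc += cm*(np.roll(u, m) + np.roll(u, -m))
    return acc / dx**2

def max_err(d2, n):
    # exact second derivative of sin(x) is -sin(x)
    x = np.linspace(0.0, 2.0*np.pi, n, endpoint=False)
    dx = x[1] - x[0]
    return np.abs(d2(np.sin(x), dx) + np.sin(x)).max()

for name, d2, order in (("4th", d2_4th, 4), ("10th", d2_10th, 10)):
    e_coarse, e_fine = max_err(d2, 16), max_err(d2, 32)
    print("%s-order stencil: error ratio = %.1f (expect roughly %d)"
          % (name, e_coarse/e_fine, 2**order))
###Output
_____no_output_____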
###Markdown
Step 3.a: Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation1}$$Here, as a code validation check, we verify agreement in the SymPy expressions for the RHSs of the three-spatial-dimension Scalar Wave equation (i.e., `uu_rhs` and `vv_rhs`) between1. this tutorial and 2. the [NRPy+ ScalarWave.ScalarWave_RHSs](../edit/ScalarWave/ScalarWave_RHSs.py) module.
###Code
# Step 10: We already have SymPy expressions for uu_rhs and vv_rhs in
# terms of other SymPy variables. Even if we reset the list
# of NRPy+ gridfunctions, these *SymPy* expressions for
# uu_rhs and vv_rhs *will remain unaffected*.
#
# Here, we will use the above-defined uu_rhs and vv_rhs to
# validate against the same expressions in the
# ScalarWave/ScalarWave_RHSs.py module,
# to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the ScalarWave_RHSs.py module itself.
#
# Reset the list of gridfunctions, as registering a gridfunction
# twice will spawn an error.
gri.glb_gridfcs_list = []
# Step 11: Call the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module,
# which should do exactly the same as in Steps 1-10 above.
import ScalarWave.ScalarWave_RHSs as swrhs
swrhs.ScalarWave_RHSs()
# Step 12: Consistency check between the tutorial notebook above
# and the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_rhs - swrhs.uu_rhs = "+str(sp.simplify(uu_rhs - swrhs.uu_rhs))+"\t\t (should be zero)")
print("vv_rhs - swrhs.vv_rhs = "+str(sp.simplify(vv_rhs - swrhs.vv_rhs))+"\t\t (should be zero)")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_rhs - swrhs.uu_rhs = 0 (should be zero)
vv_rhs - swrhs.vv_rhs = 0 (should be zero)
###Markdown
Step 4: Setting up Initial Data for the Scalar Wave Equation \[Back to [top](toc)\]$$\label{id}$$ Step 4.a: The Monochromatic Plane-Wave Solution \[Back to [top](toc)\]$$\label{planewave}$$The solution to the scalar wave equation for a monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction is$$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$where $\hat{k}$ is a unit vector. We choose $f(\hat{k}\cdot\vec{x} - c t)$ to take the form$$f(\hat{k}\cdot\vec{x} - c t) = \sin\left(\hat{k}\cdot\vec{x} - c t\right) + 2,$$where we add the $+2$ to ensure that the exact solution never crosses through zero. In places where the exact solution passes through zero, the relative error (i.e., the measure of error to compare numerical with exact results) is undefined. Also, $f(\hat{k}\cdot\vec{x} - c t)$ plus a constant is still a solution to the wave equation.
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx # Sets the Cartesian coordinates xx[0]=x; xx[1]=y; xx[2]=z
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
kk = par.Cparameters("REAL", thismodule, ["kk0", "kk1", "kk2"],[1.0,1.0,1.0])
# Step 3: Normalize the k vector
kk_norm = sp.sqrt(kk[0]**2 + kk[1]**2 + kk[2]**2)
# Step 4: Compute k.x
dot_product = sp.sympify(0)
for i in range(DIM):
dot_product += xx[i]*kk[i]
dot_product /= kk_norm
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID_PlaneWave = sp.sin(dot_product - wavespeed*time)+2
vv_ID_PlaneWave = sp.diff(uu_ID_PlaneWave, time)
###Output
_____no_output_____
###Markdown
Next we verify that $f(\hat{k}\cdot\vec{x} - c t)$ satisfies the wave equation, by computing$$\left(c^2 \nabla^2 - \partial_t^2 \right)\ f\left(\hat{k}\cdot\vec{x} - c t\right),$$and confirming the result is exactly zero.
###Code
sp.simplify(wavespeed**2*(sp.diff(uu_ID_PlaneWave,xx[0],2) +
sp.diff(uu_ID_PlaneWave,xx[1],2) +
sp.diff(uu_ID_PlaneWave,xx[2],2))
- sp.diff(uu_ID_PlaneWave,time,2))
###Output
_____no_output_____
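###Markdown
The normalization of the wave vector in Step 3 above is essential. A standalone SymPy sketch (illustrative symbols, independent of the NRPy+ parameter objects) shows that for an *un*-normalized $\vec{k}$ the residual of the wave equation is proportional to $k_0^2+k_1^2+k_2^2-1$, so it vanishes only when $\vec{k}$ is a unit vector:
###Code
import sympy as sp

# Illustrative symbols, independent of the NRPy+ parameter objects above
x, y, z, t, c = sp.symbols('x y z t c', real=True)
k0, k1, k2 = sp.symbols('k0 k1 k2', real=True)

u = sp.sin(k0*x + k1*y + k2*z - c*t) + 2
residual = sp.simplify(c**2*(sp.diff(u, x, 2) + sp.diff(u, y, 2) + sp.diff(u, z, 2))
                       - sp.diff(u, t, 2))
print(residual)
# expect a result proportional to (k0**2 + k1**2 + k2**2 - 1)*sin(...),
# i.e., zero only when the wave vector has unit norm.
###Output
_____no_output_____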
###Markdown
Step 4.b: The Spherical Gaussian Solution \[Back to [top](toc)\]$$\label{sphericalgaussian}$$Here we will implement the spherical Gaussian solution, which consists of ingoing and outgoing wave fronts:\begin{align}u(r,t) &= u_{\rm out}(r,t) + u_{\rm in}(r,t) + 1,\ \ \text{where}\\u_{\rm out}(r,t) &=\frac{r-ct}{r} \exp\left[\frac{-(r-ct)^2}{2 \sigma^2}\right] \\u_{\rm in}(r,t) &=\frac{r+ct}{r} \exp\left[\frac{-(r+ct)^2}{2 \sigma^2}\right] \\\end{align}where $c$ is the wavespeed, and $\sigma$ is the width of the Gaussian (i.e., the "standard deviation").
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx # Sets the Cartesian coordinates xx[0]=x; xx[1]=y; xx[2]=z
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
sigma = par.Cparameters("REAL", thismodule, "sigma",3.0)
# Step 4: Compute r
r = sp.sympify(0)
for i in range(DIM):
r += xx[i]**2
r = sp.sqrt(r)
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID_SphericalGaussianOUT = +(r - wavespeed*time)/r * sp.exp( -(r - wavespeed*time)**2 / (2*sigma**2) )
uu_ID_SphericalGaussianIN = +(r + wavespeed*time)/r * sp.exp( -(r + wavespeed*time)**2 / (2*sigma**2) )
uu_ID_SphericalGaussian = uu_ID_SphericalGaussianOUT + uu_ID_SphericalGaussianIN + sp.sympify(1)
vv_ID_SphericalGaussian = sp.diff(uu_ID_SphericalGaussian, time)
###Output
_____no_output_____
###Markdown
Since the wave equation is linear, if the outgoing and ingoing waves each satisfy the wave equation then so does their sum. Next we verify that $u(r,t)$ satisfies the wave equation, by confirming that$$\left(c^2 \nabla^2 - \partial_t^2 \right)\left\{u_{\rm out}(r,t)\right\}$$and$$\left(c^2 \nabla^2 - \partial_t^2 \right)\left\{u_{\rm in}(r,t)\right\}$$are separately zero. We check the two pieces separately because SymPy has difficulty simplifying the combined expression.
###Code
print(sp.simplify(wavespeed**2*(sp.diff(uu_ID_SphericalGaussianOUT,xx[0],2) +
sp.diff(uu_ID_SphericalGaussianOUT,xx[1],2) +
sp.diff(uu_ID_SphericalGaussianOUT,xx[2],2))
- sp.diff(uu_ID_SphericalGaussianOUT,time,2)) )
print(sp.simplify(wavespeed**2*(sp.diff(uu_ID_SphericalGaussianIN,xx[0],2) +
sp.diff(uu_ID_SphericalGaussianIN,xx[1],2) +
sp.diff(uu_ID_SphericalGaussianIN,xx[2],2))
- sp.diff(uu_ID_SphericalGaussianIN,time,2)))
###Output
0
0
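###Markdown
The reason this ansatz works can be made explicit with a short standalone SymPy check (illustrative symbols, not the NRPy+ parameter objects above): for spherically symmetric fields, $\nabla^2 u = \frac{1}{r}\partial_r^2(ru)$, so the 3D wave equation for $u(r,t)$ is equivalent to the 1D wave equation for $\psi \equiv r\,u$, and $\psi = F(r-ct) + G(r+ct)$ solves that equation for any profiles $F$ and $G$, including the Gaussian profiles used here:
###Code
import sympy as sp

# Illustrative standalone symbols
r, t = sp.symbols('r t', real=True)
c, sigma = sp.symbols('c sigma', positive=True)

# psi = r*(u_out + u_in) for the Gaussian profiles used above
psi = ((r - c*t)*sp.exp(-(r - c*t)**2/(2*sigma**2))
       + (r + c*t)*sp.exp(-(r + c*t)**2/(2*sigma**2)))

print(sp.simplify(c**2*sp.diff(psi, r, 2) - sp.diff(psi, t, 2)))   # expect 0
###Output
_____no_output_____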
###Markdown
Step 5: Code Validation against `ScalarWave.InitialData` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation2}$$As a code validation check, we will verify agreement in the SymPy expressions for the plane-wave and spherical Gaussian initial data for the Scalar Wave equation between 1. this tutorial and 2. the NRPy+ [ScalarWave.InitialData](../edit/ScalarWave/InitialData.py) module.
###Code
# We just defined SymPy expressions for uu_ID and vv_ID in
# terms of other SymPy variables. Here, we will use the
# above-defined uu_ID and vv_ID to validate against the
# same expressions in the ScalarWave/InitialData.py
# module, to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the PlaneWave ID module itself.
#
# Step 6: Call the InitialData(Type="PlaneWave") function from within the
# ScalarWave/InitialData.py module,
# which should do exactly the same as in Steps 1-5 above.
import sys                            # Standard Python module; needed here for sys.exit()
import ScalarWave.InitialData as swid
swid.InitialData(Type="PlaneWave")
# Step 7: Consistency check between the tutorial notebook above
# and the PlaneWave option from within the
# ScalarWave/InitialData.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module: PlaneWave Case")
if sp.simplify(uu_ID_PlaneWave - swid.uu_ID) != 0:
print("TEST FAILED: uu_ID_PlaneWave - swid.uu_ID = "+str(sp.simplify(uu_ID_PlaneWave - swid.uu_ID))+"\t\t (should be zero)")
sys.exit(1)
if sp.simplify(vv_ID_PlaneWave - swid.vv_ID) != 0:
print("TEST FAILED: vv_ID_PlaneWave - swid.vv_ID = "+str(sp.simplify(vv_ID_PlaneWave - swid.vv_ID))+"\t\t (should be zero)")
sys.exit(1)
print("TESTS PASSED!")
# Step 8: Consistency check between the tutorial notebook above
# and the SphericalGaussian option from within the
# ScalarWave/InitialData.py module.
swid.InitialData(Type="SphericalGaussian")
print("Consistency check between ScalarWave tutorial and NRPy+ module: SphericalGaussian Case")
if sp.simplify(uu_ID_SphericalGaussian - swid.uu_ID) != 0:
print("TEST FAILED: uu_ID_SphericalGaussian - swid.uu_ID = "+str(sp.simplify(uu_ID_SphericalGaussian - swid.uu_ID))+"\t\t (should be zero)")
sys.exit(1)
if sp.simplify(vv_ID_SphericalGaussian - swid.vv_ID) != 0:
print("TEST FAILED: vv_ID_SphericalGaussian - swid.vv_ID = "+str(sp.simplify(vv_ID_SphericalGaussian - swid.vv_ID))+"\t\t (should be zero)")
sys.exit(1)
print("TESTS PASSED!")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module: PlaneWave Case
TESTS PASSED!
Consistency check between ScalarWave tutorial and NRPy+ module: SphericalGaussian Case
TESTS PASSED!
###Markdown
Step 6: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-ScalarWave.pdf](Tutorial-ScalarWave.pdf). (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-ScalarWave")
###Output
Created Tutorial-ScalarWave.tex, and compiled LaTeX file to PDF file
Tutorial-ScalarWave.pdf
###Markdown
Generating C Code for the Scalar Wave Equation in Cartesian Coordinates Author: Zach Etienne Formatting improvements courtesy Brandon Clark This module generates the C Code for the Scalarwave in Cartesian coordinates and sets up [Plane Wave Initial Data](https://en.wikipedia.org/wiki/Initial_value_problem). **Notebook Status:** Validated **Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](code_validation). In addition, all expressions have been validated against a trusted code (the [original SENR/NRPy+ code](https://bitbucket.org/zach_etienne/nrpy)). NRPy+ Source Code for this module: * [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py)* [ScalarWave/InitialData_PlaneWave.py](../edit/ScalarWave/InitialData_PlaneWave.py) Introduction: Problem StatementWe wish to numerically solve the scalar wave equation as an [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem) in Cartesian coordinates:$$\partial_t^2 u = c^2 \nabla^2 u \text{,}$$where $u$ (the amplitude of the wave) is a function of time and space: $u = u(t,x,y,...)$ (spatial dimension as-yet unspecified) and $c$ is the wave speed, subject to some initial condition$$u(0,x,y,...) = f(x,y,...)$$and suitable spatial boundary conditions.As described in the next section, we will find it quite useful to define$$v(t,x,y,...) = \partial_t u(t,x,y,...).$$In this way, the second-order PDE is reduced to a set of two coupled first-order PDEs\begin{align}\partial_t u &= v \\\partial_t v &= c^2 \nabla^2 u.\end{align}We will use NRPy+ to generate efficient C codes capable of generating both initial data $u(0,x,y,...) = f(x,y,...)$; $v(0,x,y,...)=g(x,y,...)$, as well as finite-difference expressions for the right-hand sides of the above expressions. These expressions are needed within the *Method of Lines* to "integrate" the solution forward in time. The Method of LinesOnce we have initial data, we "evolve it forward in time", using the [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html). In short, the Method of Lines enables us to handle 1. the **spatial derivatives** of an initial value problem PDE using **standard finite difference approaches**, and2. the **temporal derivatives** of an initial value problem PDE using **standard strategies for solving ordinary differential equations (ODEs)**, so long as the initial value problem PDE can be written in the form$$\partial_t \vec{f} = \mathbf{M}\ \vec{f},$$where $\mathbf{M}$ is an $N\times N$ matrix filled with differential operators that act on the $N$-element column vector $\vec{f}$. $\mathbf{M}$ may not contain $t$ or time derivatives explicitly; only *spatial* partial derivatives are allowed to appear inside $\mathbf{M}$. The scalar wave equation as written in the [previous module](Tutorial-ScalarWave.ipynb)\begin{equation}\partial_t \begin{bmatrix}u \\v \end{bmatrix}=\begin{bmatrix}0 & 1 \\c^2 \nabla^2 & 0 \end{bmatrix}\begin{bmatrix}u \\v \end{bmatrix}\end{equation}satisfies this requirement. Thus we can treat the spatial derivatives $\nabla^2 u$ of the scalar wave equation using **standard finite-difference approaches**, and the temporal derivatives $\partial_t u$ and $\partial_t v$ using **standard approaches for solving ODEs**. 
In [the next module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will apply the highly robust [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4), used widely for numerically solving ODEs, to "march" (integrate) the solution vector $\vec{f}$ forward in time from its initial value ("initial data"). Basic AlgorithmThe basic algorithm for solving the scalar wave equation [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem), based on the Method of Lines (see section above) is outlined below, with NRPy+-based components highlighted in green. We will review how NRPy+ generates these core components in this module.1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. Set gridfunction values to initial data.1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following: 1. Evaluate scalar wave RHS expressions. 1. Apply boundary conditions.**We refer to the right-hand side of the equation $\partial_t \vec{f} = \mathbf{M}\ \vec{f}$ as the RHS. In this case, we refer to the $\mathbf{M}\ \vec{f}$ as the "scalar wave RHSs".** In the following sections we will 1. Use NRPy+ to cast the scalar wave RHS expressions -- in finite difference form -- into highly efficient C code, 1. first in one spatial dimension with fourth-order finite differences, 1. and then in three spatial dimensions with tenth-order finite differences.1. Use NRPy+ to generate monochromatic plane-wave initial data for the scalar wave equation, where the wave propagates in an arbitrary direction.As for the $\nabla^2 u$ term, spatial derivatives are handled in NRPy+ via [finite differencing](https://en.wikipedia.org/wiki/Finite_difference).We will sample the solution $\{u,v\}$ at discrete, uniformly-sampled points in space and time. For simplicity, let's assume that we consider the wave equation in one spatial dimension. Then the solution at any sampled point in space and time is given by$$u^n_i = u(t_n,x_i) = u(t_0 + n \Delta t, x_0 + i \Delta x),$$where $\Delta t$ and $\Delta x$ represent the temporal and spatial resolution, respectively. $v^n_i$ is sampled at the same points in space and time. Table of Contents$$\label{toc}$$1. [Step 1](initializenrpy): Initialize core NRPy+ modules1. [Step 2](rhss1d): Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing1. [Step 3](rhss3d): Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing 1. [Step 3.a](code_validation1): Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module1. [Step 4](planewavesoln): Plane-Wave Solution of the Scalar Wave Equation 1. [Step 4.a](code_validation2): Code Validation against `ScalarWave.InitialData_PlaneWave` NRPy+ module1. [Step 5](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Initialize core NRPy+ modules \[Back to [top](toc)\]$$\label{initializenrpy}$$Let's start by importing all the needed modules from NRPy+:
###Code
# Step P1: Import needed NRPy+ core modules:
import NRPy_param_funcs as par   # NRPy+: Parameter interface
import indexedexp as ixp         # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import grid as gri               # NRPy+: Functions having to do with numerical grids
import finite_difference as fin  # NRPy+: Finite difference C code generation module
from outputC import *            # NRPy+: Core C code output module
###Output
_____no_output_____
###Markdown
Step 2: Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing \[Back to [top](toc)\]$$\label{rhss1d}$$To minimize complication, we will first restrict ourselves to solving the wave equation in one spatial dimension, so$$\nabla^2 u = \partial_x^2 u.$$Extension of this operator to higher spatial dimensions is straightforward, particularly when using NRPy+. As was discussed in [the finite difference section of the tutorial](Tutorial-Finite_Difference_Derivatives.ipynb), NRPy+ approximates derivatives using [finite difference methods](https://en.wikipedia.org/wiki/Finite_difference). The second-order derivative $\partial_x^2$, accurate to fourth order in the uniform grid spacing $\Delta x$ (obtained by fitting the unique 4th-degree polynomial to 5 sample points of $u$), is given by\begin{equation}\left[\partial_x^2 u(t,x)\right]_j = \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right)+ \mathcal{O}\left((\Delta x)^4\right).\end{equation}
###Code
# Step P2: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
thismodule = "ScalarWave"
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 1: Set the spatial dimension parameter, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",1)
DIM = par.parval_from_str("grid::DIM")
# Step 2: Set the finite differencing order to 4.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4)
# Step 3: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 4: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 5: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
vv_rhs = sp.simplify(vv_rhs)
# Step 6: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)])
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expression:
* "const double uu_dDD00 = invdx0**2*(-5*uu/2 + 4*uu_i0m1/3 - uu_i0m2/12 + 4*uu_i0p1/3 - uu_i0p2/12)"
*/
const double uu_i0m2 = in_gfs[IDX2(UUGF, i0-2)];
const double uu_i0m1 = in_gfs[IDX2(UUGF, i0-1)];
const double uu = in_gfs[IDX2(UUGF, i0)];
const double uu_i0p1 = in_gfs[IDX2(UUGF, i0+1)];
const double uu_i0p2 = in_gfs[IDX2(UUGF, i0+2)];
const double vv = in_gfs[IDX2(VVGF, i0)];
const double uu_dDD00 = ((invdx0)*(invdx0))*(-5.0/2.0*uu + (4.0/3.0)*uu_i0m1 - 1.0/12.0*uu_i0m2 + (4.0/3.0)*uu_i0p1 - 1.0/12.0*uu_i0p2);
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[rhs_gfs[IDX2(UUGF, i0)] = vv,
* rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*wavespeed**2]"
*/
rhs_gfs[IDX2(UUGF, i0)] = vv;
rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*((wavespeed)*(wavespeed));
}
###Markdown
**Success!** Notice that indeed NRPy+ was able to compute the spatial derivative operator,\begin{equation}\left[\partial_x^2 u(t,x)\right]_j \approx \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right),\end{equation}correctly (this is easiest to read in the "Original SymPy expressions" comment block at the top of the C output). Note that `invdx0`$=1/\Delta x_0$, where $\Delta x_0$ is the (uniform) grid spacing in the zeroth, or $x_0$, direction. Step 3: Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing \[Back to [top](toc)\]$$\label{rhss3d}$$Let's next repeat the same process, only this time at **10th** finite-difference order, for the **3-spatial-dimension** scalar wave equation, with SIMD enabled:
###Code
# Step 1: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 2: Set the spatial dimension parameter
#            to *THREE* this time, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")
# Step 3: Set the finite differencing order to 10.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",10)
# Step 4a: Reset gridfunctions registered in 1D case above,
# to avoid NRPy+ throwing an error about double-
# registering gridfunctions, which is not allowed.
gri.glb_gridfcs_list = []
# Step 4b: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 5: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 6: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
# Step 7: Simplify the expression for c^2 \nabla^2 u (a.k.a., vv_rhs):
vv_rhs = sp.simplify(vv_rhs)
# Step 8: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)],params="SIMD_enable=True")
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY uu_dDD00 = invdx0**2*(-5269*uu/1800 + 5*uu_i0m1_i1_i2/3 - 5*uu_i0m2_i1_i2/21 + 5*uu_i0m3_i1_i2/126 - 5*uu_i0m4_i1_i2/1008 + uu_i0m5_i1_i2/3150 + 5*uu_i0p1_i1_i2/3 - 5*uu_i0p2_i1_i2/21 + 5*uu_i0p3_i1_i2/126 - 5*uu_i0p4_i1_i2/1008 + uu_i0p5_i1_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD11 = invdx1**2*(-5269*uu/1800 + 5*uu_i0_i1m1_i2/3 - 5*uu_i0_i1m2_i2/21 + 5*uu_i0_i1m3_i2/126 - 5*uu_i0_i1m4_i2/1008 + uu_i0_i1m5_i2/3150 + 5*uu_i0_i1p1_i2/3 - 5*uu_i0_i1p2_i2/21 + 5*uu_i0_i1p3_i2/126 - 5*uu_i0_i1p4_i2/1008 + uu_i0_i1p5_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD22 = invdx2**2*(-5269*uu/1800 + 5*uu_i0_i1_i2m1/3 - 5*uu_i0_i1_i2m2/21 + 5*uu_i0_i1_i2m3/126 - 5*uu_i0_i1_i2m4/1008 + uu_i0_i1_i2m5/3150 + 5*uu_i0_i1_i2p1/3 - 5*uu_i0_i1_i2p2/21 + 5*uu_i0_i1_i2p3/126 - 5*uu_i0_i1_i2p4/1008 + uu_i0_i1_i2p5/3150)]"
*/
const REAL_SIMD_ARRAY uu_i0_i1_i2m5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-5)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-1)]);
const REAL_SIMD_ARRAY uu_i0_i1m5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-1,i2)]);
const REAL_SIMD_ARRAY uu_i0m5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-1,i1,i2)]);
const REAL_SIMD_ARRAY uu = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+1,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+1)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+5)]);
const REAL_SIMD_ARRAY vv = ReadSIMD(&in_gfs[IDX4(VVGF, i0,i1,i2)]);
const double tmpFD_Rational_1_3150_FDcoeff = 0.0003174603174603174603174603174603175;
const REAL_SIMD_ARRAY _Rational_1_3150_FDcoeff = ConstSIMD(tmpFD_Rational_1_3150_FDcoeff);
const double tmpFD_Rational_5_126_FDcoeff = 0.03968253968253968253968253968253968;
const REAL_SIMD_ARRAY _Rational_5_126_FDcoeff = ConstSIMD(tmpFD_Rational_5_126_FDcoeff);
const double tmpFD_Rational_5_3_FDcoeff = 1.666666666666666666666666666666667;
const REAL_SIMD_ARRAY _Rational_5_3_FDcoeff = ConstSIMD(tmpFD_Rational_5_3_FDcoeff);
const double tmpFD_Rational_m5269_1800_FDcoeff = -2.927222222222222222222222222222222;
const REAL_SIMD_ARRAY _Rational_m5269_1800_FDcoeff = ConstSIMD(tmpFD_Rational_m5269_1800_FDcoeff);
const double tmpFD_Rational_m5_1008_FDcoeff = -0.004960317460317460317460317460317460;
const REAL_SIMD_ARRAY _Rational_m5_1008_FDcoeff = ConstSIMD(tmpFD_Rational_m5_1008_FDcoeff);
const double tmpFD_Rational_m5_21_FDcoeff = -0.2380952380952380952380952380952381;
const REAL_SIMD_ARRAY _Rational_m5_21_FDcoeff = ConstSIMD(tmpFD_Rational_m5_21_FDcoeff);
const REAL_SIMD_ARRAY tmpFD0 = MulSIMD(uu, _Rational_m5269_1800_FDcoeff);
const REAL_SIMD_ARRAY uu_dDD00 = MulSIMD(FusedMulAddSIMD(uu_i0p4_i1_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0p3_i1_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0p2_i1_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0p1_i1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0m5_i1_i2, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0m4_i1_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0m3_i1_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0m2_i1_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0m1_i1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0p5_i1_i2, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx0, invdx0));
const REAL_SIMD_ARRAY uu_dDD11 = MulSIMD(FusedMulAddSIMD(uu_i0_i1p4_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1p3_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1p2_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1p1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1m5_i2, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0_i1m4_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1m3_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1m2_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1m1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1p5_i2, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx1, invdx1));
const REAL_SIMD_ARRAY uu_dDD22 = MulSIMD(FusedMulAddSIMD(uu_i0_i1_i2p4, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p3, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p1, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m5, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m4, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m3, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m1, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p5, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx2, invdx2));
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY __RHS_exp_0 = vv,
* const REAL_SIMD_ARRAY __RHS_exp_1 = wavespeed**2*(uu_dDD00 + uu_dDD11 + uu_dDD22)]"
*/
const REAL_SIMD_ARRAY __RHS_exp_0 = vv;
const REAL_SIMD_ARRAY __RHS_exp_1 = MulSIMD(AddSIMD(uu_dDD11, AddSIMD(uu_dDD00, uu_dDD22)), MulSIMD(wavespeed, wavespeed));
WriteSIMD(&rhs_gfs[IDX4(UUGF, i0, i1, i2)], __RHS_exp_0);
WriteSIMD(&rhs_gfs[IDX4(VVGF, i0, i1, i2)], __RHS_exp_1);
}
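###Markdown
The rational coefficients appearing in the generated 10th-order kernel above can be re-derived independently with SymPy's `finite_diff_weights` utility (a brief standalone cross-check, not part of the NRPy+ workflow):
###Code
import sympy as sp

# Weights of the centered, 10th-order-accurate approximation to d^2/dx^2
# on the eleven points x0 + {-5, ..., +5}*dx (in units of 1/dx^2).
offsets = list(range(-5, 6))
weights = sp.finite_diff_weights(2, offsets, 0)[2][-1]
print(weights)
# expect [1/3150, -5/1008, 5/126, -5/21, 5/3, -5269/1800, 5/3, -5/21, 5/126, -5/1008, 1/3150]
###Output
_____no_output_____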
###Markdown
Step 3.a: Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation1}$$Here, as a code validation check, we verify agreement in the SymPy expressions for the RHSs of the three-spatial-dimension Scalar Wave equation (i.e., `uu_rhs` and `vv_rhs`) between1. this tutorial and 2. the [NRPy+ ScalarWave.ScalarWave_RHSs](../edit/ScalarWave/ScalarWave_RHSs.py) module.
###Code
# Step 10: We already have SymPy expressions for uu_rhs and vv_rhs in
# terms of other SymPy variables. Even if we reset the list
# of NRPy+ gridfunctions, these *SymPy* expressions for
# uu_rhs and vv_rhs *will remain unaffected*.
#
# Here, we will use the above-defined uu_rhs and vv_rhs to
# validate against the same expressions in the
# ScalarWave/ScalarWave_RHSs.py module,
# to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the ScalarWave_RHSs.py module itself.
#
# Reset the list of gridfunctions, as registering a gridfunction
# twice will spawn an error.
gri.glb_gridfcs_list = []
# Step 11: Call the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module,
# which should do exactly the same as in Steps 1-10 above.
import ScalarWave.ScalarWave_RHSs as swrhs
swrhs.ScalarWave_RHSs()
# Step 12: Consistency check between the tutorial notebook above
# and the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_rhs - swrhs.uu_rhs = "+str(sp.simplify(uu_rhs - swrhs.uu_rhs))+"\t\t (should be zero)")
print("vv_rhs - swrhs.vv_rhs = "+str(sp.simplify(vv_rhs - swrhs.vv_rhs))+"\t\t (should be zero)")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_rhs - swrhs.uu_rhs = 0 (should be zero)
vv_rhs - swrhs.vv_rhs = 0 (should be zero)
###Markdown
Step 4: Plane-Wave Solution of the Scalar Wave Equation \[Back to [top](toc)\]$$\label{planewavesoln}$$The solution to the scalar wave equation for a monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction is$$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$where $\hat{k}$ is a unit vector. We choose $f(\hat{k}\cdot\vec{x} - c t)$ to take the form$$f(\hat{k}\cdot\vec{x} - c t) = \sin\left(\hat{k}\cdot\vec{x} - c t\right) + 2,$$where we add the $+2$ to ensure that the exact solution never crosses through zero. In places where the exact solution passes through zero, the relative error (i.e., the measure of error to compare numerical with exact results) is undefined. Also, $f(\hat{k}\cdot\vec{x} - c t)$ plus a constant is still a solution to the wave equation.
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
kk = par.Cparameters("REAL", thismodule, ["kk0", "kk1", "kk2"],[1.0,1.0,1.0])
# Step 3: Normalize the k vector
kk_norm = sp.sqrt(kk[0]**2 + kk[1]**2 + kk[2]**2)
# Step 4: Compute k.x
dot_product = sp.sympify(0)
for i in range(DIM):
dot_product += xx[i]*kk[i]
dot_product /= kk_norm
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID = sp.sin(dot_product - wavespeed*time)+2
vv_ID = sp.diff(uu_ID, time)
###Output
_____no_output_____
###Markdown
Next we verify that $f(\hat{k}\cdot\vec{x} - c t)$ satisfies the wave equation, by computing$$\left(c^2 \nabla^2 - \partial_t^2 \right)\ f\left(\hat{k}\cdot\vec{x} - c t\right),$$and confirming the result is exactly zero.
###Code
sp.simplify(wavespeed**2*(sp.diff(uu_ID,xx[0],2) +
sp.diff(uu_ID,xx[1],2) +
sp.diff(uu_ID,xx[2],2))
- sp.diff(uu_ID,time,2))
###Output
_____no_output_____
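###Markdown
A brief numerical aside on the $+2$ offset mentioned above (an illustrative spot check, not part of the NRPy+ workflow): since $\sin(\hat{k}\cdot\vec{x} - ct) + 2$ is confined to $[1,3]$, the exact solution never crosses zero, so the relative error against it is always well defined.
###Code
import numpy as np

# Sample the phase k.x - c*t over an arbitrary illustrative range
phase = np.linspace(-20.0, 20.0, 200001)
u = np.sin(phase) + 2.0
print("min u =", u.min(), "  max u =", u.max())   # expect approximately 1.0 and 3.0
###Output
_____no_output_____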
###Markdown
Step 4.a: Code Validation against `ScalarWave.InitialData_PlaneWave` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation2}$$As a code validation check, we will verify agreement in the SymPy expressions for plane-wave initial data for the Scalar Wave equation between1. this tutorial and 2. the NRPy+ [ScalarWave.InitialData_PlaneWave](../edit/ScalarWave/InitialData_PlaneWave.py) module.
###Code
# We just defined SymPy expressions for uu_ID and vv_ID in
# terms of other SymPy variables. Here, we will use the
# above-defined uu_ID and vv_ID to validate against the
# same expressions in the ScalarWave/InitialData_PlaneWave.py
# module, to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the PlaneWave ID module itself.
#
# Step 6: Call the InitialData_PlaneWave() function from within the
# ScalarWave/InitialData_PlaneWave.py module,
# which should do exactly the same as in Steps 1-5 above.
import ScalarWave.InitialData_PlaneWave as swid
swid.InitialData_PlaneWave()
# Step 7: Consistency check between the tutorial notebook above
# and the InitialData_PlaneWave() function from within the
# ScalarWave/InitialData_PlaneWave.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_ID - swid.uu_ID = "+str(sp.simplify(uu_ID - swid.uu_ID))+"\t\t (should be zero)")
print("vv_ID - swid.vv_ID = "+str(sp.simplify(vv_ID - swid.vv_ID))+"\t\t (should be zero)")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_ID - swid.uu_ID = 0 (should be zero)
vv_ID - swid.vv_ID = 0 (should be zero)
###Markdown
Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-ScalarWave.pdf](Tutorial-ScalarWave.pdf). (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-ScalarWave.ipynb
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!rm -f Tut*.out Tut*.aux Tut*.log
###Output
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
###Markdown
Generating C Code for the Scalar Wave Equation in Cartesian Coordinates Author: Zach Etienne Formatting improvements courtesy Brandon Clark This module generates the C Code for the Scalarwave in Cartesian coordinates and sets up [Plane Wave Initial Data](https://en.wikipedia.org/wiki/Initial_value_problem). **Notebook Status:** Validated **Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](code_validation). In addition, all expressions have been validated against a trusted code (the [original SENR/NRPy+ code](https://bitbucket.org/zach_etienne/nrpy)). NRPy+ Source Code for this module: * [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py)* [ScalarWave/InitialData_PlaneWave.py](../edit/ScalarWave/InitialData_PlaneWave.py) Introduction: Problem StatementWe wish to numerically solve the scalar wave equation as an [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem) in Cartesian coordinates:$$\partial_t^2 u = c^2 \nabla^2 u \text{,}$$where $u$ (the amplitude of the wave) is a function of time and space: $u = u(t,x,y,...)$ (spatial dimension as-yet unspecified) and $c$ is the wave speed, subject to some initial condition$$u(0,x,y,...) = f(x,y,...)$$and suitable spatial boundary conditions.As described in the next section, we will find it quite useful to define$$v(t,x,y,...) = \partial_t u(t,x,y,...).$$In this way, the second-order PDE is reduced to a set of two coupled first-order PDEs\begin{align}\partial_t u &= v \\\partial_t v &= c^2 \nabla^2 u.\end{align}We will use NRPy+ to generate efficient C codes capable of generating both initial data $u(0,x,y,...) = f(x,y,...)$; $v(0,x,y,...)=g(x,y,...)$, as well as finite-difference expressions for the right-hand sides of the above expressions. These expressions are needed within the *Method of Lines* to "integrate" the solution forward in time. The Method of LinesOnce we have initial data, we "evolve it forward in time", using the [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html). In short, the Method of Lines enables us to handle 1. the **spatial derivatives** of an initial value problem PDE using **standard finite difference approaches**, and2. the **temporal derivatives** of an initial value problem PDE using **standard strategies for solving ordinary differential equations (ODEs)**, so long as the initial value problem PDE can be written in the form$$\partial_t \vec{f} = \mathbf{M}\ \vec{f},$$where $\mathbf{M}$ is an $N\times N$ matrix filled with differential operators that act on the $N$-element column vector $\vec{f}$. $\mathbf{M}$ may not contain $t$ or time derivatives explicitly; only *spatial* partial derivatives are allowed to appear inside $\mathbf{M}$. The scalar wave equation as written in the [previous module](Tutorial-ScalarWave.ipynb)\begin{equation}\partial_t \begin{bmatrix}u \\v \end{bmatrix}=\begin{bmatrix}0 & 1 \\c^2 \nabla^2 & 0 \end{bmatrix}\begin{bmatrix}u \\v \end{bmatrix}\end{equation}satisfies this requirement. Thus we can treat the spatial derivatives $\nabla^2 u$ of the scalar wave equation using **standard finite-difference approaches**, and the temporal derivatives $\partial_t u$ and $\partial_t v$ using **standard approaches for solving ODEs**. 
In [the next module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will apply the highly robust [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4), used widely for numerically solving ODEs, to "march" (integrate) the solution vector $\vec{f}$ forward in time from its initial value ("initial data"). Basic AlgorithmThe basic algorithm for solving the scalar wave equation [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem), based on the Method of Lines (see section above) is outlined below, with NRPy+-based components highlighted in green. We will review how NRPy+ generates these core components in this module.1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. Set gridfunction values to initial data.1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following: 1. Evaluate scalar wave RHS expressions. 1. Apply boundary conditions.**We refer to the right-hand side of the equation $\partial_t \vec{f} = \mathbf{M}\ \vec{f}$ as the RHS. In this case, we refer to the $\mathbf{M}\ \vec{f}$ as the "scalar wave RHSs".** In the following sections we will 1. Use NRPy+ to cast the scalar wave RHS expressions -- in finite difference form -- into highly efficient C code, 1. first in one spatial dimension with fourth-order finite differences, 1. and then in three spatial dimensions with tenth-order finite differences.1. Use NRPy+ to generate monochromatic plane-wave initial data for the scalar wave equation, where the wave propagates in an arbitrary direction.As for the $\nabla^2 u$ term, spatial derivatives are handled in NRPy+ via [finite differencing](https://en.wikipedia.org/wiki/Finite_difference).We will sample the solution $\{u,v\}$ at discrete, uniformly-sampled points in space and time. For simplicity, let's assume that we consider the wave equation in one spatial dimension. Then the solution at any sampled point in space and time is given by$$u^n_i = u(t_n,x_i) = u(t_0 + n \Delta t, x_0 + i \Delta x),$$where $\Delta t$ and $\Delta x$ represent the temporal and spatial resolution, respectively. $v^n_i$ is sampled at the same points in space and time. Table of Contents$$\label{toc}$$1. [Step 1](initializenrpy): Initialize core NRPy+ modules1. [Step 2](rhss1d): Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing1. [Step 3](rhss3d): Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing 1. [Step 3.a](code_validation1): Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module1. [Step 4](planewavesoln): Plane-Wave Solution of the Scalar Wave Equation 1. [Step 4.a](code_validation2): Code Validation against `ScalarWave.InitialData_PlaneWave` NRPy+ module1. [Step 5](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Initialize core NRPy+ modules \[Back to [top](toc)\]$$\label{initializenrpy}$$Let's start by importing all the needed modules from NRPy+:
###Code
# Step P1: Import needed NRPy+ core modules:
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
import finite_difference as fin
from outputC import *
###Output
_____no_output_____
###Markdown
Step 2: Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing \[Back to [top](toc)\]$$\label{rhss1d}$$To minimize complication, we will first restrict ourselves to solving the wave equation in one spatial dimension, so$$\nabla^2 u = \partial_x^2 u.$$Extension of this operator to higher spatial dimensions is straightforward, particularly when using NRPy+. As was discussed in [the finite difference section of the tutorial](Tutorial-Finite_Difference_Derivatives.ipynb), NRPy+ approximates derivatives using [finite difference methods](https://en.wikipedia.org/wiki/Finite_difference); the second-order derivative $\partial_x^2$, accurate to fourth order in the uniform grid spacing $\Delta x$ (obtained by fitting the unique 4th-degree polynomial to 5 sample points of $u$), is given by\begin{equation}\left[\partial_x^2 u(t,x)\right]_j = \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right)+ \mathcal{O}\left((\Delta x)^4\right).\end{equation}
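As a quick sanity check of this stencil (an illustrative addition to this notebook, independent of NRPy+), the following cell applies it to $u=\sin(x)$ on a periodic grid and confirms that the error shrinks by roughly a factor of 16 each time the grid spacing is halved, as expected for fourth-order accuracy:
###Code
# Illustrative check (not part of the NRPy+ workflow): apply the 4th-order, 5-point
# stencil to u = sin(x), whose exact second derivative is -sin(x), on a periodic grid.
import numpy as np
for N in [20, 40, 80]:
    dx  = 2.0*np.pi/N
    x   = np.arange(N)*dx
    u   = np.sin(x)
    # Stencil: (1/dx^2)*( -1/12*(u_{j+2}+u_{j-2}) + 4/3*(u_{j+1}+u_{j-1}) - 5/2*u_j )
    d2u = (-(np.roll(u, -2) + np.roll(u, 2))/12.0
           + 4.0*(np.roll(u, -1) + np.roll(u, 1))/3.0
           - 2.5*u)/dx**2
    print(N, np.max(np.abs(d2u + np.sin(x))))   # error should drop ~16x per doubling of N
###Output
_____no_output_____
###Markdown
The next cell sets up the needed NRPy+ parameters and gridfunctions and generates C code implementing this stencil.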
###Code
# Step P2: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
thismodule = "ScalarWave"
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 1: Set the spatial dimension parameter, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",1)
DIM = par.parval_from_str("grid::DIM")
# Step 2: Set the finite differencing order to 4.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4)
# Step 3: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 4: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 5: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
vv_rhs = sp.simplify(vv_rhs)
# Step 6: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)])
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expression:
* "const double uu_dDD00 = invdx0**2*(-5*uu/2 + 4*uu_i0m1/3 - uu_i0m2/12 + 4*uu_i0p1/3 - uu_i0p2/12)"
*/
const double uu_i0m2 = in_gfs[IDX2(UUGF, i0-2)];
const double uu_i0m1 = in_gfs[IDX2(UUGF, i0-1)];
const double uu = in_gfs[IDX2(UUGF, i0)];
const double uu_i0p1 = in_gfs[IDX2(UUGF, i0+1)];
const double uu_i0p2 = in_gfs[IDX2(UUGF, i0+2)];
const double vv = in_gfs[IDX2(VVGF, i0)];
const double uu_dDD00 = ((invdx0)*(invdx0))*(-5.0/2.0*uu + (4.0/3.0)*uu_i0m1 - 1.0/12.0*uu_i0m2 + (4.0/3.0)*uu_i0p1 - 1.0/12.0*uu_i0p2);
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[rhs_gfs[IDX2(UUGF, i0)] = vv,
* rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*wavespeed**2]"
*/
rhs_gfs[IDX2(UUGF, i0)] = vv;
rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*((wavespeed)*(wavespeed));
}
###Markdown
**Success!** Notice that indeed NRPy+ was able to compute the spatial derivative operator,\begin{equation}\left[\partial_x^2 u(t,x)\right]_j \approx \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right),\end{equation}correctly (this is easier to read in the "Original SymPy expressions" comment block at the top of the C output). Note that `invdx0`$=1/\Delta x_0$, where $\Delta x_0$ is the (uniform) grid spacing in the zeroth, or $x_0$, direction. Step 3: Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing \[Back to [top](toc)\]$$\label{rhss3d}$$Let's next repeat the same process, only this time at **10th** finite difference order, for the **3-spatial-dimension** scalar wave equation, with SIMD enabled:
###Code
# Step 1: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 2: Set the spatial dimension parameter
# to *THREE* this time, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")
# Step 3: Set the finite differencing order to 10.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",10)
# Step 4a: Reset gridfunctions registered in 1D case above,
# to avoid NRPy+ throwing an error about double-
# registering gridfunctions, which is not allowed.
gri.glb_gridfcs_list = []
# Step 4b: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 5: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 6: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
# Step 7: Simplify the expression for c^2 \nabla^2 u (a.k.a., vv_rhs):
vv_rhs = sp.simplify(vv_rhs)
# Step 8: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)],params="SIMD_enable=True")
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY uu_dDD00 = invdx0**2*(-5269*uu/1800 + 5*uu_i0m1_i1_i2/3 - 5*uu_i0m2_i1_i2/21 + 5*uu_i0m3_i1_i2/126 - 5*uu_i0m4_i1_i2/1008 + uu_i0m5_i1_i2/3150 + 5*uu_i0p1_i1_i2/3 - 5*uu_i0p2_i1_i2/21 + 5*uu_i0p3_i1_i2/126 - 5*uu_i0p4_i1_i2/1008 + uu_i0p5_i1_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD11 = invdx1**2*(-5269*uu/1800 + 5*uu_i0_i1m1_i2/3 - 5*uu_i0_i1m2_i2/21 + 5*uu_i0_i1m3_i2/126 - 5*uu_i0_i1m4_i2/1008 + uu_i0_i1m5_i2/3150 + 5*uu_i0_i1p1_i2/3 - 5*uu_i0_i1p2_i2/21 + 5*uu_i0_i1p3_i2/126 - 5*uu_i0_i1p4_i2/1008 + uu_i0_i1p5_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD22 = invdx2**2*(-5269*uu/1800 + 5*uu_i0_i1_i2m1/3 - 5*uu_i0_i1_i2m2/21 + 5*uu_i0_i1_i2m3/126 - 5*uu_i0_i1_i2m4/1008 + uu_i0_i1_i2m5/3150 + 5*uu_i0_i1_i2p1/3 - 5*uu_i0_i1_i2p2/21 + 5*uu_i0_i1_i2p3/126 - 5*uu_i0_i1_i2p4/1008 + uu_i0_i1_i2p5/3150)]"
*/
const REAL_SIMD_ARRAY uu_i0_i1_i2m5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-5)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-1)]);
const REAL_SIMD_ARRAY uu_i0_i1m5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-1,i2)]);
const REAL_SIMD_ARRAY uu_i0m5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-1,i1,i2)]);
const REAL_SIMD_ARRAY uu = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+1,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+1)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+5)]);
const REAL_SIMD_ARRAY vv = ReadSIMD(&in_gfs[IDX4(VVGF, i0,i1,i2)]);
const double tmpFD_Rational_1_3150_FDcoeff = 0.0003174603174603174603174603174603175;
const REAL_SIMD_ARRAY _Rational_1_3150_FDcoeff = ConstSIMD(tmpFD_Rational_1_3150_FDcoeff);
const double tmpFD_Rational_5_126_FDcoeff = 0.03968253968253968253968253968253968;
const REAL_SIMD_ARRAY _Rational_5_126_FDcoeff = ConstSIMD(tmpFD_Rational_5_126_FDcoeff);
const double tmpFD_Rational_5_3_FDcoeff = 1.666666666666666666666666666666667;
const REAL_SIMD_ARRAY _Rational_5_3_FDcoeff = ConstSIMD(tmpFD_Rational_5_3_FDcoeff);
const double tmpFD_Rational_m5269_1800_FDcoeff = -2.927222222222222222222222222222222;
const REAL_SIMD_ARRAY _Rational_m5269_1800_FDcoeff = ConstSIMD(tmpFD_Rational_m5269_1800_FDcoeff);
const double tmpFD_Rational_m5_1008_FDcoeff = -0.004960317460317460317460317460317460;
const REAL_SIMD_ARRAY _Rational_m5_1008_FDcoeff = ConstSIMD(tmpFD_Rational_m5_1008_FDcoeff);
const double tmpFD_Rational_m5_21_FDcoeff = -0.2380952380952380952380952380952381;
const REAL_SIMD_ARRAY _Rational_m5_21_FDcoeff = ConstSIMD(tmpFD_Rational_m5_21_FDcoeff);
const REAL_SIMD_ARRAY tmpFD0 = MulSIMD(uu, _Rational_m5269_1800_FDcoeff);
const REAL_SIMD_ARRAY uu_dDD00 = MulSIMD(FusedMulAddSIMD(uu_i0p4_i1_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0p3_i1_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0p2_i1_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0p1_i1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0m5_i1_i2, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0m4_i1_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0m3_i1_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0m2_i1_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0m1_i1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0p5_i1_i2, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx0, invdx0));
const REAL_SIMD_ARRAY uu_dDD11 = MulSIMD(FusedMulAddSIMD(uu_i0_i1p4_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1p3_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1p2_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1p1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1m5_i2, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0_i1m4_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1m3_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1m2_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1m1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1p5_i2, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx1, invdx1));
const REAL_SIMD_ARRAY uu_dDD22 = MulSIMD(FusedMulAddSIMD(uu_i0_i1_i2p4, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p3, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p1, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m5, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m4, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m3, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m1, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p5, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx2, invdx2));
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY __RHS_exp_0 = vv,
* const REAL_SIMD_ARRAY __RHS_exp_1 = wavespeed**2*(uu_dDD00 + uu_dDD11 + uu_dDD22)]"
*/
const REAL_SIMD_ARRAY __RHS_exp_0 = vv;
const REAL_SIMD_ARRAY __RHS_exp_1 = MulSIMD(AddSIMD(uu_dDD11, AddSIMD(uu_dDD00, uu_dDD22)), MulSIMD(wavespeed, wavespeed));
WriteSIMD(&rhs_gfs[IDX4(UUGF, i0, i1, i2)], __RHS_exp_0);
WriteSIMD(&rhs_gfs[IDX4(VVGF, i0, i1, i2)], __RHS_exp_1);
}
###Markdown
Step 3.a: Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation1}$$Here, as a code validation check, we verify agreement in the SymPy expressions for the RHSs of the three-spatial-dimension Scalar Wave equation (i.e., `uu_rhs` and `vv_rhs`) between1. this tutorial and 2. the [NRPy+ ScalarWave.ScalarWave_RHSs](../edit/ScalarWave/ScalarWave_RHSs.py) module.
###Code
# Step 10: We already have SymPy expressions for uu_rhs and vv_rhs in
# terms of other SymPy variables. Even if we reset the list
# of NRPy+ gridfunctions, these *SymPy* expressions for
# uu_rhs and vv_rhs *will remain unaffected*.
#
# Here, we will use the above-defined uu_rhs and vv_rhs to
# validate against the same expressions in the
# ScalarWave/ScalarWave_RHSs.py module,
# to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the ScalarWave_RHSs.py module itself.
#
# Reset the list of gridfunctions, as registering a gridfunction
# twice will spawn an error.
gri.glb_gridfcs_list = []
# Step 11: Call the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module,
# which should do exactly the same as in Steps 1-10 above.
import ScalarWave.ScalarWave_RHSs as swrhs
swrhs.ScalarWave_RHSs()
# Step 12: Consistency check between the tutorial notebook above
# and the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_rhs - swrhs.uu_rhs = "+str(sp.simplify(uu_rhs - swrhs.uu_rhs))+"\t\t (should be zero)")
print("vv_rhs - swrhs.vv_rhs = "+str(sp.simplify(vv_rhs - swrhs.vv_rhs))+"\t\t (should be zero)")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_rhs - swrhs.uu_rhs = 0 (should be zero)
vv_rhs - swrhs.vv_rhs = 0 (should be zero)
###Markdown
Step 4: Plane-Wave Solution of the Scalar Wave Equation \[Back to [top](toc)\]$$\label{planewavesoln}$$The solution to the scalar wave equation for a monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction is$$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$where $\hat{k}$ is a unit vector. We choose $f(\hat{k}\cdot\vec{x} - c t)$ to take the form$$f(\hat{k}\cdot\vec{x} - c t) = \sin\left(\hat{k}\cdot\vec{x} - c t\right) + 2,$$where we add the $+2$ to ensure that the exact solution never crosses through zero. In places where the exact solution passes through zero, the relative error (i.e., the measure of error to compare numerical with exact results) is undefined. Also, $f(\hat{k}\cdot\vec{x} - c t)$ plus a constant is still a solution to the wave equation.
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
kk = par.Cparameters("REAL", thismodule, ["kk0", "kk1", "kk2"],[1.0,1.0,1.0])
# Step 3: Normalize the k vector
kk_norm = sp.sqrt(kk[0]**2 + kk[1]**2 + kk[2]**2)
# Step 4: Compute k.x
dot_product = sp.sympify(0)
for i in range(DIM):
dot_product += xx[i]*kk[i]
dot_product /= kk_norm
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID = sp.sin(dot_product - wavespeed*time)+2
vv_ID = sp.diff(uu_ID, time)
###Output
_____no_output_____
###Markdown
Next we verify that $f(\hat{k}\cdot\vec{x} - c t)$ satisfies the wave equation, by computing$$\left(c^2 \nabla^2 - \partial_t^2 \right)\ f\left(\hat{k}\cdot\vec{x} - c t\right),$$and confirming the result is exactly zero.
###Code
sp.simplify(wavespeed**2*(sp.diff(uu_ID,xx[0],2) +
sp.diff(uu_ID,xx[1],2) +
sp.diff(uu_ID,xx[2],2))
- sp.diff(uu_ID,time,2))
###Output
_____no_output_____
###Markdown
Step 4.a: Code Validation against `ScalarWave.InitialData_PlaneWave` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation2}$$As a code validation check, we will verify agreement in the SymPy expressions for plane-wave initial data for the Scalar Wave equation between1. this tutorial and 2. the NRPy+ [ScalarWave.InitialData_PlaneWave](../edit/ScalarWave/InitialData_PlaneWave.py) module.
###Code
# We just defined SymPy expressions for uu_ID and vv_ID in
# terms of other SymPy variables. Here, we will use the
# above-defined uu_ID and vv_ID to validate against the
# same expressions in the ScalarWave/InitialData_PlaneWave.py
# module, to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the PlaneWave ID module itself.
#
# Step 6: Call the InitialData_PlaneWave() function from within the
# ScalarWave/InitialData_PlaneWave.py module,
# which should do exactly the same as in Steps 1-5 above.
import ScalarWave.InitialData_PlaneWave as swid
swid.InitialData_PlaneWave()
# Step 7: Consistency check between the tutorial notebook above
# and the InitialData_PlaneWave() function from within the
# ScalarWave/InitialData_PlaneWave.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_ID - swid.uu_ID = "+str(sp.simplify(uu_ID - swid.uu_ID))+"\t\t (should be zero)")
print("vv_ID - swid.vv_ID = "+str(sp.simplify(vv_ID - swid.vv_ID))+"\t\t (should be zero)")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_ID - swid.uu_ID = 0 (should be zero)
vv_ID - swid.vv_ID = 0 (should be zero)
###Markdown
Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-ScalarWave.pdf](Tutorial-ScalarWave.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-ScalarWave.ipynb
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!rm -f Tut*.out Tut*.aux Tut*.log
###Output
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
###Markdown
Generating C Code for the Scalar Wave Equation in Cartesian Coordinates Authors: Zach Etienne & Thiago Assumpção Formatting improvements courtesy Brandon Clark This module generates the C Code for the Scalarwave in Cartesian coordinates and sets up either monochromatic plane wave or spherical Gaussian [Initial Data](https://en.wikipedia.org/wiki/Initial_value_problem). **Notebook Status:** Validated **Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented below ([right-hand-side expressions](code_validation1); [initial data expressions](code_validation2)). In addition, all expressions have been validated against a trusted code (the [original SENR/NRPy+ code](https://bitbucket.org/zach_etienne/nrpy)). NRPy+ Source Code for this module: * [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py)* [ScalarWave/InitialData.py](../edit/ScalarWave/InitialData.py) Introduction: Problem StatementWe wish to numerically solve the scalar wave equation as an [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem) in Cartesian coordinates:$$\partial_t^2 u = c^2 \nabla^2 u \text{,}$$where $u$ (the amplitude of the wave) is a function of time and space: $u = u(t,x,y,...)$ (spatial dimension as-yet unspecified) and $c$ is the wave speed, subject to some initial condition$$u(0,x,y,...) = f(x,y,...)$$and suitable spatial boundary conditions.As described in the next section, we will find it quite useful to define$$v(t,x,y,...) = \partial_t u(t,x,y,...).$$In this way, the second-order PDE is reduced to a set of two coupled first-order PDEs\begin{align}\partial_t u &= v \\\partial_t v &= c^2 \nabla^2 u.\end{align}We will use NRPy+ to generate efficient C codes capable of generating both initial data $u(0,x,y,...) = f(x,y,...)$; $v(0,x,y,...)=g(x,y,...)$, as well as finite-difference expressions for the right-hand sides of the above expressions. These expressions are needed within the *Method of Lines* to "integrate" the solution forward in time. The Method of LinesOnce we have initial data, we "evolve it forward in time", using the [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html). In short, the Method of Lines enables us to handle 1. the **spatial derivatives** of an initial value problem PDE using **standard finite difference approaches**, and2. the **temporal derivatives** of an initial value problem PDE using **standard strategies for solving ordinary differential equations (ODEs)**, so long as the initial value problem PDE can be written in the form$$\partial_t \vec{f} = \mathbf{M}\ \vec{f},$$where $\mathbf{M}$ is an $N\times N$ matrix filled with differential operators that act on the $N$-element column vector $\vec{f}$. $\mathbf{M}$ may not contain $t$ or time derivatives explicitly; only *spatial* partial derivatives are allowed to appear inside $\mathbf{M}$. The scalar wave equation as written in the [previous module](Tutorial-ScalarWave.ipynb)\begin{equation}\partial_t \begin{bmatrix}u \\v \end{bmatrix}=\begin{bmatrix}0 & 1 \\c^2 \nabla^2 & 0 \end{bmatrix}\begin{bmatrix}u \\v \end{bmatrix}\end{equation}satisfies this requirement. 
Thus we can treat the spatial derivatives $\nabla^2 u$ of the scalar wave equation using **standard finite-difference approaches**, and the temporal derivatives $\partial_t u$ and $\partial_t v$ using **standard approaches for solving ODEs**. In [the next module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will apply the highly robust [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4), used widely for numerically solving ODEs, to "march" (integrate) the solution vector $\vec{f}$ forward in time from its initial value ("initial data"). Basic AlgorithmThe basic algorithm for solving the scalar wave equation [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem), based on the Method of Lines (see section above) is outlined below, with NRPy+-based components highlighted in green. We will review how NRPy+ generates these core components in this module.1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. Set gridfunction values to initial data.1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following: 1. Evaluate scalar wave RHS expressions. 1. Apply boundary conditions.**We refer to the right-hand side of the equation $\partial_t \vec{f} = \mathbf{M}\ \vec{f}$ as the RHS. In this case, we refer to the $\mathbf{M}\ \vec{f}$ as the "scalar wave RHSs".** In the following sections we will 1. Use NRPy+ to cast the scalar wave RHS expressions -- in finite difference form -- into highly efficient C code, 1. first in one spatial dimension with fourth-order finite differences, 1. and then in three spatial dimensions with tenth-order finite differences.1. Use NRPy+ to generate monochromatic plane-wave initial data for the scalar wave equation, where the wave propagates in an arbitrary direction.As for the $\nabla^2 u$ term, spatial derivatives are handled in NRPy+ via [finite differencing](https://en.wikipedia.org/wiki/Finite_difference).We will sample the solution $\{u,v\}$ at discrete, uniformly-sampled points in space and time. For simplicity, let's assume that we consider the wave equation in one spatial dimension. Then the solution at any sampled point in space and time is given by$$u^n_i = u(t_n,x_i) = u(t_0 + n \Delta t, x_0 + i \Delta x),$$where $\Delta t$ and $\Delta x$ represent the temporal and spatial resolution, respectively. $v^n_i$ is sampled at the same points in space and time. Table of Contents$$\label{toc}$$1. [Step 1](initializenrpy): Initialize core NRPy+ modules1. [Step 2](rhss1d): Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing 1. [Step 2.a](ccode1d): C-code output example: Scalar wave RHSs with 4th order finite difference stencils1. [Step 3](rhss3d): Scalar Wave RHSs in Three Spatial Dimensions 1. [Step 3.a](code_validation1): Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module 1. [Step 3.b](ccode3d): C-code output example: Scalar wave RHSs with 10th order finite difference stencils and SIMD enabled1. [Step 4](id): Setting up Initial Data for the Scalar Wave Equation 1. [Step 4.a](planewave): The Monochromatic Plane-Wave Solution 1. [Step 4.b](sphericalgaussian): The Spherical Gaussian Solution (*Courtesy Thiago Assumpção*)1. [Step 5](code_validation2): Code Validation against `ScalarWave.InitialData` NRPy+ module1. 
[Step 6](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Initialize core NRPy+ modules \[Back to [top](toc)\]$$\label{initializenrpy}$$Let's start by importing all the needed modules from NRPy+:
###Code
# Step P1: Import needed NRPy+ core modules:
import NRPy_param_funcs as par # NRPy+: Parameter interface
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import grid as gri # NRPy+: Functions having to do with numerical grids
import finite_difference as fin # NRPy+: Finite difference C code generation module
from outputC import lhrh # NRPy+: Core C code output module
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
###Output
_____no_output_____
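###Markdown
Before moving on to NRPy+ code generation, the following cell is a minimal pure-NumPy sketch (an illustrative addition to this notebook, not NRPy+-generated code) of the Method of Lines algorithm outlined in the Introduction: a second-order finite-difference Laplacian in space combined with RK4 integration in time for the coupled system $\partial_t u = v$, $\partial_t v = c^2 \partial_x^2 u$ in one spatial dimension. All names and parameter choices in this sketch (grid size, timestep, number of steps) are made up for illustration only.
###Code
# Illustrative Method-of-Lines sketch (NOT NRPy+-generated C code): 1D scalar wave
# equation with a 2nd-order finite-difference Laplacian in space and RK4 in time,
# on a periodic domain [0, 2*pi).
import numpy as np

c  = 1.0                      # wave speed
N  = 200                      # number of grid points
dx = 2.0*np.pi/N              # uniform grid spacing
x  = np.arange(N)*dx
dt = 0.5*dx/c                 # timestep chosen well within the CFL limit

def rhs(u, v):
    """Right-hand sides of du/dt = v, dv/dt = c^2 d^2u/dx^2 (2nd-order stencil, periodic BCs)."""
    lap = (np.roll(u, -1) - 2.0*u + np.roll(u, +1))/dx**2
    return v, c**2*lap

def rk4_step(u, v, dt):
    """One classical RK4 step for the coupled (u, v) system."""
    k1u, k1v = rhs(u,              v)
    k2u, k2v = rhs(u + 0.5*dt*k1u, v + 0.5*dt*k1v)
    k3u, k3v = rhs(u + 0.5*dt*k2u, v + 0.5*dt*k2v)
    k4u, k4v = rhs(u +     dt*k3u, v +     dt*k3v)
    return (u + dt/6.0*(k1u + 2*k2u + 2*k3u + k4u),
            v + dt/6.0*(k1v + 2*k2v + 2*k3v + k4v))

# Plane-wave-like initial data: u = sin(x) + 2, v = -c*cos(x); exact solution is u = sin(x - c*t) + 2.
u, v = np.sin(x) + 2.0, -c*np.cos(x)
n_steps = 200
for n in range(n_steps):
    u, v = rk4_step(u, v, dt)
t_final = n_steps*dt
print("max |numerical - exact| =", np.max(np.abs(u - (np.sin(x - c*t_final) + 2.0))))
###Output
_____no_output_____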
###Markdown
Step 2: Scalar Wave RHSs in One Spatial Dimension \[Back to [top](toc)\]$$\label{rhss1d}$$To minimize complication, we will first restrict ourselves to solving the wave equation in one spatial dimension, so\begin{align}\partial_t u &= v \\\partial_t v &= c^2 \nabla^2 u \\ &= c^2 \partial_x^2 u.\end{align}We will construct SymPy expressions of the right-hand sides of $u$ and $v$ using [NRPy+ finite-difference notation](Tutorial-Finite_Difference_Derivatives.ipynb) to represent the derivative, so that finite-difference C-code kernels can be easily constructed.Extension of this operator to higher spatial dimensions when using NRPy+ is straightforward, as we will see below.
###Code
# Step P2: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
thismodule = "ScalarWave"
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 1: Set the spatial dimension parameter, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM", 1)
DIM = par.parval_from_str("grid::DIM")
# Step 2: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL", ["uu", "vv"])
# Step 3: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD", "sym01")
# Step 4: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
vv_rhs = sp.simplify(vv_rhs)
###Output
_____no_output_____
###Markdown
Step 2.a: C-code output example: Scalar wave RHSs with 4th order finite difference stencils \[Back to [top](toc)\]$$\label{ccode1d}$$As was discussed in [the finite difference section of the tutorial](Tutorial-Finite_Difference_Derivatives.ipynb), NRPy+ approximates derivatives using [finite-difference methods](https://en.wikipedia.org/wiki/Finite_difference); the second-order derivative $\partial_x^2$, accurate to fourth order in the uniform grid spacing $\Delta x$ (obtained by fitting the unique 4th-degree polynomial to 5 sample points of $u$), is given by\begin{equation}\left[\partial_x^2 u(t,x)\right]_j = \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right)+ \mathcal{O}\left((\Delta x)^4\right).\end{equation}
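To see where these coefficients come from, here is a minimal stand-alone SymPy sketch (an illustrative addition; it does not use the NRPy+ `finite_difference` module) that recovers them by matching Taylor-series coefficients:
###Code
# Illustrative sketch: writing [d^2 u/dx^2]_j ~ (1/dx^2) * sum_m a_m u_{j+m} over offsets
# m = -2..2, Taylor expansion of u_{j+m} requires sum_m a_m m^k / k! = delta_{k,2} for k = 0,...,4.
import sympy as sp
offsets = [-2, -1, 0, 1, 2]
a = sp.symbols("a_m2 a_m1 a_0 a_p1 a_p2")
eqs = [sp.Eq(sum(a_m*sp.Integer(m)**k for a_m, m in zip(a, offsets))/sp.factorial(k),
             1 if k == 2 else 0) for k in range(5)]
print(sp.solve(eqs, a))  # expected: {a_m2: -1/12, a_m1: 4/3, a_0: -5/2, a_p1: 4/3, a_p2: -1/12}
###Output
_____no_output_____
###Markdown
With the stencil in hand, the next cell sets the finite-differencing order to 4 and has NRPy+ generate the corresponding C-code kernel.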
###Code
# Step 5: Set the finite differencing order to 4.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", 4)
# Step 6: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs", "uu"), rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs", "vv"), rhs=vv_rhs)])
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expression:
* "const double uu_dDD00 = invdx0**2*(-5*uu/2 + 4*uu_i0m1/3 - uu_i0m2/12 + 4*uu_i0p1/3 - uu_i0p2/12)"
*/
const double uu_i0m2 = in_gfs[IDX2(UUGF, i0-2)];
const double uu_i0m1 = in_gfs[IDX2(UUGF, i0-1)];
const double uu = in_gfs[IDX2(UUGF, i0)];
const double uu_i0p1 = in_gfs[IDX2(UUGF, i0+1)];
const double uu_i0p2 = in_gfs[IDX2(UUGF, i0+2)];
const double vv = in_gfs[IDX2(VVGF, i0)];
const double FDPart1_Rational_5_2 = 5.0/2.0;
const double FDPart1_Rational_1_12 = 1.0/12.0;
const double FDPart1_Rational_4_3 = 4.0/3.0;
const double uu_dDD00 = ((invdx0)*(invdx0))*(FDPart1_Rational_1_12*(-uu_i0m2 - uu_i0p2) + FDPart1_Rational_4_3*(uu_i0m1 + uu_i0p1) - FDPart1_Rational_5_2*uu);
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[rhs_gfs[IDX2(UUGF, i0)] = vv,
* rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*wavespeed**2]"
*/
rhs_gfs[IDX2(UUGF, i0)] = vv;
rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*((wavespeed)*(wavespeed));
}
###Markdown
**Success!** Notice that indeed NRPy+ was able to compute the spatial derivative operator,\begin{equation}\left[\partial_x^2 u(t,x)\right]_j \approx \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right),\end{equation}correctly (this is easier to read in the "Original SymPy expressions" comment block at the top of the C output). As NRPy+ is designed to generate codes in arbitrary coordinate systems, instead of sticking with Cartesian notation for 3D coordinates, $x,y,z$, we instead adopt $x_0,x_1,x_2$ for our coordinate labels. Thus you will notice the appearance of `invdx0`$=1/\Delta x_0$, where $\Delta x_0$ is the (uniform) grid spacing in the zeroth, or $x_0$, direction. In this case $x_0$ represents the $x$ direction. Step 3: Scalar Wave RHSs in Three Spatial Dimensions \[Back to [top](toc)\]$$\label{rhss3d}$$Let's next repeat the same process, only this time for the scalar wave equation in **3 spatial dimensions** (3D).
###Code
# Step 1: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
wavespeed = par.Cparameters("REAL", thismodule, "wavespeed", 1.0)
# Step 2: Set the spatial dimension parameter
# to *THREE* this time, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM", 3)
DIM = par.parval_from_str("grid::DIM")
# Step 3a: Reset gridfunctions registered in 1D case above,
# to avoid NRPy+ throwing an error about double-
# registering gridfunctions, which is not allowed.
gri.glb_gridfcs_list = []
# Step 3b: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL", ["uu", "vv"])
# Step 4: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD", "sym01")
# Step 5: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
# Step 6: Simplify the expression for c^2 \nabla^2 u (a.k.a., vv_rhs):
vv_rhs = sp.simplify(vv_rhs)
###Output
_____no_output_____
###Markdown
Step 3.a: Validate SymPy expressions against `ScalarWave.ScalarWave_RHSs` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation1}$$Here, as a code validation check, we verify agreement in the SymPy expressions for the RHSs of the three-spatial-dimension Scalar Wave equation (i.e., `uu_rhs` and `vv_rhs`) between1. this tutorial and 2. the [NRPy+ ScalarWave.ScalarWave_RHSs](../edit/ScalarWave/ScalarWave_RHSs.py) module.
###Code
# Step 10: We already have SymPy expressions for uu_rhs and vv_rhs in
# terms of other SymPy variables. Even if we reset the list
# of NRPy+ gridfunctions, these *SymPy* expressions for
# uu_rhs and vv_rhs *will remain unaffected*.
#
# Here, we will use the above-defined uu_rhs and vv_rhs to
# validate against the same expressions in the
# ScalarWave/ScalarWave_RHSs.py module,
# to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the ScalarWave_RHSs.py module itself.
#
# Reset the list of gridfunctions, as registering a gridfunction
# twice will spawn an error.
gri.glb_gridfcs_list = []
# Step 11: Call the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module,
# which should do exactly the same as in Steps 1-10 above.
import ScalarWave.ScalarWave_RHSs as swrhs
swrhs.ScalarWave_RHSs()
# Step 12: Consistency check between the tutorial notebook above
# and the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_rhs - swrhs.uu_rhs = "+str(sp.simplify(uu_rhs - swrhs.uu_rhs))+"\t\t (should be zero)")
print("vv_rhs - swrhs.vv_rhs = "+str(sp.simplify(vv_rhs - swrhs.vv_rhs))+"\t\t (should be zero)")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_rhs - swrhs.uu_rhs = 0 (should be zero)
vv_rhs - swrhs.vv_rhs = 0 (should be zero)
###Markdown
Step 3.b: C-code output example: Scalar wave RHSs with 10th order finite difference stencils and SIMD enabled \[Back to [top](toc)\]$$\label{ccode3d}$$Next we'll output the above expressions as C code, using the [NRPy+ finite-differencing C code kernel generation infrastructure](Tutorial-Finite_Difference_Derivatives.ipynb). This code will represent spatial derivatives as 10th-order finite differences and output the C code with [SIMD](https://en.wikipedia.org/wiki/SIMD) enabled. ([Common-subexpression elimination](https://en.wikipedia.org/wiki/Common_subexpression_elimination) is enabled by default.)
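As a brief aside, the common-subexpression elimination step can be illustrated with plain SymPy; the snippet below is a stand-alone example (an illustrative addition, not the NRPy+ call itself) of the kind of temporary-variable extraction that shows up as the `FDPart1_*` and `tmp*` variables in the generated kernels:
###Code
# Illustrative sketch of common-subexpression elimination with plain SymPy.
import sympy as sp
a, b = sp.symbols("a b")
expr = sp.sin(a + b)**2 + sp.cos(a + b)**2 + sp.sin(a + b)
replacements, reduced = sp.cse(expr)
print(replacements)  # e.g. [(x0, a + b), (x1, sin(x0))]
print(reduced)       # e.g. [x1**2 + x1 + cos(x0)**2]
###Output
_____no_output_____
###Markdown
Returning to the NRPy+ workflow, the next cell raises the finite-differencing order to 10 and generates the SIMD-enabled kernel: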
###Code
# Step 7: Set the finite differencing order to 10.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", 10)
# Step 8: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)], params="enable_SIMD=True")
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY uu_dDD00 = invdx0**2*(-5269*uu/1800 + 5*uu_i0m1_i1_i2/3 - 5*uu_i0m2_i1_i2/21 + 5*uu_i0m3_i1_i2/126 - 5*uu_i0m4_i1_i2/1008 + uu_i0m5_i1_i2/3150 + 5*uu_i0p1_i1_i2/3 - 5*uu_i0p2_i1_i2/21 + 5*uu_i0p3_i1_i2/126 - 5*uu_i0p4_i1_i2/1008 + uu_i0p5_i1_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD11 = invdx1**2*(-5269*uu/1800 + 5*uu_i0_i1m1_i2/3 - 5*uu_i0_i1m2_i2/21 + 5*uu_i0_i1m3_i2/126 - 5*uu_i0_i1m4_i2/1008 + uu_i0_i1m5_i2/3150 + 5*uu_i0_i1p1_i2/3 - 5*uu_i0_i1p2_i2/21 + 5*uu_i0_i1p3_i2/126 - 5*uu_i0_i1p4_i2/1008 + uu_i0_i1p5_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD22 = invdx2**2*(-5269*uu/1800 + 5*uu_i0_i1_i2m1/3 - 5*uu_i0_i1_i2m2/21 + 5*uu_i0_i1_i2m3/126 - 5*uu_i0_i1_i2m4/1008 + uu_i0_i1_i2m5/3150 + 5*uu_i0_i1_i2p1/3 - 5*uu_i0_i1_i2p2/21 + 5*uu_i0_i1_i2p3/126 - 5*uu_i0_i1_i2p4/1008 + uu_i0_i1_i2p5/3150)]"
*/
const REAL_SIMD_ARRAY uu_i0_i1_i2m5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-5)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-1)]);
const REAL_SIMD_ARRAY uu_i0_i1m5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-1,i2)]);
const REAL_SIMD_ARRAY uu_i0m5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-1,i1,i2)]);
const REAL_SIMD_ARRAY uu = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+1,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+1)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+5)]);
const REAL_SIMD_ARRAY vv = ReadSIMD(&in_gfs[IDX4(VVGF, i0,i1,i2)]);
const double tmpFDPart1_NegativeOne_ = -1.0;
const REAL_SIMD_ARRAY FDPart1_NegativeOne_ = ConstSIMD(tmpFDPart1_NegativeOne_);
const double tmpFDPart1_Rational_1_3150 = 1.0/3150.0;
const REAL_SIMD_ARRAY FDPart1_Rational_1_3150 = ConstSIMD(tmpFDPart1_Rational_1_3150);
const double tmpFDPart1_Rational_5269_1800 = 5269.0/1800.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5269_1800 = ConstSIMD(tmpFDPart1_Rational_5269_1800);
const double tmpFDPart1_Rational_5_1008 = 5.0/1008.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_1008 = ConstSIMD(tmpFDPart1_Rational_5_1008);
const double tmpFDPart1_Rational_5_126 = 5.0/126.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_126 = ConstSIMD(tmpFDPart1_Rational_5_126);
const double tmpFDPart1_Rational_5_21 = 5.0/21.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_21 = ConstSIMD(tmpFDPart1_Rational_5_21);
const double tmpFDPart1_Rational_5_3 = 5.0/3.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_3 = ConstSIMD(tmpFDPart1_Rational_5_3);
const REAL_SIMD_ARRAY FDPart1_0 = MulSIMD(FDPart1_Rational_5269_1800, uu);
const REAL_SIMD_ARRAY uu_dDD00 = MulSIMD(MulSIMD(invdx0, invdx0), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0m3_i1_i2, uu_i0p3_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0m1_i1_i2, uu_i0p1_i1_i2), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0m5_i1_i2, uu_i0p5_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0m4_i1_i2, uu_i0p4_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0m2_i1_i2, uu_i0p2_i1_i2), FDPart1_0))))));
const REAL_SIMD_ARRAY uu_dDD11 = MulSIMD(MulSIMD(invdx1, invdx1), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0_i1m3_i2, uu_i0_i1p3_i2), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0_i1m1_i2, uu_i0_i1p1_i2), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0_i1m5_i2, uu_i0_i1p5_i2), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0_i1m4_i2, uu_i0_i1p4_i2), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0_i1m2_i2, uu_i0_i1p2_i2), FDPart1_0))))));
const REAL_SIMD_ARRAY uu_dDD22 = MulSIMD(MulSIMD(invdx2, invdx2), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0_i1_i2m3, uu_i0_i1_i2p3), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0_i1_i2m1, uu_i0_i1_i2p1), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0_i1_i2m5, uu_i0_i1_i2p5), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0_i1_i2m4, uu_i0_i1_i2p4), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0_i1_i2m2, uu_i0_i1_i2p2), FDPart1_0))))));
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY __RHS_exp_0 = vv,
* const REAL_SIMD_ARRAY __RHS_exp_1 = wavespeed**2*(uu_dDD00 + uu_dDD11 + uu_dDD22)]"
*/
const REAL_SIMD_ARRAY __RHS_exp_0 = vv;
const REAL_SIMD_ARRAY __RHS_exp_1 = MulSIMD(MulSIMD(wavespeed, wavespeed), AddSIMD(uu_dDD00, AddSIMD(uu_dDD11, uu_dDD22)));
WriteSIMD(&rhs_gfs[IDX4(UUGF, i0, i1, i2)], __RHS_exp_0);
WriteSIMD(&rhs_gfs[IDX4(VVGF, i0, i1, i2)], __RHS_exp_1);
}
###Markdown
Step 4: Setting up Initial Data for the Scalar Wave Equation \[Back to [top](toc)\]$$\label{id}$$ Step 4.a: The Monochromatic Plane-Wave Solution \[Back to [top](toc)\]$$\label{planewave}$$The solution to the scalar wave equation for a monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction is$$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$where $\hat{k}$ is a unit vector. We choose $f(\hat{k}\cdot\vec{x} - c t)$ to take the form$$f(\hat{k}\cdot\vec{x} - c t) = \sin\left(\hat{k}\cdot\vec{x} - c t\right) + 2,$$where we add the $+2$ to ensure that the exact solution never crosses through zero. In places where the exact solution passes through zero, the relative error (i.e., the most common error measure used to check that the numerical solution converges to the exact solution) is undefined. Also, $f(\hat{k}\cdot\vec{x} - c t)$ plus a constant is still a solution to the wave equation.
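As a quick check by hand (an aside; the verification cell below performs the same check symbolically): since $\hat{k}$ is a unit vector, $\nabla^2 u = \left(\hat{k}\cdot\hat{k}\right) f''\left(\hat{k}\cdot\vec{x} - c t\right) = f''\left(\hat{k}\cdot\vec{x} - c t\right)$ and $\partial_t^2 u = c^2 f''\left(\hat{k}\cdot\vec{x} - c t\right)$, so $c^2 \nabla^2 u - \partial_t^2 u = 0$, with the added constant contributing nothing to either derivative.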
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx # Sets the Cartesian coordinates xx[0]=x; xx[1]=y; xx[2]=z
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
kk = par.Cparameters("REAL", thismodule, ["kk0", "kk1", "kk2"],[1.0,1.0,1.0])
# Step 3: Normalize the k vector
kk_norm = sp.sqrt(kk[0]**2 + kk[1]**2 + kk[2]**2)
# Step 4: Compute k.x
dot_product = sp.sympify(0)
for i in range(DIM):
dot_product += xx[i]*kk[i]
dot_product /= kk_norm
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID_PlaneWave = sp.sin(dot_product - wavespeed*time)+2
vv_ID_PlaneWave = sp.diff(uu_ID_PlaneWave, time)
###Output
_____no_output_____
###Markdown
Next we verify that $f(\hat{k}\cdot\vec{x} - c t)$ satisfies the wave equation, by computing$$\left(c^2 \nabla^2 - \partial_t^2 \right)\ f\left(\hat{k}\cdot\vec{x} - c t\right),$$and confirming the result is exactly zero.
###Code
sp.simplify(wavespeed**2*(sp.diff(uu_ID_PlaneWave,xx[0],2) +
sp.diff(uu_ID_PlaneWave,xx[1],2) +
sp.diff(uu_ID_PlaneWave,xx[2],2))
- sp.diff(uu_ID_PlaneWave,time,2))
###Output
_____no_output_____
###Markdown
Step 4.b: The Spherical Gaussian Solution \[Back to [top](toc)\]$$\label{sphericalgaussian}$$Here we will implement the spherical Gaussian solution, which consists of ingoing and outgoing wave fronts:\begin{align}u(r,t) &= u_{\rm out}(r,t) + u_{\rm in}(r,t) + 1,\ \ \text{where}\\u_{\rm out}(r,t) &=\frac{r-ct}{r} \exp\left[\frac{-(r-ct)^2}{2 \sigma^2}\right] \\u_{\rm in}(r,t) &=\frac{r+ct}{r} \exp\left[\frac{-(r+ct)^2}{2 \sigma^2}\right] \\\end{align}where $c$ is the wave speed and $\sigma$ is the width of the Gaussian (i.e., the "standard deviation").
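To see why each term solves the wave equation, recall that for a spherically symmetric function the Laplacian reduces to $\nabla^2 u = \frac{1}{r}\partial_r^2\left(r u\right)$, so the substitution $\psi(r,t) = r\,u(r,t)$ turns the 3D wave equation into the 1D wave equation $\partial_t^2 \psi = c^2 \partial_r^2 \psi$, whose general solution is $\psi = F(r-ct) + G(r+ct)$. Choosing $F(s) = G(s) = s\, e^{-s^2/(2\sigma^2)}$ and dividing by $r$ gives exactly $u_{\rm out} + u_{\rm in}$ above; note that $\psi(0,t) = 0$, so $u$ remains regular at the origin. The constant $1$ is itself a solution of the wave equation, so the full expression above still satisfies it.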
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx # Sets the Cartesian coordinates xx[0]=x; xx[1]=y; xx[2]=z
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
sigma = par.Cparameters("REAL", thismodule, "sigma",3.0)
# Step 4: Compute r
r = sp.sympify(0)
for i in range(DIM):
r += xx[i]**2
r = sp.sqrt(r)
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID_SphericalGaussianOUT = +(r - wavespeed*time)/r * sp.exp( -(r - wavespeed*time)**2 / (2*sigma**2) )
uu_ID_SphericalGaussianIN = +(r + wavespeed*time)/r * sp.exp( -(r + wavespeed*time)**2 / (2*sigma**2) )
uu_ID_SphericalGaussian = uu_ID_SphericalGaussianOUT + uu_ID_SphericalGaussianIN + sp.sympify(1)
vv_ID_SphericalGaussian = sp.diff(uu_ID_SphericalGaussian, time)
###Output
_____no_output_____
###Markdown
Since the wave equation is linear, both the ingoing and outgoing waves must satisfy the wave equation, which implies that their sum also satisfies the wave equation. Next we verify that $u(r,t)$ satisfies the wave equation by confirming that$$\left(c^2 \nabla^2 - \partial_t^2 \right)\left\{u_{\rm out}(r,t)\right\}$$and$$\left(c^2 \nabla^2 - \partial_t^2 \right)\left\{u_{\rm in}(r,t)\right\}$$are separately zero. We check the two pieces separately because SymPy has difficulty simplifying the combined expression.
###Code
print(sp.simplify(wavespeed**2*(sp.diff(uu_ID_SphericalGaussianOUT,xx[0],2) +
sp.diff(uu_ID_SphericalGaussianOUT,xx[1],2) +
sp.diff(uu_ID_SphericalGaussianOUT,xx[2],2))
- sp.diff(uu_ID_SphericalGaussianOUT,time,2)) )
print(sp.simplify(wavespeed**2*(sp.diff(uu_ID_SphericalGaussianIN,xx[0],2) +
sp.diff(uu_ID_SphericalGaussianIN,xx[1],2) +
sp.diff(uu_ID_SphericalGaussianIN,xx[2],2))
- sp.diff(uu_ID_SphericalGaussianIN,time,2)))
###Output
0
0
###Markdown
Step 5: Code Validation against `ScalarWave.InitialData` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation2}$$As a code validation check, we will verify agreement in the SymPy expressions for both the plane-wave and spherical Gaussian initial data for the Scalar Wave equation between 1. this tutorial and 2. the NRPy+ [ScalarWave.InitialData](../edit/ScalarWave/InitialData.py) module.
###Code
# We just defined SymPy expressions for uu_ID and vv_ID in
# terms of other SymPy variables. Here, we will use the
# above-defined uu_ID and vv_ID to validate against the
# same expressions in the ScalarWave/InitialData.py
# module, to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the PlaneWave ID module itself.
#
# Step 6: Call the InitialData(Type="PlaneWave") function from within the
# ScalarWave/InitialData.py module,
# which should do exactly the same as in Steps 1-5 above.
import sys # Standard Python module; needed for sys.exit() on test failure below
import ScalarWave.InitialData as swid
swid.InitialData(Type="PlaneWave")
# Step 7: Consistency check between the tutorial notebook above
# and the PlaneWave option from within the
# ScalarWave/InitialData.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module: PlaneWave Case")
if sp.simplify(uu_ID_PlaneWave - swid.uu_ID) != 0:
print("TEST FAILED: uu_ID_PlaneWave - swid.uu_ID = "+str(sp.simplify(uu_ID_PlaneWave - swid.uu_ID))+"\t\t (should be zero)")
sys.exit(1)
if sp.simplify(vv_ID_PlaneWave - swid.vv_ID) != 0:
print("TEST FAILED: vv_ID_PlaneWave - swid.vv_ID = "+str(sp.simplify(vv_ID_PlaneWave - swid.vv_ID))+"\t\t (should be zero)")
sys.exit(1)
print("TESTS PASSED!")
# Step 8: Consistency check between the tutorial notebook above
# and the SphericalGaussian option from within the
# ScalarWave/InitialData.py module.
swid.InitialData(Type="SphericalGaussian")
print("Consistency check between ScalarWave tutorial and NRPy+ module: SphericalGaussian Case")
if sp.simplify(uu_ID_SphericalGaussian - swid.uu_ID) != 0:
print("TEST FAILED: uu_ID_SphericalGaussian - swid.uu_ID = "+str(sp.simplify(uu_ID_SphericalGaussian - swid.uu_ID))+"\t\t (should be zero)")
sys.exit(1)
if sp.simplify(vv_ID_SphericalGaussian - swid.vv_ID) != 0:
print("TEST FAILED: vv_ID_SphericalGaussian - swid.vv_ID = "+str(sp.simplify(vv_ID_SphericalGaussian - swid.vv_ID))+"\t\t (should be zero)")
sys.exit(1)
print("TESTS PASSED!")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module: PlaneWave Case
TESTS PASSED!
Consistency check between ScalarWave tutorial and NRPy+ module: SphericalGaussian Case
TESTS PASSED!
###Markdown
Step 6: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-ScalarWave.pdf](Tutorial-ScalarWave.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-ScalarWave")
###Output
Created Tutorial-ScalarWave.tex, and compiled LaTeX file to PDF file
Tutorial-ScalarWave.pdf
###Markdown
Generating C Code for the Scalar Wave Equation in Cartesian Coordinates Authors: Zach Etienne & Thiago Assumpção Formatting improvements courtesy Brandon Clark This module generates the C Code for the Scalarwave in Cartesian coordinates and sets up either monochromatic plane wave or spherical Gaussian [Initial Data](https://en.wikipedia.org/wiki/Initial_value_problem). **Notebook Status:** Validated **Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented below ([right-hand-side expressions](code_validation1); [initial data expressions](code_validation2)). In addition, all expressions have been validated against a trusted code (the [original SENR/NRPy+ code](https://bitbucket.org/zach_etienne/nrpy)). NRPy+ Source Code for this module: * [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py)* [ScalarWave/InitialData.py](../edit/ScalarWave/InitialData.py) Introduction: Problem StatementWe wish to numerically solve the scalar wave equation as an [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem) in Cartesian coordinates:$$\partial_t^2 u = c^2 \nabla^2 u \text{,}$$where $u$ (the amplitude of the wave) is a function of time and space: $u = u(t,x,y,...)$ (spatial dimension as-yet unspecified) and $c$ is the wave speed, subject to some initial condition$$u(0,x,y,...) = f(x,y,...)$$and suitable spatial boundary conditions.As described in the next section, we will find it quite useful to define$$v(t,x,y,...) = \partial_t u(t,x,y,...).$$In this way, the second-order PDE is reduced to a set of two coupled first-order PDEs\begin{align}\partial_t u &= v \\\partial_t v &= c^2 \nabla^2 u.\end{align}We will use NRPy+ to generate efficient C codes capable of generating both initial data $u(0,x,y,...) = f(x,y,...)$; $v(0,x,y,...)=g(x,y,...)$, as well as finite-difference expressions for the right-hand sides of the above expressions. These expressions are needed within the *Method of Lines* to "integrate" the solution forward in time. The Method of LinesOnce we have initial data, we "evolve it forward in time", using the [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html). In short, the Method of Lines enables us to handle 1. the **spatial derivatives** of an initial value problem PDE using **standard finite difference approaches**, and2. the **temporal derivatives** of an initial value problem PDE using **standard strategies for solving ordinary differential equations (ODEs)**, so long as the initial value problem PDE can be written in the form$$\partial_t \vec{f} = \mathbf{M}\ \vec{f},$$where $\mathbf{M}$ is an $N\times N$ matrix filled with differential operators that act on the $N$-element column vector $\vec{f}$. $\mathbf{M}$ may not contain $t$ or time derivatives explicitly; only *spatial* partial derivatives are allowed to appear inside $\mathbf{M}$. The scalar wave equation as written in the [previous module](Tutorial-ScalarWave.ipynb)\begin{equation}\partial_t \begin{bmatrix}u \\v \end{bmatrix}=\begin{bmatrix}0 & 1 \\c^2 \nabla^2 & 0 \end{bmatrix}\begin{bmatrix}u \\v \end{bmatrix}\end{equation}satisfies this requirement. 
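To make the Method-of-Lines structure concrete before generating any C code, the following is a minimal, stand-alone Python sketch (illustrative only: it is *not* NRPy+-generated code, and names such as `rhs` and `rk4_step` are chosen here purely for clarity). It evolves the 1D system $\partial_t u = v$, $\partial_t v = c^2 \partial_x^2 u$ on a periodic grid, evaluating the spatial derivative with a centered finite-difference stencil and stepping forward in time with the classical fourth-order Runge-Kutta (RK4) method discussed immediately below.
###Code
# Minimal Method-of-Lines illustration (stand-alone sketch; *not* NRPy+-generated code).
# Evolve u_t = v, v_t = c^2 u_xx on a periodic 1D grid: spatial derivative via a
# centered finite-difference stencil, time integration via classical RK4.
import numpy as np

c  = 1.0                                   # wave speed
N  = 200                                   # number of grid points
x  = np.linspace(0.0, 2.0*np.pi, N, endpoint=False)
dx = x[1] - x[0]
dt = 0.5*dx/c                              # safely below the stability limit

def rhs(state):
    """Evaluate the right-hand side M f for f = (u, v)."""
    u, v = state
    u_xx = (np.roll(u, -1) - 2.0*u + np.roll(u, 1))/dx**2   # 2nd-order centered stencil
    return np.array([v, c**2*u_xx])

def rk4_step(state, dt):
    """Advance f = (u, v) by one classical RK4 step."""
    k1 = rhs(state)
    k2 = rhs(state + 0.5*dt*k1)
    k3 = rhs(state + 0.5*dt*k2)
    k4 = rhs(state + dt*k3)
    return state + dt/6.0*(k1 + 2.0*k2 + 2.0*k3 + k4)

# Plane-wave initial data u = sin(x) + 2, v = du/dt = -c*cos(x), evolved 100 steps:
state = np.array([np.sin(x) + 2.0, -c*np.cos(x)])
for _ in range(100):
    state = rk4_step(state, dt)
###Output
_____no_output_____
###Markdown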
Thus we can treat the spatial derivatives $\nabla^2 u$ of the scalar wave equation using **standard finite-difference approaches**, and the temporal derivatives $\partial_t u$ and $\partial_t v$ using **standard approaches for solving ODEs**. In [the next module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will apply the highly robust [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4), used widely for numerically solving ODEs, to "march" (integrate) the solution vector $\vec{f}$ forward in time from its initial value ("initial data"). Basic AlgorithmThe basic algorithm for solving the scalar wave equation [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem), based on the Method of Lines (see section above) is outlined below, with NRPy+-based components highlighted in green. We will review how NRPy+ generates these core components in this module.1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. Set gridfunction values to initial data.1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following: 1. Evaluate scalar wave RHS expressions. 1. Apply boundary conditions.**We refer to the right-hand side of the equation $\partial_t \vec{f} = \mathbf{M}\ \vec{f}$ as the RHS. In this case, we refer to the $\mathbf{M}\ \vec{f}$ as the "scalar wave RHSs".** In the following sections we will 1. Use NRPy+ to cast the scalar wave RHS expressions -- in finite difference form -- into highly efficient C code, 1. first in one spatial dimension with fourth-order finite differences, 1. and then in three spatial dimensions with tenth-order finite differences.1. Use NRPy+ to generate monochromatic plane-wave initial data for the scalar wave equation, where the wave propagates in an arbitrary direction.As for the $\nabla^2 u$ term, spatial derivatives are handled in NRPy+ via [finite differencing](https://en.wikipedia.org/wiki/Finite_difference).We will sample the solution $\{u,v\}$ at discrete, uniformly-sampled points in space and time. For simplicity, let's assume that we consider the wave equation in one spatial dimension. Then the solution at any sampled point in space and time is given by$$u^n_i = u(t_n,x_i) = u(t_0 + n \Delta t, x_0 + i \Delta x),$$where $\Delta t$ and $\Delta x$ represent the temporal and spatial resolution, respectively. $v^n_i$ is sampled at the same points in space and time. Table of Contents$$\label{toc}$$1. [Step 1](initializenrpy): Initialize core NRPy+ modules1. [Step 2](rhss1d): Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing1. [Step 3](rhss3d): Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing 1. [Step 3.a](code_validation1): Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module1. [Step 4](id): Setting up Initial Data for the Scalar Wave Equation 1. [Step 4.a](planewave): The Monochromatic Plane-Wave Solution 1. [Step 4.b](sphericalgaussian): The Spherical Gaussian Solution (*Courtesy Thiago Assumpção*)1. [Step 5](code_validation2): Code Validation against `ScalarWave.InitialData` NRPy+ module1. [Step 6](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Initialize core NRPy+ modules \[Back to [top](toc)\]$$\label{initializenrpy}$$Let's start by importing all the needed modules from NRPy+:
###Code
# Step P1: Import needed NRPy+ core modules:
import NRPy_param_funcs as par # NRPy+: Parameter interface
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import grid as gri # NRPy+: Functions having to do with numerical grids
import finite_difference as fin # NRPy+: Finite difference C code generation module
from outputC import lhrh # NRPy+: Core C code output module
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
###Output
_____no_output_____
###Markdown
Step 2: Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing \[Back to [top](toc)\]$$\label{rhss1d}$$To minimize complication, we will first restrict ourselves to solving the wave equation in one spatial dimension, so$$\nabla^2 u = \partial_x^2 u.$$Extension of this operator to higher spatial dimensions is straightforward, particularly when using NRPy+. As was discussed in [the finite difference section of the tutorial](Tutorial-Finite_Difference_Derivatives.ipynb), NRPy+ approximates derivatives using [finite difference methods](https://en.wikipedia.org/wiki/Finite_difference). The second-order derivative $\partial_x^2$, accurate to fourth order in the uniform grid spacing $\Delta x$ (obtained by fitting the unique 4th-degree polynomial to 5 sample points of $u$), is given by\begin{equation}\left[\partial_x^2 u(t,x)\right]_j = \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right)+ \mathcal{O}\left((\Delta x)^4\right).\end{equation}
###Code
# Step P2: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
thismodule = "ScalarWave"
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 1: Set the spatial dimension parameter, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",1)
DIM = par.parval_from_str("grid::DIM")
# Step 2: Set the finite differencing order to 4.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4)
# Step 3: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 4: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 5: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
vv_rhs = sp.simplify(vv_rhs)
# Step 6: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)])
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expression:
* "const double uu_dDD00 = invdx0**2*(-5*uu/2 + 4*uu_i0m1/3 - uu_i0m2/12 + 4*uu_i0p1/3 - uu_i0p2/12)"
*/
const double uu_i0m2 = in_gfs[IDX2(UUGF, i0-2)];
const double uu_i0m1 = in_gfs[IDX2(UUGF, i0-1)];
const double uu = in_gfs[IDX2(UUGF, i0)];
const double uu_i0p1 = in_gfs[IDX2(UUGF, i0+1)];
const double uu_i0p2 = in_gfs[IDX2(UUGF, i0+2)];
const double vv = in_gfs[IDX2(VVGF, i0)];
const double FDPart1_Rational_5_2 = 5.0/2.0;
const double FDPart1_Rational_1_12 = 1.0/12.0;
const double FDPart1_Rational_4_3 = 4.0/3.0;
const double uu_dDD00 = ((invdx0)*(invdx0))*(FDPart1_Rational_1_12*(-uu_i0m2 - uu_i0p2) + FDPart1_Rational_4_3*(uu_i0m1 + uu_i0p1) - FDPart1_Rational_5_2*uu);
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[rhs_gfs[IDX2(UUGF, i0)] = vv,
* rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*wavespeed**2]"
*/
rhs_gfs[IDX2(UUGF, i0)] = vv;
rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*((wavespeed)*(wavespeed));
}
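###Markdown
As a quick, optional sanity check of the coefficients appearing above (the `FDPart1_Rational_*` constants), we can recompute the fourth-order-accurate centered weights for $\partial_x^2$ directly. This is a sketch that assumes SymPy's `finite_diff_weights` helper is available (it is in recent SymPy releases); it is not part of the NRPy+ code-generation path.
###Code
# Optional sanity check: recompute the 4th-order-accurate centered weights for
# d^2/dx^2 on the 5-point stencil {-2,-1,0,+1,+2} and compare against the
# coefficients -1/12, 4/3, -5/2, 4/3, -1/12 used in the generated code above.
import sympy as sp
weights = sp.finite_diff_weights(2, [-2, -1, 0, 1, 2], 0)[-1][-1]
assert weights == [sp.Rational(-1, 12), sp.Rational(4, 3), sp.Rational(-5, 2),
                   sp.Rational(4, 3), sp.Rational(-1, 12)]
###Output
_____no_output_____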
###Markdown
**Success!** Notice that indeed NRPy+ was able to compute the spatial derivative operator,\begin{equation}\left[\partial_x^2 u(t,x)\right]_j \approx \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right),\end{equation}correctly (this is easiest to read in the "Original SymPy expressions" comment block at the top of the C output). Note that `invdx0`$=1/\Delta x_0$, where $\Delta x_0$ is the (uniform) grid spacing in the zeroth, or $x_0$, direction. Step 3: Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing \[Back to [top](toc)\]$$\label{rhss3d}$$Let's next repeat the same process, only this time at **10th** finite difference order, for the **3-spatial-dimension** scalar wave equation, with SIMD enabled:
###Code
# Step 1: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 2: Set the spatial dimension parameter
#          to *THREE* this time, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")
# Step 3: Set the finite differencing order to 10.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",10)
# Step 4a: Reset gridfunctions registered in 1D case above,
# to avoid NRPy+ throwing an error about double-
# registering gridfunctions, which is not allowed.
gri.glb_gridfcs_list = []
# Step 4b: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 5: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 6: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
# Step 7: Simplify the expression for c^2 \nabla^2 u (a.k.a., vv_rhs):
vv_rhs = sp.simplify(vv_rhs)
# Step 8: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)],params="SIMD_enable=True")
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY uu_dDD00 = invdx0**2*(-5269*uu/1800 + 5*uu_i0m1_i1_i2/3 - 5*uu_i0m2_i1_i2/21 + 5*uu_i0m3_i1_i2/126 - 5*uu_i0m4_i1_i2/1008 + uu_i0m5_i1_i2/3150 + 5*uu_i0p1_i1_i2/3 - 5*uu_i0p2_i1_i2/21 + 5*uu_i0p3_i1_i2/126 - 5*uu_i0p4_i1_i2/1008 + uu_i0p5_i1_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD11 = invdx1**2*(-5269*uu/1800 + 5*uu_i0_i1m1_i2/3 - 5*uu_i0_i1m2_i2/21 + 5*uu_i0_i1m3_i2/126 - 5*uu_i0_i1m4_i2/1008 + uu_i0_i1m5_i2/3150 + 5*uu_i0_i1p1_i2/3 - 5*uu_i0_i1p2_i2/21 + 5*uu_i0_i1p3_i2/126 - 5*uu_i0_i1p4_i2/1008 + uu_i0_i1p5_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD22 = invdx2**2*(-5269*uu/1800 + 5*uu_i0_i1_i2m1/3 - 5*uu_i0_i1_i2m2/21 + 5*uu_i0_i1_i2m3/126 - 5*uu_i0_i1_i2m4/1008 + uu_i0_i1_i2m5/3150 + 5*uu_i0_i1_i2p1/3 - 5*uu_i0_i1_i2p2/21 + 5*uu_i0_i1_i2p3/126 - 5*uu_i0_i1_i2p4/1008 + uu_i0_i1_i2p5/3150)]"
*/
const REAL_SIMD_ARRAY uu_i0_i1_i2m5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-5)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-1)]);
const REAL_SIMD_ARRAY uu_i0_i1m5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-1,i2)]);
const REAL_SIMD_ARRAY uu_i0m5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-1,i1,i2)]);
const REAL_SIMD_ARRAY uu = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+1,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+1)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+5)]);
const REAL_SIMD_ARRAY vv = ReadSIMD(&in_gfs[IDX4(VVGF, i0,i1,i2)]);
const double tmpFDPart1_NegativeOne_ = -1.0;
const REAL_SIMD_ARRAY FDPart1_NegativeOne_ = ConstSIMD(tmpFDPart1_NegativeOne_);
const double tmpFDPart1_Rational_1_3150 = 1.0/3150.0;
const REAL_SIMD_ARRAY FDPart1_Rational_1_3150 = ConstSIMD(tmpFDPart1_Rational_1_3150);
const double tmpFDPart1_Rational_5269_1800 = 5269.0/1800.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5269_1800 = ConstSIMD(tmpFDPart1_Rational_5269_1800);
const double tmpFDPart1_Rational_5_1008 = 5.0/1008.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_1008 = ConstSIMD(tmpFDPart1_Rational_5_1008);
const double tmpFDPart1_Rational_5_126 = 5.0/126.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_126 = ConstSIMD(tmpFDPart1_Rational_5_126);
const double tmpFDPart1_Rational_5_21 = 5.0/21.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_21 = ConstSIMD(tmpFDPart1_Rational_5_21);
const double tmpFDPart1_Rational_5_3 = 5.0/3.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_3 = ConstSIMD(tmpFDPart1_Rational_5_3);
const REAL_SIMD_ARRAY FDPart1_0 = MulSIMD(FDPart1_Rational_5269_1800, uu);
const REAL_SIMD_ARRAY uu_dDD00 = MulSIMD(MulSIMD(invdx0, invdx0), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0m3_i1_i2, uu_i0p3_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0m1_i1_i2, uu_i0p1_i1_i2), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0m5_i1_i2, uu_i0p5_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0m4_i1_i2, uu_i0p4_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0m2_i1_i2, uu_i0p2_i1_i2), FDPart1_0))))));
const REAL_SIMD_ARRAY uu_dDD11 = MulSIMD(MulSIMD(invdx1, invdx1), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0_i1m3_i2, uu_i0_i1p3_i2), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0_i1m1_i2, uu_i0_i1p1_i2), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0_i1m5_i2, uu_i0_i1p5_i2), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0_i1m4_i2, uu_i0_i1p4_i2), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0_i1m2_i2, uu_i0_i1p2_i2), FDPart1_0))))));
const REAL_SIMD_ARRAY uu_dDD22 = MulSIMD(MulSIMD(invdx2, invdx2), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0_i1_i2m3, uu_i0_i1_i2p3), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0_i1_i2m1, uu_i0_i1_i2p1), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0_i1_i2m5, uu_i0_i1_i2p5), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0_i1_i2m4, uu_i0_i1_i2p4), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0_i1_i2m2, uu_i0_i1_i2p2), FDPart1_0))))));
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY __RHS_exp_0 = vv,
* const REAL_SIMD_ARRAY __RHS_exp_1 = wavespeed**2*(uu_dDD00 + uu_dDD11 + uu_dDD22)]"
*/
const REAL_SIMD_ARRAY __RHS_exp_0 = vv;
const REAL_SIMD_ARRAY __RHS_exp_1 = MulSIMD(MulSIMD(wavespeed, wavespeed), AddSIMD(uu_dDD00, AddSIMD(uu_dDD11, uu_dDD22)));
WriteSIMD(&rhs_gfs[IDX4(UUGF, i0, i1, i2)], __RHS_exp_0);
WriteSIMD(&rhs_gfs[IDX4(VVGF, i0, i1, i2)], __RHS_exp_1);
}
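###Markdown
The helper functions in the generated code above (`ReadSIMD`, `ConstSIMD`, `FusedMulAddSIMD`, ...) hide the underlying vector instructions. As a rough reading guide, here are scalar stand-ins for their arithmetic semantics. This is an illustrative sketch only, not the actual NRPy+ SIMD header, which maps these names onto hardware vector intrinsics (e.g., fused multiply-add instructions) when compiled with SIMD support.
###Code
# Scalar stand-ins for the SIMD helpers appearing in the generated C code above.
# Sketch of their semantics only -- the real NRPy+ SIMD header maps these onto
# hardware vector intrinsics.
def ConstSIMD(a):             return a           # broadcast a scalar constant
def AddSIMD(a, b):            return a + b
def MulSIMD(a, b):            return a * b
def FusedMulAddSIMD(a, b, c): return a*b + c     # fused multiply-add
def FusedMulSubSIMD(a, b, c): return a*b - c     # fused multiply-subtract
###Output
_____no_output_____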
###Markdown
Step 3.a: Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation1}$$Here, as a code validation check, we verify agreement in the SymPy expressions for the RHSs of the three-spatial-dimension Scalar Wave equation (i.e., `uu_rhs` and `vv_rhs`) between1. this tutorial and 2. the [NRPy+ ScalarWave.ScalarWave_RHSs](../edit/ScalarWave/ScalarWave_RHSs.py) module.
###Code
# Step 10: We already have SymPy expressions for uu_rhs and vv_rhs in
# terms of other SymPy variables. Even if we reset the list
# of NRPy+ gridfunctions, these *SymPy* expressions for
# uu_rhs and vv_rhs *will remain unaffected*.
#
# Here, we will use the above-defined uu_rhs and vv_rhs to
# validate against the same expressions in the
# ScalarWave/ScalarWave_RHSs.py module,
# to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the ScalarWave_RHSs.py module itself.
#
# Reset the list of gridfunctions, as registering a gridfunction
# twice will spawn an error.
gri.glb_gridfcs_list = []
# Step 11: Call the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module,
# which should do exactly the same as in Steps 1-10 above.
import ScalarWave.ScalarWave_RHSs as swrhs
swrhs.ScalarWave_RHSs()
# Step 12: Consistency check between the tutorial notebook above
# and the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_rhs - swrhs.uu_rhs = "+str(sp.simplify(uu_rhs - swrhs.uu_rhs))+"\t\t (should be zero)")
print("vv_rhs - swrhs.vv_rhs = "+str(sp.simplify(vv_rhs - swrhs.vv_rhs))+"\t\t (should be zero)")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_rhs - swrhs.uu_rhs = 0 (should be zero)
vv_rhs - swrhs.vv_rhs = 0 (should be zero)
###Markdown
Step 4: Setting up Initial Data for the Scalar Wave Equation \[Back to [top](toc)\]$$\label{id}$$ Step 4.a: The Monochromatic Plane-Wave Solution \[Back to [top](toc)\]$$\label{planewave}$$The solution to the scalar wave equation for a monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction is$$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$where $\hat{k}$ is a unit vector. We choose $f(\hat{k}\cdot\vec{x} - c t)$ to take the form$$f(\hat{k}\cdot\vec{x} - c t) = \sin\left(\hat{k}\cdot\vec{x} - c t\right) + 2,$$where we add the $+2$ to ensure that the exact solution never crosses through zero. In places where the exact solution passes through zero, the relative error (i.e., the measure of error to compare numerical with exact results) is undefined. Also, $f(\hat{k}\cdot\vec{x} - c t)$ plus a constant is still a solution to the wave equation.
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx # Sets the Cartesian coordinates xx[0]=x; xx[1]=y; xx[2]=z
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
kk = par.Cparameters("REAL", thismodule, ["kk0", "kk1", "kk2"],[1.0,1.0,1.0])
# Step 3: Normalize the k vector
kk_norm = sp.sqrt(kk[0]**2 + kk[1]**2 + kk[2]**2)
# Step 4: Compute k.x
dot_product = sp.sympify(0)
for i in range(DIM):
dot_product += xx[i]*kk[i]
dot_product /= kk_norm
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID_PlaneWave = sp.sin(dot_product - wavespeed*time)+2
vv_ID_PlaneWave = sp.diff(uu_ID_PlaneWave, time)
###Output
_____no_output_____
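###Markdown
One property worth making explicit: because the $(kk0,kk1,kk2)$ parameters are normalized before use, only the *direction* of $\hat{k}$ matters, not its magnitude. The following stand-alone sketch (using throwaway SymPy symbols, independent of the NRPy+ parameter machinery) confirms that rescaling all three components leaves the initial data unchanged.
###Code
# Because kk is normalized, rescaling all three of its components by the same
# factor must leave the plane-wave initial data unchanged.
import sympy as sp
x0, x1, x2, tt, cc = sp.symbols('x0 x1 x2 tt cc', real=True)

def plane_wave_uu(k0, k1, k2):
    norm = sp.sqrt(k0**2 + k1**2 + k2**2)
    return sp.sin((k0*x0 + k1*x1 + k2*x2)/norm - cc*tt) + 2

assert sp.simplify(plane_wave_uu(1, 1, 1) - plane_wave_uu(2, 2, 2)) == 0
###Output
_____no_output_____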
###Markdown
Next we verify that $f(\hat{k}\cdot\vec{x} - c t)$ satisfies the wave equation, by computing$$\left(c^2 \nabla^2 - \partial_t^2 \right)\ f\left(\hat{k}\cdot\vec{x} - c t\right),$$and confirming the result is exactly zero.
###Code
sp.simplify(wavespeed**2*(sp.diff(uu_ID_PlaneWave,xx[0],2) +
sp.diff(uu_ID_PlaneWave,xx[1],2) +
sp.diff(uu_ID_PlaneWave,xx[2],2))
- sp.diff(uu_ID_PlaneWave,time,2))
###Output
_____no_output_____
###Markdown
Step 4.b: The Spherical Gaussian Solution \[Back to [top](toc)\]$$\label{sphericalgaussian}$$Here we will implement the spherical Gaussian solution, which consists of ingoing and outgoing wave fronts:\begin{align}u(r,t) &= u_{\rm out}(r,t) + u_{\rm in}(r,t) + 1,\ \ \text{where}\\u_{\rm out}(r,t) &=\frac{r-ct}{r} \exp\left[\frac{-(r-ct)^2}{2 \sigma^2}\right] \\u_{\rm in}(r,t) &=\frac{r+ct}{r} \exp\left[\frac{-(r+ct)^2}{2 \sigma^2}\right] \\\end{align}where $c$ is the wave speed, and $\sigma$ is the width of the Gaussian (i.e., the "standard deviation").
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx # Sets the Cartesian coordinates xx[0]=x; xx[1]=y; xx[2]=z
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
sigma = par.Cparameters("REAL", thismodule, "sigma",3.0)
# Step 4: Compute r
r = sp.sympify(0)
for i in range(DIM):
r += xx[i]**2
r = sp.sqrt(r)
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID_SphericalGaussianOUT = +(r - wavespeed*time)/r * sp.exp( -(r - wavespeed*time)**2 / (2*sigma**2) )
uu_ID_SphericalGaussianIN = +(r + wavespeed*time)/r * sp.exp( -(r + wavespeed*time)**2 / (2*sigma**2) )
uu_ID_SphericalGaussian = uu_ID_SphericalGaussianOUT + uu_ID_SphericalGaussianIN + sp.sympify(1)
vv_ID_SphericalGaussian = sp.diff(uu_ID_SphericalGaussian, time)
###Output
_____no_output_____
###Markdown
Since the wave equation is linear, it suffices to check the outgoing and ingoing pieces separately: if each satisfies the wave equation, then so does their sum (and adding the constant 1 changes nothing). Next we verify that $u(r,t)$ satisfies the wave equation, by confirming that$$\left(c^2 \nabla^2 - \partial_t^2 \right)\left\{u_{\rm out}(r,t)\right\}$$and$$\left(c^2 \nabla^2 - \partial_t^2 \right)\left\{u_{\rm in}(r,t)\right\}$$are separately zero. We check the two pieces separately because SymPy has difficulty simplifying the combined expression.
###Code
print(sp.simplify(wavespeed**2*(sp.diff(uu_ID_SphericalGaussianOUT,xx[0],2) +
sp.diff(uu_ID_SphericalGaussianOUT,xx[1],2) +
sp.diff(uu_ID_SphericalGaussianOUT,xx[2],2))
- sp.diff(uu_ID_SphericalGaussianOUT,time,2)) )
print(sp.simplify(wavespeed**2*(sp.diff(uu_ID_SphericalGaussianIN,xx[0],2) +
sp.diff(uu_ID_SphericalGaussianIN,xx[1],2) +
sp.diff(uu_ID_SphericalGaussianIN,xx[2],2))
- sp.diff(uu_ID_SphericalGaussianIN,time,2)))
###Output
0
0
###Markdown
Step 5: Code Validation against `ScalarWave.InitialData` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation2}$$As a code validation check, we will verify agreement in the SymPy expressions for the plane-wave and spherical Gaussian initial data for the scalar wave equation between 1. this tutorial and 2. the NRPy+ [ScalarWave.InitialData](../edit/ScalarWave/InitialData.py) module.
###Code
# We just defined SymPy expressions for uu_ID and vv_ID in
# terms of other SymPy variables. Here, we will use the
# above-defined uu_ID and vv_ID to validate against the
# same expressions in the ScalarWave/InitialData.py
# module, to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the PlaneWave ID module itself.
#
# Step 6: Call the InitialData(Type="PlaneWave") function from within the
# ScalarWave/InitialData.py module,
# which should do exactly the same as in Steps 1-5 above.
import ScalarWave.InitialData as swid
swid.InitialData(Type="PlaneWave")
# Step 7: Consistency check between the tutorial notebook above
# and the PlaneWave option from within the
# ScalarWave/InitialData.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module: PlaneWave Case")
if sp.simplify(uu_ID_PlaneWave - swid.uu_ID) != 0:
print("TEST FAILED: uu_ID_PlaneWave - swid.uu_ID = "+str(sp.simplify(uu_ID_PlaneWave - swid.uu_ID))+"\t\t (should be zero)")
sys.exit(1)
if sp.simplify(vv_ID_PlaneWave - swid.vv_ID) != 0:
print("TEST FAILED: vv_ID_PlaneWave - swid.vv_ID = "+str(sp.simplify(vv_ID_PlaneWave - swid.vv_ID))+"\t\t (should be zero)")
sys.exit(1)
print("TESTS PASSED!")
# Step 8: Consistency check between the tutorial notebook above
# and the SphericalGaussian option from within the
# ScalarWave/InitialData.py module.
swid.InitialData(Type="SphericalGaussian")
print("Consistency check between ScalarWave tutorial and NRPy+ module: SphericalGaussian Case")
if sp.simplify(uu_ID_SphericalGaussian - swid.uu_ID) != 0:
print("TEST FAILED: uu_ID_SphericalGaussian - swid.uu_ID = "+str(sp.simplify(uu_ID_SphericalGaussian - swid.uu_ID))+"\t\t (should be zero)")
sys.exit(1)
if sp.simplify(vv_ID_SphericalGaussian - swid.vv_ID) != 0:
print("TEST FAILED: vv_ID_SphericalGaussian - swid.vv_ID = "+str(sp.simplify(vv_ID_SphericalGaussian - swid.vv_ID))+"\t\t (should be zero)")
sys.exit(1)
print("TESTS PASSED!")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module: PlaneWave Case
TESTS PASSED!
Consistency check between ScalarWave tutorial and NRPy+ module: SphericalGaussian Case
TESTS PASSED!
###Markdown
Step 6: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-ScalarWave.pdf](Tutorial-ScalarWave.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-ScalarWave")
###Output
Created Tutorial-ScalarWave.tex, and compiled LaTeX file to PDF file
Tutorial-ScalarWave.pdf
###Markdown
Generating C Code for the Scalar Wave Equation in Cartesian Coordinates Author: Zach Etienne Formatting improvements courtesy Brandon Clark This module generates the C Code for the Scalarwave in Cartesian coordinates and sets up [Plane Wave Initial Data](https://en.wikipedia.org/wiki/Initial_value_problem). **Module Status:** Validated **Validation Notes:** This tutorial module has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](code_validation). In addition, all expressions have been validated against a trusted code (the [original SENR/NRPy+ code](https://bitbucket.org/zach_etienne/nrpy)). NRPy+ Source Code for this module: * [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py)* [ScalarWave/InitialData_PlaneWave.py](../edit/ScalarWave/InitialData_PlaneWave.py) Introduction: Problem StatementWe wish to numerically solve the scalar wave equation as an [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem) in Cartesian coordinates:$$\partial_t^2 u = c^2 \nabla^2 u \text{,}$$where $u$ (the amplitude of the wave) is a function of time and space: $u = u(t,x,y,...)$ (spatial dimension as-yet unspecified) and $c$ is the wave speed, subject to some initial condition$$u(0,x,y,...) = f(x,y,...)$$and suitable spatial boundary conditions.As described in the next section, we will find it quite useful to define$$v(t,x,y,...) = \partial_t u(t,x,y,...).$$In this way, the second-order PDE is reduced to a set of two coupled first-order PDEs\begin{align}\partial_t u &= v \\\partial_t v &= c^2 \nabla^2 u.\end{align}We will use NRPy+ to generate efficient C codes capable of generating both initial data $u(0,x,y,...) = f(x,y,...)$; $v(0,x,y,...)=g(x,y,...)$, as well as finite-difference expressions for the right-hand sides of the above expressions. These expressions are needed within the *Method of Lines* to "integrate" the solution forward in time. The Method of LinesOnce we have initial data, we "evolve it forward in time", using the [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html). In short, the Method of Lines enables us to handle 1. the **spatial derivatives** of an initial value problem PDE using **standard finite difference approaches**, and2. the **temporal derivatives** of an initial value problem PDE using **standard strategies for solving ordinary differential equations (ODEs)**, so long as the initial value problem PDE can be written in the form$$\partial_t \vec{f} = \mathbf{M}\ \vec{f},$$where $\mathbf{M}$ is an $N\times N$ matrix filled with differential operators that act on the $N$-element column vector $\vec{f}$. $\mathbf{M}$ may not contain $t$ or time derivatives explicitly; only *spatial* partial derivatives are allowed to appear inside $\mathbf{M}$. The scalar wave equation as written in the [previous module](Tutorial-ScalarWave.ipynb)\begin{equation}\partial_t \begin{bmatrix}u \\v \end{bmatrix}=\begin{bmatrix}0 & 1 \\c^2 \nabla^2 & 0 \end{bmatrix}\begin{bmatrix}u \\v \end{bmatrix}\end{equation}satisfies this requirement. Thus we can treat the spatial derivatives $\nabla^2 u$ of the scalar wave equation using **standard finite-difference approaches**, and the temporal derivatives $\partial_t u$ and $\partial_t v$ using **standard approaches for solving ODEs**. 
In [the next module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will apply the highly robust [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4), used widely for numerically solving ODEs, to "march" (integrate) the solution vector $\vec{f}$ forward in time from its initial value ("initial data"). Basic AlgorithmThe basic algorithm for solving the scalar wave equation [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem), based on the Method of Lines (see section above) is outlined below, with NRPy+-based components highlighted in green. We will review how NRPy+ generates these core components in this module.1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. Set gridfunction values to initial data.1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following: 1. Evaluate scalar wave RHS expressions. 1. Apply boundary conditions.**We refer to the right-hand side of the equation $\partial_t \vec{f} = \mathbf{M}\ \vec{f}$ as the RHS. In this case, we refer to the $\mathbf{M}\ \vec{f}$ as the "scalar wave RHSs".** In the following sections we will 1. Use NRPy+ to cast the scalar wave RHS expressions -- in finite difference form -- into highly efficient C code, 1. first in one spatial dimension with fourth-order finite differences, 1. and then in three spatial dimensions with tenth-order finite differences.1. Use NRPy+ to generate monochromatic plane-wave initial data for the scalar wave equation, where the wave propagates in an arbitrary direction.As for the $\nabla^2 u$ term, spatial derivatives are handled in NRPy+ via [finite differencing](https://en.wikipedia.org/wiki/Finite_difference).We will sample the solution $\{u,v\}$ at discrete, uniformly-sampled points in space and time. For simplicity, let's assume that we consider the wave equation in one spatial dimension. Then the solution at any sampled point in space and time is given by$$u^n_i = u(t_n,x_i) = u(t_0 + n \Delta t, x_0 + i \Delta x),$$where $\Delta t$ and $\Delta x$ represent the temporal and spatial resolution, respectively. $v^n_i$ is sampled at the same points in space and time. Table of Contents$$\label{toc}$$1. [Step 1](initializenrpy): Initialize core NRPy+ modules1. [Step 2](step2): Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing1. [Step 3](step3): Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing 1. [Step 3.a](code_validation1): Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module1. [Step 4](step4): Plane-Wave Initial Data for the Scalar Wave Equation 1. [Step 4.a](code_validation2): Code Validation against `ScalarWave.InitialData_PlaneWave` NRPy+ module1. [Step 5](latex_pdf_output): Output this module to $\LaTeX$-formatted PDF file Step 1: Initialize core NRPy+ modules \[Back to [top](toc)\]$$\label{initializenrpy}$$Let's start by importing all the needed modules from NRPy+:
###Code
# Step P1: Import needed NRPy+ core modules:
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
import finite_difference as fin
from outputC import *
###Output
_____no_output_____
###Markdown
Step 2: Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing \[Back to [top](toc)\]$$\label{step2}$$To minimize complication, we will first restrict ourselves to solving the wave equation in one spatial dimension, so$$\nabla^2 u = \partial_x^2 u.$$Extension of this operator to higher spatial dimensions is straightforward, particularly when using NRPy+. As was discussed in [the finite difference section of the tutorial](Tutorial-Finite_Difference_Derivatives.ipynb), NRPy+ approximates derivatives using [finite difference methods](https://en.wikipedia.org/wiki/Finite_difference). The second-order derivative $\partial_x^2$, accurate to fourth order in the uniform grid spacing $\Delta x$ (obtained by fitting the unique 4th-degree polynomial to 5 sample points of $u$), is given by\begin{equation}\left[\partial_x^2 u(t,x)\right]_j = \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right)+ \mathcal{O}\left((\Delta x)^4\right).\end{equation}
###Code
# Step P2: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
thismodule = "ScalarWave"
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 1: Set the spatial dimension parameter, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",1)
DIM = par.parval_from_str("grid::DIM")
# Step 2: Set the finite differencing order to 4.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4)
# Step 3: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 4: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 5: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
vv_rhs = sp.simplify(vv_rhs)
# Step 6: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)])
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expression:
* "const double uu_dDD00 = invdx0**2*(-5*uu/2 + 4*uu_i0m1/3 - uu_i0m2/12 + 4*uu_i0p1/3 - uu_i0p2/12)"
*/
const double uu_i0m2 = in_gfs[IDX2(UUGF, i0-2)];
const double uu_i0m1 = in_gfs[IDX2(UUGF, i0-1)];
const double uu = in_gfs[IDX2(UUGF, i0)];
const double uu_i0p1 = in_gfs[IDX2(UUGF, i0+1)];
const double uu_i0p2 = in_gfs[IDX2(UUGF, i0+2)];
const double vv = in_gfs[IDX2(VVGF, i0)];
const double uu_dDD00 = pow(invdx0, 2)*(-5.0/2.0*uu + (4.0/3.0)*uu_i0m1 - 1.0/12.0*uu_i0m2 + (4.0/3.0)*uu_i0p1 - 1.0/12.0*uu_i0p2);
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[rhs_gfs[IDX2(UUGF, i0)] = vv,
* rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*wavespeed**2]"
*/
rhs_gfs[IDX2(UUGF, i0)] = vv;
rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*pow(wavespeed, 2);
}
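###Markdown
To see the advertised fourth-order accuracy concretely, here is a small stand-alone numerical check (a sketch using plain NumPy, independent of the generated C code): applying the stencil above to $\sin(x)$, the error in the second derivative should shrink by roughly a factor of $2^4=16$ each time $\Delta x$ is halved.
###Code
# Numerical sanity check of the 4th-order stencil: the error in d^2 sin/dx^2
# should scale like dx^4, i.e. drop by ~16x when dx is halved.
import numpy as np

def d2_4th_order(f, x0, dx):
    # Same coefficients as above: (-1/12, 4/3, -5/2, 4/3, -1/12)/dx^2,
    # written here over a common denominator of 12*dx^2.
    return (-f(x0 - 2*dx) + 16*f(x0 - dx) - 30*f(x0)
            + 16*f(x0 + dx) - f(x0 + 2*dx)) / (12*dx**2)

x0 = 0.3   # arbitrary sample point
errs = [abs(d2_4th_order(np.sin, x0, dx) - (-np.sin(x0))) for dx in (0.1, 0.05)]
assert 12.0 < errs[0]/errs[1] < 20.0   # ~2^4 = 16 for a fourth-order method
###Output
_____no_output_____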
###Markdown
**Success!** Notice that indeed NRPy+ was able to compute the spatial derivative operator,\begin{equation}\left[\partial_x^2 u(t,x)\right]_j \approx \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right),\end{equation}correctly (this is easiest to read in the "Original SymPy expressions" comment block at the top of the C output). Note that `invdx0`$=1/\Delta x_0$, where $\Delta x_0$ is the (uniform) grid spacing in the zeroth, or $x_0$, direction. Step 3: Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing \[Back to [top](toc)\]$$\label{step3}$$Let's next repeat the same process, only this time at **10th** finite difference order, for the **3-spatial-dimension** scalar wave equation, with SIMD enabled:
###Code
# Step 1: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 2: Set the spatial dimension parameter
#          to *THREE* this time, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")
# Step 3: Set the finite differencing order to 10.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",10)
# Step 4a: Reset gridfunctions registered in 1D case above,
# to avoid NRPy+ throwing an error about double-
# registering gridfunctions, which is not allowed.
gri.glb_gridfcs_list = []
# Step 4b: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 5: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 6: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
# Step 7: Simplify the expression for c^2 \nabla^2 u (a.k.a., vv_rhs):
vv_rhs = sp.simplify(vv_rhs)
# Step 8: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)],params="SIMD_enable=True")
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY uu_dDD00 = invdx0**2*(-5269*uu/1800 + 5*uu_i0m1_i1_i2/3 - 5*uu_i0m2_i1_i2/21 + 5*uu_i0m3_i1_i2/126 - 5*uu_i0m4_i1_i2/1008 + uu_i0m5_i1_i2/3150 + 5*uu_i0p1_i1_i2/3 - 5*uu_i0p2_i1_i2/21 + 5*uu_i0p3_i1_i2/126 - 5*uu_i0p4_i1_i2/1008 + uu_i0p5_i1_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD11 = invdx1**2*(-5269*uu/1800 + 5*uu_i0_i1m1_i2/3 - 5*uu_i0_i1m2_i2/21 + 5*uu_i0_i1m3_i2/126 - 5*uu_i0_i1m4_i2/1008 + uu_i0_i1m5_i2/3150 + 5*uu_i0_i1p1_i2/3 - 5*uu_i0_i1p2_i2/21 + 5*uu_i0_i1p3_i2/126 - 5*uu_i0_i1p4_i2/1008 + uu_i0_i1p5_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD22 = invdx2**2*(-5269*uu/1800 + 5*uu_i0_i1_i2m1/3 - 5*uu_i0_i1_i2m2/21 + 5*uu_i0_i1_i2m3/126 - 5*uu_i0_i1_i2m4/1008 + uu_i0_i1_i2m5/3150 + 5*uu_i0_i1_i2p1/3 - 5*uu_i0_i1_i2p2/21 + 5*uu_i0_i1_i2p3/126 - 5*uu_i0_i1_i2p4/1008 + uu_i0_i1_i2p5/3150)]"
*/
const REAL_SIMD_ARRAY uu_i0_i1_i2m5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-5)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-1)]);
const REAL_SIMD_ARRAY uu_i0_i1m5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-1,i2)]);
const REAL_SIMD_ARRAY uu_i0m5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-1,i1,i2)]);
const REAL_SIMD_ARRAY uu = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+1,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+1)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+5)]);
const REAL_SIMD_ARRAY vv = ReadSIMD(&in_gfs[IDX4(VVGF, i0,i1,i2)]);
const double tmpFD_Rational_1_3150_FDcoeff = 0.0003174603174603174603174603174603175;
const REAL_SIMD_ARRAY _Rational_1_3150_FDcoeff = ConstSIMD(tmpFD_Rational_1_3150_FDcoeff);
const double tmpFD_Rational_5_126_FDcoeff = 0.03968253968253968253968253968253968;
const REAL_SIMD_ARRAY _Rational_5_126_FDcoeff = ConstSIMD(tmpFD_Rational_5_126_FDcoeff);
const double tmpFD_Rational_5_3_FDcoeff = 1.666666666666666666666666666666667;
const REAL_SIMD_ARRAY _Rational_5_3_FDcoeff = ConstSIMD(tmpFD_Rational_5_3_FDcoeff);
const double tmpFD_Rational_m5269_1800_FDcoeff = -2.927222222222222222222222222222222;
const REAL_SIMD_ARRAY _Rational_m5269_1800_FDcoeff = ConstSIMD(tmpFD_Rational_m5269_1800_FDcoeff);
const double tmpFD_Rational_m5_1008_FDcoeff = -0.004960317460317460317460317460317460;
const REAL_SIMD_ARRAY _Rational_m5_1008_FDcoeff = ConstSIMD(tmpFD_Rational_m5_1008_FDcoeff);
const double tmpFD_Rational_m5_21_FDcoeff = -0.2380952380952380952380952380952381;
const REAL_SIMD_ARRAY _Rational_m5_21_FDcoeff = ConstSIMD(tmpFD_Rational_m5_21_FDcoeff);
const REAL_SIMD_ARRAY tmpFD0 = MulSIMD(uu, _Rational_m5269_1800_FDcoeff);
const REAL_SIMD_ARRAY uu_dDD00 = MulSIMD(FusedMulAddSIMD(uu_i0p4_i1_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0p3_i1_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0p2_i1_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0p1_i1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0m5_i1_i2, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0m4_i1_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0m3_i1_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0m2_i1_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0m1_i1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0p5_i1_i2, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx0, invdx0));
const REAL_SIMD_ARRAY uu_dDD11 = MulSIMD(FusedMulAddSIMD(uu_i0_i1p4_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1p3_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1p2_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1p1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1m5_i2, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0_i1m4_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1m3_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1m2_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1m1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1p5_i2, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx1, invdx1));
const REAL_SIMD_ARRAY uu_dDD22 = MulSIMD(FusedMulAddSIMD(uu_i0_i1_i2p4, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p3, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p1, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m5, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m4, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m3, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m1, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p5, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx2, invdx2));
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY __RHS_exp_0 = vv,
* const REAL_SIMD_ARRAY __RHS_exp_1 = wavespeed**2*(uu_dDD00 + uu_dDD11 + uu_dDD22)]"
*/
const REAL_SIMD_ARRAY __RHS_exp_0 = vv;
const REAL_SIMD_ARRAY __RHS_exp_1 = MulSIMD(AddSIMD(uu_dDD11, AddSIMD(uu_dDD00, uu_dDD22)), MulSIMD(wavespeed, wavespeed));
WriteSIMD(&rhs_gfs[IDX4(UUGF, i0, i1, i2)], __RHS_exp_0);
WriteSIMD(&rhs_gfs[IDX4(VVGF, i0, i1, i2)], __RHS_exp_1);
}
###Markdown
Step 3.a: Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation1}$$Here, as a code validation check, we verify agreement in the SymPy expressions for the RHSs of the three-spatial-dimension Scalar Wave equation (i.e., `uu_rhs` and `vv_rhs`) between1. this tutorial and 2. the [NRPy+ ScalarWave.ScalarWave_RHSs](../edit/ScalarWave/ScalarWave_RHSs.py) module.
###Code
# Step 10: We already have SymPy expressions for uu_rhs and vv_rhs in
# terms of other SymPy variables. Even if we reset the list
# of NRPy+ gridfunctions, these *SymPy* expressions for
# uu_rhs and vv_rhs *will remain unaffected*.
#
# Here, we will use the above-defined uu_rhs and vv_rhs to
# validate against the same expressions in the
# ScalarWave/ScalarWave_RHSs.py module,
# to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the ScalarWave_RHSs.py module itself.
#
# Reset the list of gridfunctions, as registering a gridfunction
# twice will spawn an error.
gri.glb_gridfcs_list = []
# Step 11: Call the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module,
# which should do exactly the same as in Steps 1-10 above.
import ScalarWave.ScalarWave_RHSs as swrhs
swrhs.ScalarWave_RHSs()
# Step 12: Consistency check between the tutorial module above
# and the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_rhs - swrhs.uu_rhs = "+str(sp.simplify(uu_rhs - swrhs.uu_rhs))+"\t\t (should be zero)")
print("vv_rhs - swrhs.vv_rhs = "+str(sp.simplify(vv_rhs - swrhs.vv_rhs))+"\t\t (should be zero)")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_rhs - swrhs.uu_rhs = 0 (should be zero)
vv_rhs - swrhs.vv_rhs = 0 (should be zero)
###Markdown
Step 4: Plane-Wave Initial Data for the Scalar Wave Equation \[Back to [top](toc)\]$$\label{step4}$$The solution to the scalar wave equation for a monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction is$$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$where $\hat{k}$ is a unit vector. We choose $f(\hat{k}\cdot\vec{x} - c t)$ to take the form$$f(\hat{k}\cdot\vec{x} - c t) = \sin\left(\hat{k}\cdot\vec{x} - c t\right) + 2,$$where we add the $+2$ to ensure that the exact solution never crosses through zero. In places where the exact solution passes through zero, the relative error (i.e., the measure of error to compare numerical with exact results) is undefined. Also, $f(\hat{k}\cdot\vec{x} - c t)$ plus a constant is still a solution to the wave equation.
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
kk = par.Cparameters("REAL", thismodule, ["kk0", "kk1", "kk2"],[1.0,1.0,1.0])
# Step 3: Normalize the k vector
kk_norm = sp.sqrt(kk[0]**2 + kk[1]**2 + kk[2]**2)
# Step 4: Compute k.x
dot_product = sp.sympify(0)
for i in range(DIM):
dot_product += xx[i]*kk[i]
dot_product /= kk_norm
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID = sp.sin(dot_product - wavespeed*time)+2
vv_ID = sp.diff(uu_ID, time)
###Output
_____no_output_____
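###Markdown
Differentiating the data above by hand gives the closed form $v = \partial_t u = -c\,\cos\left(\hat{k}\cdot\vec{x} - c t\right)$. The following optional cell is a small sketch that reuses the SymPy objects defined in the previous cell (`vv_ID`, `dot_product`, `wavespeed`, `time`) to confirm `vv_ID` agrees with this expression.
###Code
# Optional check: vv_ID = d(uu_ID)/dt should equal -wavespeed*cos(k_hat.x - wavespeed*t).
assert sp.simplify(vv_ID + wavespeed*sp.cos(dot_product - wavespeed*time)) == 0
###Output
_____no_output_____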
###Markdown
Next we verify that $f(\hat{k}\cdot\vec{x} - c t)$ satisfies the wave equation, by computing$$\left(c^2 \nabla^2 - \partial_t^2 \right)\ f\left(\hat{k}\cdot\vec{x} - c t\right),$$and confirming the result is exactly zero.
###Code
sp.simplify(wavespeed**2*(sp.diff(uu_ID,xx[0],2) +
sp.diff(uu_ID,xx[1],2) +
sp.diff(uu_ID,xx[2],2))
- sp.diff(uu_ID,time,2))
###Output
_____no_output_____
###Markdown
Step 4.a: Code Validation against `ScalarWave.InitialData_PlaneWave` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation2}$$As a code validation check, we will verify agreement in the SymPy expressions for plane-wave initial data for the Scalar Wave equation between1. this tutorial and 2. the NRPy+ [ScalarWave.InitialData_PlaneWave](../edit/ScalarWave/InitialData_PlaneWave.py) module.
###Code
# We just defined SymPy expressions for uu_ID and vv_ID in
# terms of other SymPy variables. Here, we will use the
# above-defined uu_ID and vv_ID to validate against the
# same expressions in the ScalarWave/InitialData_PlaneWave.py
# module, to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the PlaneWave ID module itself.
#
# Step 6: Call the InitialData_PlaneWave() function from within the
# ScalarWave/InitialData_PlaneWave.py module,
# which should do exactly the same as in Steps 1-5 above.
import ScalarWave.InitialData_PlaneWave as swid
swid.InitialData_PlaneWave()
# Step 7: Consistency check between the tutorial module above
# and the InitialData_PlaneWave() function from within the
# ScalarWave/InitialData_PlaneWave.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_ID - swid.uu_ID = "+str(sp.simplify(uu_ID - swid.uu_ID))+"\t\t (should be zero)")
print("vv_ID - swid.vv_ID = "+str(sp.simplify(vv_ID - swid.vv_ID))+"\t\t (should be zero)")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_ID - swid.uu_ID = 0 (should be zero)
vv_ID - swid.vv_ID = 0 (should be zero)
###Markdown
Step 5: Output this module to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-ScalarWave.pdf](Tutorial-ScalarWave.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx Tutorial-ScalarWave.ipynb
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!rm -f Tut*.out Tut*.aux Tut*.log
###Output
[NbConvertApp] Converting notebook Tutorial-ScalarWave.ipynb to latex
[NbConvertApp] Writing 64896 bytes to Tutorial-ScalarWave.tex
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
###Markdown
Generating C Code for the Scalar Wave Equation in Cartesian Coordinates Author: Zach Etienne Formatting improvements courtesy Brandon Clark This module generates the C Code for the Scalarwave in Cartesian coordinates and sets up [Plane Wave Initial Data](https://en.wikipedia.org/wiki/Initial_value_problem). NRPy+ Source Code for this module: * [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py)* [ScalarWave/InitialData_PlaneWave.py](../edit/ScalarWave/InitialData_PlaneWave.py) Introduction: Problem StatementWe wish to numerically solve the scalar wave equation as an [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem) in Cartesian coordinates:$$\partial_t^2 u = c^2 \nabla^2 u \text{,}$$where $u$ (the amplitude of the wave) is a function of time and space: $u = u(t,x,y,...)$ (spatial dimension as-yet unspecified) and $c$ is the wave speed, subject to some initial condition$$u(0,x,y,...) = f(x,y,...)$$and suitable spatial boundary conditions.As described in the next section, we will find it quite useful to define$$v(t,x,y,...) = \partial_t u(t,x,y,...).$$In this way, the second-order PDE is reduced to a set of two coupled first-order PDEs\begin{align}\partial_t u &= v \\\partial_t v &= c^2 \nabla^2 u.\end{align}We will use NRPy+ to generate efficient C codes capable of generating both initial data $u(0,x,y,...) = f(x,y,...)$; $v(0,x,y,...)=g(x,y,...)$, as well as finite-difference expressions for the right-hand sides of the above expressions. These expressions are needed within the *Method of Lines* to "integrate" the solution forward in time. The Method of LinesOnce we have initial data, we "evolve it forward in time", using the [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html). In short, the Method of Lines enables us to handle 1. the **spatial derivatives** of an initial value problem PDE using **standard finite difference approaches**, and2. the **temporal derivatives** of an initial value problem PDE using **standard strategies for solving ordinary differential equations (ODEs)**, so long as the initial value problem PDE can be written in the form$$\partial_t \vec{f} = \mathbf{M}\ \vec{f},$$where $\mathbf{M}$ is an $N\times N$ matrix filled with differential operators that act on the $N$-element column vector $\vec{f}$. $\mathbf{M}$ may not contain $t$ or time derivatives explicitly; only *spatial* partial derivatives are allowed to appear inside $\mathbf{M}$. The scalar wave equation as written in the [previous module](Tutorial-ScalarWave.ipynb)\begin{equation}\partial_t \begin{bmatrix}u \\v \end{bmatrix}=\begin{bmatrix}0 & 1 \\c^2 \nabla^2 & 0 \end{bmatrix}\begin{bmatrix}u \\v \end{bmatrix}\end{equation}satisfies this requirement. Thus we can treat the spatial derivatives $\nabla^2 u$ of the scalar wave equation using **standard finite-difference approaches**, and the temporal derivatives $\partial_t u$ and $\partial_t v$ using **standard approaches for solving ODEs**. In [the next module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will apply the highly robust [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4), used widely for numerically solving ODEs, to "march" (integrate) the solution vector $\vec{f}$ forward in time from its initial value ("initial data"). 
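Before turning to the NRPy+ machinery, it may help to see what a Method-of-Lines step looks like in plain code. The sketch below is an illustration only (it is **not** part of the NRPy+ workflow): it assumes one spatial dimension, periodic boundaries, and the fourth-order stencil derived in Step 2 of this module, and it advances the state vector $\vec{f}=(u,v)$ by a single RK4 step.
###Code
# Minimal Method-of-Lines sketch (illustration only; not generated by NRPy+).
# Assumes 1D, periodic boundaries, and the fourth-order stencil from Step 2 below.
import numpy as np

def scalarwave_rhs(state, c, dx):
    """Evaluate M f for f = (u, v): u_rhs = v, v_rhs = c^2 * u_xx."""
    u, v = state
    # Fourth-order accurate second derivative; np.roll implements periodic wrapping.
    u_xx = (-(np.roll(u, 2) + np.roll(u, -2))/12.0
            + 4.0*(np.roll(u, 1) + np.roll(u, -1))/3.0
            - 2.5*u) / dx**2
    return np.array([v, c**2 * u_xx])

def rk4_step(state, dt, c, dx):
    """One explicit RK4 step: f_{n+1} = f_n + (dt/6)(k1 + 2 k2 + 2 k3 + k4)."""
    k1 = scalarwave_rhs(state,             c, dx)
    k2 = scalarwave_rhs(state + 0.5*dt*k1, c, dx)
    k3 = scalarwave_rhs(state + 0.5*dt*k2, c, dx)
    k4 = scalarwave_rhs(state + dt*k3,     c, dx)
    return state + dt*(k1 + 2.0*k2 + 2.0*k3 + k4)/6.0
###Output
_____no_output_____
###Markdown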
Basic AlgorithmThe basic algorithm for solving the scalar wave equation [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem), based on the Method of Lines (see section above) is outlined below, with NRPy+-based components highlighted in green. We will review how NRPy+ generates these core components in this module.1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. Set gridfunction values to initial data.1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following: 1. Evaluate scalar wave RHS expressions. 1. Apply boundary conditions.**We refer to the right-hand side of the equation $\partial_t \vec{f} = \mathbf{M}\ \vec{f}$ as the RHS. In this case, we refer to the $\mathbf{M}\ \vec{f}$ as the "scalar wave RHSs".** In the following sections we will 1. Use NRPy+ to cast the scalar wave RHS expressions -- in finite difference form -- into highly efficient C code, 1. first in one spatial dimension with fourth-order finite differences, 1. and then in three spatial dimensions with tenth-order finite differences.1. Use NRPy+ to generate monochromatic plane-wave initial data for the scalar wave equation, where the wave propagates in an arbitrary direction.As for the $\nabla^2 u$ term, spatial derivatives are handled in NRPy+ via [finite differencing](https://en.wikipedia.org/wiki/Finite_difference).We will sample the solution $\{u,v\}$ at discrete, uniformly-sampled points in space and time. For simplicity, let's assume that we consider the wave equation in one spatial dimension. Then the solution at any sampled point in space and time is given by$$u^n_i = u(t_n,x_i) = u(t_0 + n \Delta t, x_0 + i \Delta x),$$where $\Delta t$ and $\Delta x$ represent the temporal and spatial resolution, respectively. $v^n_i$ is sampled at the same points in space and time. Table of Contents$$\label{toc}$$1. [Step 1](initializenrpy): Initialize core NRPy+ modules1. [Step 2](step2): Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing1. [Step 3](step3): Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing 1. [Step 3.a](code_validation1): Code Validation against ScalarWave.ScalarWave_RHSs NRPy+ module1. [Step 4](step4): Plane-Wave Initial Data for the Scalar Wave Equation 1. [Step 4.a](code_validation2): Code Validation against ScalarWave.InitialData_PlaneWave NRPy+ module1. [Step 5](latex_pdf_output): Output this module to $\LaTeX$-formatted PDF file Step 1: Initialize core NRPy+ modules \[Back to [top](toc)\]$$\label{initializenrpy}$$Let's start by importing all the needed modules from NRPy+:
###Code
# Step P1: Import needed NRPy+ core modules:
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
import finite_difference as fin
from outputC import *
###Output
_____no_output_____
###Markdown
Step 2: Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing \[Back to [top](toc)\]$$\label{step2}$$To minimize complication, we will first restrict ourselves to solving the wave equation in one spatial dimension, so$$\nabla^2 u = \partial_x^2 u.$$Extension of this operator to higher spatial dimensions is straightforward, particularly when using NRPy+. As was discussed in [the finite difference section of the tutorial](Tutorial-Finite_Difference_Derivatives.ipynb), NRPy+ approximates derivatives using [finite difference methods](https://en.wikipedia.org/wiki/Finite_difference). The second-order derivative $\partial_x^2$, accurate to fourth order in the uniform grid spacing $\Delta x$ (obtained by fitting the unique 4th-degree polynomial to 5 sample points of $u$), is given by\begin{equation}\left[\partial_x^2 u(t,x)\right]_j = \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right)+ \mathcal{O}\left((\Delta x)^4\right).\end{equation}
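As an optional, quick sanity check (a sketch that is not part of the NRPy+ workflow itself), these stencil coefficients can be reproduced directly with SymPy's `finite_diff_weights()`:
###Code
# Optional check of the fourth-order stencil coefficients quoted above.
# (Illustration only; NRPy+ computes its own finite-difference weights internally.)
import sympy as sp
offsets = [-2, -1, 0, 1, 2]                  # sample points u_{j-2}, ..., u_{j+2}
weights = sp.finite_diff_weights(2, offsets, 0)[2][-1]
expected = [sp.Rational(-1, 12), sp.Rational(4, 3), sp.Rational(-5, 2),
            sp.Rational(4, 3), sp.Rational(-1, 12)]
assert weights == expected                   # matches -1/12, 4/3, -5/2, 4/3, -1/12
###Output
_____no_output_____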
###Code
# Step P2: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
thismodule = "ScalarWave"
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed")
# Step 1: Set the spatial dimension parameter, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",1)
DIM = par.parval_from_str("grid::DIM")
# Step 2: Set the finite differencing order to 4.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4)
# Step 3: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 4: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 5: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
vv_rhs = sp.simplify(vv_rhs)
# Step 6: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)])
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expression:
* "const double uu_dDD00 = invdx0**2*(-5*uu/2 + 4*uu_i0m1/3 - uu_i0m2/12 + 4*uu_i0p1/3 - uu_i0p2/12)"
*/
const double uu_i0m2 = in_gfs[IDX2(UUGF, i0-2)];
const double uu_i0m1 = in_gfs[IDX2(UUGF, i0-1)];
const double uu = in_gfs[IDX2(UUGF, i0)];
const double uu_i0p1 = in_gfs[IDX2(UUGF, i0+1)];
const double uu_i0p2 = in_gfs[IDX2(UUGF, i0+2)];
const double vv = in_gfs[IDX2(VVGF, i0)];
const double uu_dDD00 = pow(invdx0, 2)*(-5.0/2.0*uu + (4.0/3.0)*uu_i0m1 - 1.0/12.0*uu_i0m2 + (4.0/3.0)*uu_i0p1 - 1.0/12.0*uu_i0p2);
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[rhs_gfs[IDX2(UUGF, i0)] = vv,
* rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*wavespeed**2]"
*/
rhs_gfs[IDX2(UUGF, i0)] = vv;
rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*pow(wavespeed, 2);
}
###Markdown
**Success!** Notice that indeed NRPy+ was able to compute the spatial derivative operator,\begin{equation}\left[\partial_x^2 u(t,x)\right]_j \approx \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right),\end{equation}correctly (this is easier to read in the "Original SymPy expressions" comment block at the top of the C output). Note that $\texttt{invdx0}=1/\Delta x_0$, where $\Delta x_0$ is the (uniform) grid spacing in the zeroth, or $x_0$, direction. Step 3: Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing \[Back to [top](toc)\]$$\label{step3}$$Let's next repeat the same process, only this time at **10th** finite difference order, for the **3-spatial-dimension** scalar wave equation, with SIMD enabled:
###Code
# Step 1: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed")
# Step 2: Set the spatial dimension parameter
#            to *THREE* this time, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")
# Step 3: Set the finite differencing order to 10.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",10)
# Step 4a: Reset gridfunctions registered in 1D case above,
# to avoid NRPy+ throwing an error about double-
# registering gridfunctions, which is not allowed.
gri.glb_gridfcs_list = []
# Step 4b: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 5: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 6: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
# Step 7: Simplify the expression for c^2 \nabla^2 u (a.k.a., vv_rhs):
vv_rhs = sp.simplify(vv_rhs)
# Step 8: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)],params="SIMD_enable=True")
###Output
initialize_param() minor warning: Did nothing; already initialized parameter ScalarWave::wavespeed
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY uu_dDD00 = invdx0**2*(-5269*uu/1800 + 5*uu_i0m1_i1_i2/3 - 5*uu_i0m2_i1_i2/21 + 5*uu_i0m3_i1_i2/126 - 5*uu_i0m4_i1_i2/1008 + uu_i0m5_i1_i2/3150 + 5*uu_i0p1_i1_i2/3 - 5*uu_i0p2_i1_i2/21 + 5*uu_i0p3_i1_i2/126 - 5*uu_i0p4_i1_i2/1008 + uu_i0p5_i1_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD11 = invdx1**2*(-5269*uu/1800 + 5*uu_i0_i1m1_i2/3 - 5*uu_i0_i1m2_i2/21 + 5*uu_i0_i1m3_i2/126 - 5*uu_i0_i1m4_i2/1008 + uu_i0_i1m5_i2/3150 + 5*uu_i0_i1p1_i2/3 - 5*uu_i0_i1p2_i2/21 + 5*uu_i0_i1p3_i2/126 - 5*uu_i0_i1p4_i2/1008 + uu_i0_i1p5_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD22 = invdx2**2*(-5269*uu/1800 + 5*uu_i0_i1_i2m1/3 - 5*uu_i0_i1_i2m2/21 + 5*uu_i0_i1_i2m3/126 - 5*uu_i0_i1_i2m4/1008 + uu_i0_i1_i2m5/3150 + 5*uu_i0_i1_i2p1/3 - 5*uu_i0_i1_i2p2/21 + 5*uu_i0_i1_i2p3/126 - 5*uu_i0_i1_i2p4/1008 + uu_i0_i1_i2p5/3150)]"
*/
const REAL_SIMD_ARRAY uu_i0_i1_i2m5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-5)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-1)]);
const REAL_SIMD_ARRAY uu_i0_i1m5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-1,i2)]);
const REAL_SIMD_ARRAY uu_i0m5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-1,i1,i2)]);
const REAL_SIMD_ARRAY uu = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+1,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+1)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+5)]);
const REAL_SIMD_ARRAY vv = ReadSIMD(&in_gfs[IDX4(VVGF, i0,i1,i2)]);
const double tmpFD_Rational_1_3150 = 0.0003174603174603174603174603174603175;
const REAL_SIMD_ARRAY _Rational_1_3150 = ConstSIMD(tmpFD_Rational_1_3150);
const double tmpFD_Rational_5_126 = 0.03968253968253968253968253968253968;
const REAL_SIMD_ARRAY _Rational_5_126 = ConstSIMD(tmpFD_Rational_5_126);
const double tmpFD_Rational_5_3 = 1.666666666666666666666666666666667;
const REAL_SIMD_ARRAY _Rational_5_3 = ConstSIMD(tmpFD_Rational_5_3);
const double tmpFD_Rational_m5269_1800 = -2.927222222222222222222222222222222;
const REAL_SIMD_ARRAY _Rational_m5269_1800 = ConstSIMD(tmpFD_Rational_m5269_1800);
const double tmpFD_Rational_m5_1008 = -0.004960317460317460317460317460317460;
const REAL_SIMD_ARRAY _Rational_m5_1008 = ConstSIMD(tmpFD_Rational_m5_1008);
const double tmpFD_Rational_m5_21 = -0.2380952380952380952380952380952381;
const REAL_SIMD_ARRAY _Rational_m5_21 = ConstSIMD(tmpFD_Rational_m5_21);
const REAL_SIMD_ARRAY tmpFD0 = MulSIMD(uu, _Rational_m5269_1800);
const REAL_SIMD_ARRAY uu_dDD00 = MulSIMD(FusedMulAddSIMD(uu_i0p4_i1_i2, _Rational_m5_1008, FusedMulAddSIMD(uu_i0p3_i1_i2, _Rational_5_126, FusedMulAddSIMD(uu_i0p2_i1_i2, _Rational_m5_21, FusedMulAddSIMD(uu_i0p1_i1_i2, _Rational_5_3, FusedMulAddSIMD(uu_i0m5_i1_i2, _Rational_1_3150, FusedMulAddSIMD(uu_i0m4_i1_i2, _Rational_m5_1008, FusedMulAddSIMD(uu_i0m3_i1_i2, _Rational_5_126, FusedMulAddSIMD(uu_i0m2_i1_i2, _Rational_m5_21, FusedMulAddSIMD(uu_i0m1_i1_i2, _Rational_5_3, FusedMulAddSIMD(uu_i0p5_i1_i2, _Rational_1_3150, tmpFD0)))))))))), MulSIMD(invdx0, invdx0));
const REAL_SIMD_ARRAY uu_dDD11 = MulSIMD(FusedMulAddSIMD(uu_i0_i1p4_i2, _Rational_m5_1008, FusedMulAddSIMD(uu_i0_i1p3_i2, _Rational_5_126, FusedMulAddSIMD(uu_i0_i1p2_i2, _Rational_m5_21, FusedMulAddSIMD(uu_i0_i1p1_i2, _Rational_5_3, FusedMulAddSIMD(uu_i0_i1m5_i2, _Rational_1_3150, FusedMulAddSIMD(uu_i0_i1m4_i2, _Rational_m5_1008, FusedMulAddSIMD(uu_i0_i1m3_i2, _Rational_5_126, FusedMulAddSIMD(uu_i0_i1m2_i2, _Rational_m5_21, FusedMulAddSIMD(uu_i0_i1m1_i2, _Rational_5_3, FusedMulAddSIMD(uu_i0_i1p5_i2, _Rational_1_3150, tmpFD0)))))))))), MulSIMD(invdx1, invdx1));
const REAL_SIMD_ARRAY uu_dDD22 = MulSIMD(FusedMulAddSIMD(uu_i0_i1_i2p4, _Rational_m5_1008, FusedMulAddSIMD(uu_i0_i1_i2p3, _Rational_5_126, FusedMulAddSIMD(uu_i0_i1_i2p2, _Rational_m5_21, FusedMulAddSIMD(uu_i0_i1_i2p1, _Rational_5_3, FusedMulAddSIMD(uu_i0_i1_i2m5, _Rational_1_3150, FusedMulAddSIMD(uu_i0_i1_i2m4, _Rational_m5_1008, FusedMulAddSIMD(uu_i0_i1_i2m3, _Rational_5_126, FusedMulAddSIMD(uu_i0_i1_i2m2, _Rational_m5_21, FusedMulAddSIMD(uu_i0_i1_i2m1, _Rational_5_3, FusedMulAddSIMD(uu_i0_i1_i2p5, _Rational_1_3150, tmpFD0)))))))))), MulSIMD(invdx2, invdx2));
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY __RHS_exp_0 = vv,
* const REAL_SIMD_ARRAY __RHS_exp_1 = wavespeed**2*(uu_dDD00 + uu_dDD11 + uu_dDD22)]"
*/
const REAL_SIMD_ARRAY __RHS_exp_0 = vv;
const REAL_SIMD_ARRAY __RHS_exp_1 = MulSIMD(AddSIMD(uu_dDD11, AddSIMD(uu_dDD00, uu_dDD22)), MulSIMD(wavespeed, wavespeed));
WriteSIMD(&rhs_gfs[IDX4(UUGF, i0, i1, i2)], __RHS_exp_0);
WriteSIMD(&rhs_gfs[IDX4(VVGF, i0, i1, i2)], __RHS_exp_1);
}
###Markdown
Step 3.a: Code Validation against ScalarWave.ScalarWave_RHSs NRPy+ module \[Back to [top](toc)\]$$\label{code_validation1}$$Here, as a code validation check, we verify agreement in the SymPy expressions for the RHSs of the three-spatial-dimension Scalar Wave equation (i.e., uu_rhs and vv_rhs) between1. this tutorial and 2. the [NRPy+ ScalarWave.ScalarWave_RHSs](../edit/ScalarWave/ScalarWave_RHSs.py) module.
###Code
# Step 10: We already have SymPy expressions for uu_rhs and vv_rhs in
# terms of other SymPy variables. Even if we reset the list
# of NRPy+ gridfunctions, these *SymPy* expressions for
# uu_rhs and vv_rhs *will remain unaffected*.
#
# Here, we will use the above-defined uu_rhs and vv_rhs to
# validate against the same expressions in the
# ScalarWave/ScalarWave_RHSs.py module,
# to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the ScalarWave_RHSs.py module itself.
#
# Reset the list of gridfunctions, as registering a gridfunction
# twice will spawn an error.
gri.glb_gridfcs_list = []
# Step 11: Call the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module,
# which should do exactly the same as in Steps 1-10 above.
import ScalarWave.ScalarWave_RHSs as swrhs
swrhs.ScalarWave_RHSs()
# Step 12: Consistency check between the tutorial module above
# and the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module.
# It is SAFE to ignore the warning above about re-initializing the parameter wavespeed.
print("^^^ Ignore the minor warning above. ^^^\n")
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_rhs - swrhs.uu_rhs: Should be zero: ",sp.simplify(uu_rhs - swrhs.uu_rhs))
print("vv_rhs - swrhs.vv_rhs: Should be zero: ",sp.simplify(vv_rhs - swrhs.vv_rhs))
###Output
^^^ Ignore the minor warning above. ^^^
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_rhs - swrhs.uu_rhs: Should be zero: 0
vv_rhs - swrhs.vv_rhs: Should be zero: 0
###Markdown
Step 4: Plane-Wave Initial Data for the Scalar Wave Equation \[Back to [top](toc)\]$$\label{step4}$$The solution to the scalar wave equation for a monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction is$$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$where $\hat{k}$ is a unit vector. We choose $f(\hat{k}\cdot\vec{x} - c t)$ to take the form$$f(\hat{k}\cdot\vec{x} - c t) = \sin\left(\hat{k}\cdot\vec{x} - c t\right) + 2,$$where we add the $+2$ to ensure that the exact solution never crosses through zero. In places where the exact solution passes through zero, the relative error (i.e., the measure of error to compare numerical with exact results) is undefined. Also, $f(\hat{k}\cdot\vec{x} - c t)$ plus a constant is still a solution to the wave equation.
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL",thismodule,"time")
kk = par.Cparameters("REAL",thismodule,["kk0","kk1","kk2"])
# Step 3: Normalize the k vector
kk_norm = sp.sqrt(kk[0]**2 + kk[1]**2 + kk[2]**2)
# Step 4: Compute k.x
dot_product = sp.sympify(0)
for i in range(DIM):
dot_product += xx[i]*kk[i]
dot_product /= kk_norm
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID = sp.sin(dot_product - wavespeed*time)+2
vv_ID = sp.diff(uu_ID, time)
###Output
_____no_output_____
###Markdown
Next we verify that $f(\hat{k}\cdot\vec{x} - c t)$ satisfies the wave equation, by computing$$\left(c^2 \nabla^2 - \partial_t^2 \right)\ f\left(\hat{k}\cdot\vec{x} - c t\right),$$and confirming the result is exactly zero.
###Code
sp.simplify(wavespeed**2*(sp.diff(uu_ID,xx[0],2) +
sp.diff(uu_ID,xx[1],2) +
sp.diff(uu_ID,xx[2],2))
- sp.diff(uu_ID,time,2))
###Output
_____no_output_____
###Markdown
Step 4.a: Code Validation against ScalarWave.InitialData_PlaneWave NRPy+ module \[Back to [top](toc)\]$$\label{code_validation2}$$As a code validation check, we will verify agreement in the SymPy expressions for plane-wave initial data for the Scalar Wave equation between1. this tutorial and 2. the NRPy+ [ScalarWave.InitialData_PlaneWave](../edit/ScalarWave/InitialData_PlaneWave.py) module.
###Code
# We just defined SymPy expressions for uu_ID and vv_ID in
# terms of other SymPy variables. Here, we will use the
# above-defined uu_ID and vv_ID to validate against the
# same expressions in the ScalarWave/InitialData_PlaneWave.py
# module, to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the PlaneWave ID module itself.
#
# Step 6: Call the InitialData_PlaneWave() function from within the
# ScalarWave/InitialData_PlaneWave.py module,
# which should do exactly the same as in Steps 1-5 above.
import ScalarWave.InitialData_PlaneWave as swid
swid.InitialData_PlaneWave()
# Step 7: Consistency check between the tutorial module above
# and the InitialData_PlaneWave() function from within the
# ScalarWave/InitialData_PlaneWave.py module.
# It is SAFE to ignore any warning about re-initializing an already-registered parameter.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_ID - swid.uu_ID: Should be zero: ",sp.simplify(uu_ID - swid.uu_ID))
print("vv_ID - swid.vv_ID: Should be zero: ",sp.simplify(vv_ID - swid.vv_ID))
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_ID - swid.uu_ID: Should be zero: 0
vv_ID - swid.vv_ID: Should be zero: 0
###Markdown
Step 5: Output this module to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-ScalarWave.pdf](Tutorial-ScalarWave.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx Tutorial-ScalarWave.ipynb
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!rm -f Tut*.out Tut*.aux Tut*.log
###Output
[NbConvertApp] Converting notebook Tutorial-ScalarWave.ipynb to latex
[NbConvertApp] Writing 63651 bytes to Tutorial-ScalarWave.tex
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
###Markdown
window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); Generating C Code for the Scalar Wave Equation in Cartesian Coordinates Authors: Zach Etienne & Thiago Assumpção Formatting improvements courtesy Brandon Clark This module generates the C Code for the Scalarwave in Cartesian coordinates and sets up either monochromatic plane wave or spherical Gaussian [Initial Data](https://en.wikipedia.org/wiki/Initial_value_problem). **Notebook Status:** Validated **Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented below ([right-hand-side expressions](code_validation1); [initial data expressions](code_validation2)). In addition, all expressions have been validated against a trusted code (the [original SENR/NRPy+ code](https://bitbucket.org/zach_etienne/nrpy)). NRPy+ Source Code for this module: * [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py)* [ScalarWave/InitialData.py](../edit/ScalarWave/InitialData.py) Introduction: Problem StatementWe wish to numerically solve the scalar wave equation as an [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem) in Cartesian coordinates:$$\partial_t^2 u = c^2 \nabla^2 u \text{,}$$where $u$ (the amplitude of the wave) is a function of time and space: $u = u(t,x,y,...)$ (spatial dimension as-yet unspecified) and $c$ is the wave speed, subject to some initial condition$$u(0,x,y,...) = f(x,y,...)$$and suitable spatial boundary conditions.As described in the next section, we will find it quite useful to define$$v(t,x,y,...) = \partial_t u(t,x,y,...).$$In this way, the second-order PDE is reduced to a set of two coupled first-order PDEs\begin{align}\partial_t u &= v \\\partial_t v &= c^2 \nabla^2 u.\end{align}We will use NRPy+ to generate efficient C codes capable of generating both initial data $u(0,x,y,...) = f(x,y,...)$; $v(0,x,y,...)=g(x,y,...)$, as well as finite-difference expressions for the right-hand sides of the above expressions. These expressions are needed within the *Method of Lines* to "integrate" the solution forward in time. The Method of LinesOnce we have initial data, we "evolve it forward in time", using the [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html). In short, the Method of Lines enables us to handle 1. the **spatial derivatives** of an initial value problem PDE using **standard finite difference approaches**, and2. the **temporal derivatives** of an initial value problem PDE using **standard strategies for solving ordinary differential equations (ODEs)**, so long as the initial value problem PDE can be written in the form$$\partial_t \vec{f} = \mathbf{M}\ \vec{f},$$where $\mathbf{M}$ is an $N\times N$ matrix filled with differential operators that act on the $N$-element column vector $\vec{f}$. $\mathbf{M}$ may not contain $t$ or time derivatives explicitly; only *spatial* partial derivatives are allowed to appear inside $\mathbf{M}$. The scalar wave equation as written in the [previous module](Tutorial-ScalarWave.ipynb)\begin{equation}\partial_t \begin{bmatrix}u \\v \end{bmatrix}=\begin{bmatrix}0 & 1 \\c^2 \nabla^2 & 0 \end{bmatrix}\begin{bmatrix}u \\v \end{bmatrix}\end{equation}satisfies this requirement. 
Thus we can treat the spatial derivatives $\nabla^2 u$ of the scalar wave equation using **standard finite-difference approaches**, and the temporal derivatives $\partial_t u$ and $\partial_t v$ using **standard approaches for solving ODEs**. In [the next module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will apply the highly robust [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4), used widely for numerically solving ODEs, to "march" (integrate) the solution vector $\vec{f}$ forward in time from its initial value ("initial data"). Basic AlgorithmThe basic algorithm for solving the scalar wave equation [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem), based on the Method of Lines (see section above) is outlined below, with NRPy+-based components highlighted in green. We will review how NRPy+ generates these core components in this module.1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. Set gridfunction values to initial data.1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following: 1. Evaluate scalar wave RHS expressions. 1. Apply boundary conditions.**We refer to the right-hand side of the equation $\partial_t \vec{f} = \mathbf{M}\ \vec{f}$ as the RHS. In this case, we refer to the $\mathbf{M}\ \vec{f}$ as the "scalar wave RHSs".** In the following sections we will 1. Use NRPy+ to cast the scalar wave RHS expressions -- in finite difference form -- into highly efficient C code, 1. first in one spatial dimension with fourth-order finite differences, 1. and then in three spatial dimensions with tenth-order finite differences.1. Use NRPy+ to generate monochromatic plane-wave initial data for the scalar wave equation, where the wave propagates in an arbitrary direction.As for the $\nabla^2 u$ term, spatial derivatives are handled in NRPy+ via [finite differencing](https://en.wikipedia.org/wiki/Finite_difference).We will sample the solution $\{u,v\}$ at discrete, uniformly-sampled points in space and time. For simplicity, let's assume that we consider the wave equation in one spatial dimension. Then the solution at any sampled point in space and time is given by$$u^n_i = u(t_n,x_i) = u(t_0 + n \Delta t, x_0 + i \Delta x),$$where $\Delta t$ and $\Delta x$ represent the temporal and spatial resolution, respectively. $v^n_i$ is sampled at the same points in space and time. Table of Contents$$\label{toc}$$1. [Step 1](initializenrpy): Initialize core NRPy+ modules1. [Step 2](rhss1d): Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing1. [Step 3](rhss3d): Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing 1. [Step 3.a](code_validation1): Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module1. [Step 4](id): Setting up Initial Data for the Scalar Wave Equation 1. [Step 4.a](planewave): The Monochromatic Plane-Wave Solution 1. [Step 4.b](sphericalgaussian): The Spherical Gaussian Solution (*Courtesy Thiago Assumpção*)1. [Step 5](code_validation2): Code Validation against `ScalarWave.InitialData` NRPy+ module1. [Step 6](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Initialize core NRPy+ modules \[Back to [top](toc)\]$$\label{initializenrpy}$$Let's start by importing all the needed modules from NRPy+:
###Code
# Step P1: Import needed NRPy+ core modules:
import NRPy_param_funcs as par # NRPy+: Parameter interface
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import grid as gri # NRPy+: Functions having to do with numerical grids
import finite_difference as fin # NRPy+: Finite difference C code generation module
from outputC import lhrh # NRPy+: Core C code output module
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
###Output
_____no_output_____
###Markdown
Step 2: Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing \[Back to [top](toc)\]$$\label{rhss1d}$$To minimize complication, we will first restrict ourselves to solving the wave equation in one spatial dimension, so$$\nabla^2 u = \partial_x^2 u.$$Extension of this operator to higher spatial dimensions is straightforward, particularly when using NRPy+. As was discussed in [the finite difference section of the tutorial](Tutorial-Finite_Difference_Derivatives.ipynb), NRPy+ approximates derivatives using [finite difference methods](https://en.wikipedia.org/wiki/Finite_difference). The second-order derivative $\partial_x^2$, accurate to fourth order in the uniform grid spacing $\Delta x$ (obtained by fitting the unique 4th-degree polynomial to 5 sample points of $u$), is given by\begin{equation}\left[\partial_x^2 u(t,x)\right]_j = \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right)+ \mathcal{O}\left((\Delta x)^4\right).\end{equation}
###Code
# Step P2: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
thismodule = "ScalarWave"
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 1: Set the spatial dimension parameter, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",1)
DIM = par.parval_from_str("grid::DIM")
# Step 2: Set the finite differencing order to 4.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4)
# Step 3: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 4: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 5: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
vv_rhs = sp.simplify(vv_rhs)
# Step 6: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)])
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expression:
* "const double uu_dDD00 = invdx0**2*(-5*uu/2 + 4*uu_i0m1/3 - uu_i0m2/12 + 4*uu_i0p1/3 - uu_i0p2/12)"
*/
const double uu_i0m2 = in_gfs[IDX2(UUGF, i0-2)];
const double uu_i0m1 = in_gfs[IDX2(UUGF, i0-1)];
const double uu = in_gfs[IDX2(UUGF, i0)];
const double uu_i0p1 = in_gfs[IDX2(UUGF, i0+1)];
const double uu_i0p2 = in_gfs[IDX2(UUGF, i0+2)];
const double vv = in_gfs[IDX2(VVGF, i0)];
const double FDPart1_Rational_5_2 = 5.0/2.0;
const double FDPart1_Rational_1_12 = 1.0/12.0;
const double FDPart1_Rational_4_3 = 4.0/3.0;
const double uu_dDD00 = ((invdx0)*(invdx0))*(FDPart1_Rational_1_12*(-uu_i0m2 - uu_i0p2) + FDPart1_Rational_4_3*(uu_i0m1 + uu_i0p1) - FDPart1_Rational_5_2*uu);
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[rhs_gfs[IDX2(UUGF, i0)] = vv,
* rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*wavespeed**2]"
*/
rhs_gfs[IDX2(UUGF, i0)] = vv;
rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*((wavespeed)*(wavespeed));
}
###Markdown
**Success!** Notice that indeed NRPy+ was able to compute the spatial derivative operator,\begin{equation}\left[\partial_x^2 u(t,x)\right]_j \approx \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right),\end{equation}correctly (this is easier to read in the "Original SymPy expressions" comment block at the top of the C output). Note that `invdx0`$=1/\Delta x_0$, where $\Delta x_0$ is the (uniform) grid spacing in the zeroth, or $x_0$, direction. Step 3: Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing \[Back to [top](toc)\]$$\label{rhss3d}$$Let's next repeat the same process, only this time at **10th** finite difference order, for the **3-spatial-dimension** scalar wave equation, with SIMD enabled:
###Code
# Step 1: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 2: Set the spatial dimension parameter
#            to *THREE* this time, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")
# Step 3: Set the finite differencing order to 10.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",10)
# Step 4a: Reset gridfunctions registered in 1D case above,
# to avoid NRPy+ throwing an error about double-
# registering gridfunctions, which is not allowed.
gri.glb_gridfcs_list = []
# Step 4b: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 5: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 6: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
# Step 7: Simplify the expression for c^2 \nabla^2 u (a.k.a., vv_rhs):
vv_rhs = sp.simplify(vv_rhs)
# Step 8: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)],params="SIMD_enable=True")
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY uu_dDD00 = invdx0**2*(-5269*uu/1800 + 5*uu_i0m1_i1_i2/3 - 5*uu_i0m2_i1_i2/21 + 5*uu_i0m3_i1_i2/126 - 5*uu_i0m4_i1_i2/1008 + uu_i0m5_i1_i2/3150 + 5*uu_i0p1_i1_i2/3 - 5*uu_i0p2_i1_i2/21 + 5*uu_i0p3_i1_i2/126 - 5*uu_i0p4_i1_i2/1008 + uu_i0p5_i1_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD11 = invdx1**2*(-5269*uu/1800 + 5*uu_i0_i1m1_i2/3 - 5*uu_i0_i1m2_i2/21 + 5*uu_i0_i1m3_i2/126 - 5*uu_i0_i1m4_i2/1008 + uu_i0_i1m5_i2/3150 + 5*uu_i0_i1p1_i2/3 - 5*uu_i0_i1p2_i2/21 + 5*uu_i0_i1p3_i2/126 - 5*uu_i0_i1p4_i2/1008 + uu_i0_i1p5_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD22 = invdx2**2*(-5269*uu/1800 + 5*uu_i0_i1_i2m1/3 - 5*uu_i0_i1_i2m2/21 + 5*uu_i0_i1_i2m3/126 - 5*uu_i0_i1_i2m4/1008 + uu_i0_i1_i2m5/3150 + 5*uu_i0_i1_i2p1/3 - 5*uu_i0_i1_i2p2/21 + 5*uu_i0_i1_i2p3/126 - 5*uu_i0_i1_i2p4/1008 + uu_i0_i1_i2p5/3150)]"
*/
const REAL_SIMD_ARRAY uu_i0_i1_i2m5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-5)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-1)]);
const REAL_SIMD_ARRAY uu_i0_i1m5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-1,i2)]);
const REAL_SIMD_ARRAY uu_i0m5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-1,i1,i2)]);
const REAL_SIMD_ARRAY uu = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+1,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+1)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+5)]);
const REAL_SIMD_ARRAY vv = ReadSIMD(&in_gfs[IDX4(VVGF, i0,i1,i2)]);
const double tmpFDPart1_NegativeOne_ = -1.0;
const REAL_SIMD_ARRAY FDPart1_NegativeOne_ = ConstSIMD(tmpFDPart1_NegativeOne_);
const double tmpFDPart1_Rational_1_3150 = 1.0/3150.0;
const REAL_SIMD_ARRAY FDPart1_Rational_1_3150 = ConstSIMD(tmpFDPart1_Rational_1_3150);
const double tmpFDPart1_Rational_5269_1800 = 5269.0/1800.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5269_1800 = ConstSIMD(tmpFDPart1_Rational_5269_1800);
const double tmpFDPart1_Rational_5_1008 = 5.0/1008.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_1008 = ConstSIMD(tmpFDPart1_Rational_5_1008);
const double tmpFDPart1_Rational_5_126 = 5.0/126.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_126 = ConstSIMD(tmpFDPart1_Rational_5_126);
const double tmpFDPart1_Rational_5_21 = 5.0/21.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_21 = ConstSIMD(tmpFDPart1_Rational_5_21);
const double tmpFDPart1_Rational_5_3 = 5.0/3.0;
const REAL_SIMD_ARRAY FDPart1_Rational_5_3 = ConstSIMD(tmpFDPart1_Rational_5_3);
const REAL_SIMD_ARRAY FDPart1_0 = MulSIMD(FDPart1_Rational_5269_1800, uu);
const REAL_SIMD_ARRAY uu_dDD00 = MulSIMD(MulSIMD(invdx0, invdx0), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0m3_i1_i2, uu_i0p3_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0m1_i1_i2, uu_i0p1_i1_i2), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0m5_i1_i2, uu_i0p5_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0m4_i1_i2, uu_i0p4_i1_i2), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0m2_i1_i2, uu_i0p2_i1_i2), FDPart1_0))))));
const REAL_SIMD_ARRAY uu_dDD11 = MulSIMD(MulSIMD(invdx1, invdx1), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0_i1m3_i2, uu_i0_i1p3_i2), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0_i1m1_i2, uu_i0_i1p1_i2), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0_i1m5_i2, uu_i0_i1p5_i2), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0_i1m4_i2, uu_i0_i1p4_i2), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0_i1m2_i2, uu_i0_i1p2_i2), FDPart1_0))))));
const REAL_SIMD_ARRAY uu_dDD22 = MulSIMD(MulSIMD(invdx2, invdx2), FusedMulAddSIMD(FDPart1_Rational_5_126, AddSIMD(uu_i0_i1_i2m3, uu_i0_i1_i2p3), FusedMulAddSIMD(FDPart1_Rational_5_3, AddSIMD(uu_i0_i1_i2m1, uu_i0_i1_i2p1), FusedMulSubSIMD(FDPart1_Rational_1_3150, AddSIMD(uu_i0_i1_i2m5, uu_i0_i1_i2p5), FusedMulAddSIMD(FDPart1_Rational_5_1008, AddSIMD(uu_i0_i1_i2m4, uu_i0_i1_i2p4), FusedMulAddSIMD(FDPart1_Rational_5_21, AddSIMD(uu_i0_i1_i2m2, uu_i0_i1_i2p2), FDPart1_0))))));
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY __RHS_exp_0 = vv,
* const REAL_SIMD_ARRAY __RHS_exp_1 = wavespeed**2*(uu_dDD00 + uu_dDD11 + uu_dDD22)]"
*/
const REAL_SIMD_ARRAY __RHS_exp_0 = vv;
const REAL_SIMD_ARRAY __RHS_exp_1 = MulSIMD(MulSIMD(wavespeed, wavespeed), AddSIMD(uu_dDD00, AddSIMD(uu_dDD11, uu_dDD22)));
WriteSIMD(&rhs_gfs[IDX4(UUGF, i0, i1, i2)], __RHS_exp_0);
WriteSIMD(&rhs_gfs[IDX4(VVGF, i0, i1, i2)], __RHS_exp_1);
}
###Markdown
Step 3.a: Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation1}$$Here, as a code validation check, we verify agreement in the SymPy expressions for the RHSs of the three-spatial-dimension Scalar Wave equation (i.e., `uu_rhs` and `vv_rhs`) between1. this tutorial and 2. the [NRPy+ ScalarWave.ScalarWave_RHSs](../edit/ScalarWave/ScalarWave_RHSs.py) module.
###Code
# Step 10: We already have SymPy expressions for uu_rhs and vv_rhs in
# terms of other SymPy variables. Even if we reset the list
# of NRPy+ gridfunctions, these *SymPy* expressions for
# uu_rhs and vv_rhs *will remain unaffected*.
#
# Here, we will use the above-defined uu_rhs and vv_rhs to
# validate against the same expressions in the
# ScalarWave/ScalarWave_RHSs.py module,
# to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the ScalarWave_RHSs.py module itself.
#
# Reset the list of gridfunctions, as registering a gridfunction
# twice will spawn an error.
gri.glb_gridfcs_list = []
# Step 11: Call the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module,
# which should do exactly the same as in Steps 1-10 above.
import ScalarWave.ScalarWave_RHSs as swrhs
swrhs.ScalarWave_RHSs()
# Step 12: Consistency check between the tutorial notebook above
# and the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_rhs - swrhs.uu_rhs = "+str(sp.simplify(uu_rhs - swrhs.uu_rhs))+"\t\t (should be zero)")
print("vv_rhs - swrhs.vv_rhs = "+str(sp.simplify(vv_rhs - swrhs.vv_rhs))+"\t\t (should be zero)")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_rhs - swrhs.uu_rhs = 0 (should be zero)
vv_rhs - swrhs.vv_rhs = 0 (should be zero)
###Markdown
Step 4: Setting up Initial Data for the Scalar Wave Equation \[Back to [top](toc)\]$$\label{id}$$ Step 4.a: The Monochromatic Plane-Wave Solution \[Back to [top](toc)\]$$\label{planewave}$$The solution to the scalar wave equation for a monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction is$$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$where $\hat{k}$ is a unit vector. We choose $f(\hat{k}\cdot\vec{x} - c t)$ to take the form$$f(\hat{k}\cdot\vec{x} - c t) = \sin\left(\hat{k}\cdot\vec{x} - c t\right) + 2,$$where we add the $+2$ to ensure that the exact solution never crosses through zero. In places where the exact solution passes through zero, the relative error (i.e., the measure of error to compare numerical with exact results) is undefined. Also, $f(\hat{k}\cdot\vec{x} - c t)$ plus a constant is still a solution to the wave equation.
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx # Sets the Cartesian coordinates xx[0]=x; xx[1]=y; xx[2]=z
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
kk = par.Cparameters("REAL", thismodule, ["kk0", "kk1", "kk2"],[1.0,1.0,1.0])
# Step 3: Normalize the k vector
kk_norm = sp.sqrt(kk[0]**2 + kk[1]**2 + kk[2]**2)
# Step 4: Compute k.x
dot_product = sp.sympify(0)
for i in range(DIM):
dot_product += xx[i]*kk[i]
dot_product /= kk_norm
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID_PlaneWave = sp.sin(dot_product - wavespeed*time)+2
vv_ID_PlaneWave = sp.diff(uu_ID_PlaneWave, time)
###Output
_____no_output_____
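###Markdown
To spot-check these expressions numerically outside of the generated C code, the SymPy expressions can be converted into NumPy-callable functions. The cell below is only an illustrative sketch (it is not part of the NRPy+ pipeline), and the argument ordering is an arbitrary choice made for this example.
###Code
# Sketch: convert the plane-wave initial data into callable functions.
# (Illustration only; NRPy+ instead generates C code from these expressions.)
uu_ID_PlaneWave_func = sp.lambdify((xx[0], xx[1], xx[2], time,
                                    kk[0], kk[1], kk[2], wavespeed),
                                   uu_ID_PlaneWave, "numpy")
vv_ID_PlaneWave_func = sp.lambdify((xx[0], xx[1], xx[2], time,
                                    kk[0], kk[1], kk[2], wavespeed),
                                   vv_ID_PlaneWave, "numpy")
# Example: at the origin at t=0, u = sin(0) + 2 = 2 for any k-vector.
uu_at_origin = uu_ID_PlaneWave_func(0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0)
###Output
_____no_output_____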
###Markdown
Next we verify that $f(\hat{k}\cdot\vec{x} - c t)$ satisfies the wave equation, by computing$$\left(c^2 \nabla^2 - \partial_t^2 \right)\ f\left(\hat{k}\cdot\vec{x} - c t\right),$$and confirming the result is exactly zero.
###Code
sp.simplify(wavespeed**2*(sp.diff(uu_ID_PlaneWave,xx[0],2) +
sp.diff(uu_ID_PlaneWave,xx[1],2) +
sp.diff(uu_ID_PlaneWave,xx[2],2))
- sp.diff(uu_ID_PlaneWave,time,2))
###Output
_____no_output_____
###Markdown
Step 4.b: The Spherical Gaussian Solution \[Back to [top](toc)\]$$\label{sphericalgaussian}$$Here we will implement the spherical Gaussian solution, which consists of ingoing and outgoing wave fronts:\begin{align}u(r,t) &= u_{\rm out}(r,t) + u_{\rm in}(r,t),\ \ \text{where}\\u_{\rm out}(r,t) &=\frac{r-ct}{r} \exp\left[\frac{-(r-ct)^2}{2 \sigma^2}\right] \\u_{\rm in}(r,t) &=\frac{r+ct}{r} \exp\left[\frac{-(r+ct)^2}{2 \sigma^2}\right] \\\end{align}where $c$ is the wavespeed, and $\sigma$ is the width of the Gaussian (i.e., the "standard deviation").
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx # Sets the Cartesian coordinates xx[0]=x; xx[1]=y; xx[2]=z
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
sigma = par.Cparameters("REAL", thismodule, "sigma",3.0)
# Step 4: Compute r
r = sp.sympify(0)
for i in range(DIM):
r += xx[i]**2
r = sp.sqrt(r)
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID_SphericalGaussianOUT = +(r - wavespeed*time)/r * sp.exp( -(r - wavespeed*time)**2 / (2*sigma**2) )
uu_ID_SphericalGaussianIN = +(r + wavespeed*time)/r * sp.exp( -(r + wavespeed*time)**2 / (2*sigma**2) )
uu_ID_SphericalGaussian = uu_ID_SphericalGaussianOUT + uu_ID_SphericalGaussianIN
vv_ID_SphericalGaussian = sp.diff(uu_ID_SphericalGaussian, time)
###Output
_____no_output_____
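###Markdown
As a standalone numerical sketch (not part of the NRPy+ pipeline), the same profile can also be evaluated directly from the formula above with NumPy. The default values $c=1$ and $\sigma=3$ below simply mirror the `wavespeed` and `sigma` C-parameter defaults.
###Code
# Sketch: evaluate u(r,t) = u_out + u_in directly from the formula above.
# (Illustration only. We avoid r=0, where the 1/r factors require taking a limit.)
import numpy as np

def u_spherical_gaussian(r, t, c=1.0, sigma=3.0):
    u_out = (r - c*t)/r * np.exp(-(r - c*t)**2/(2.0*sigma**2))
    u_in  = (r + c*t)/r * np.exp(-(r + c*t)**2/(2.0*sigma**2))
    return u_out + u_in

r_vals    = np.linspace(0.1, 10.0, 100)   # radial samples, avoiding r=0
u_initial = u_spherical_gaussian(r_vals, t=0.0)
###Output
_____no_output_____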
###Markdown
Since the wave equation is linear, the ingoing and outgoing waves must each satisfy the wave equation, which implies that their sum also satisfies it. Next we verify that $u(r,t)$ satisfies the wave equation, by checking that$$\left(c^2 \nabla^2 - \partial_t^2 \right)\left\{u_{\rm out}(r,t)\right\}$$and$$\left(c^2 \nabla^2 - \partial_t^2 \right)\left\{u_{\rm in}(r,t)\right\}$$are separately zero. We check the two pieces separately because SymPy has difficulty simplifying the combined expression.
###Code
print(sp.simplify(wavespeed**2*(sp.diff(uu_ID_SphericalGaussianOUT,xx[0],2) +
sp.diff(uu_ID_SphericalGaussianOUT,xx[1],2) +
sp.diff(uu_ID_SphericalGaussianOUT,xx[2],2))
- sp.diff(uu_ID_SphericalGaussianOUT,time,2)) )
print(sp.simplify(wavespeed**2*(sp.diff(uu_ID_SphericalGaussianIN,xx[0],2) +
sp.diff(uu_ID_SphericalGaussianIN,xx[1],2) +
sp.diff(uu_ID_SphericalGaussianIN,xx[2],2))
- sp.diff(uu_ID_SphericalGaussianIN,time,2)))
###Output
0
0
###Markdown
Step 5: Code Validation against `ScalarWave.InitialData` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation2}$$As a code validation check, we will verify agreement in the SymPy expressions for plane-wave initial data for the Scalar Wave equation between1. this tutorial and 2. the NRPy+ [ScalarWave.InitialData](../edit/ScalarWave/InitialData.py) module.
###Code
# We just defined SymPy expressions for uu_ID and vv_ID in
# terms of other SymPy variables. Here, we will use the
# above-defined uu_ID and vv_ID to validate against the
# same expressions in the ScalarWave/InitialData.py
# module, to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the PlaneWave ID module itself.
#
# Step 6: Call the InitialData(Type="PlaneWave") function from within the
# ScalarWave/InitialData.py module,
# which should do exactly the same as in Steps 1-5 above.
import sys                         # Standard Python module; needed for sys.exit() in the checks below
import ScalarWave.InitialData as swid
swid.InitialData(Type="PlaneWave")
# Step 7: Consistency check between the tutorial notebook above
# and the PlaneWave option from within the
# ScalarWave/InitialData.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module: PlaneWave Case")
if sp.simplify(uu_ID_PlaneWave - swid.uu_ID) != 0:
print("TEST FAILED: uu_ID_PlaneWave - swid.uu_ID = "+str(sp.simplify(uu_ID_PlaneWave - swid.uu_ID))+"\t\t (should be zero)")
sys.exit(1)
if sp.simplify(vv_ID_PlaneWave - swid.vv_ID) != 0:
print("TEST FAILED: vv_ID_PlaneWave - swid.vv_ID = "+str(sp.simplify(vv_ID_PlaneWave - swid.vv_ID))+"\t\t (should be zero)")
sys.exit(1)
print("TESTS PASSED!")
# Step 8: Consistency check between the tutorial notebook above
# and the SphericalGaussian option from within the
# ScalarWave/InitialData.py module.
swid.InitialData(Type="SphericalGaussian")
print("Consistency check between ScalarWave tutorial and NRPy+ module: SphericalGaussian Case")
if sp.simplify(uu_ID_SphericalGaussian - swid.uu_ID) != 0:
print("TEST FAILED: uu_ID_SphericalGaussian - swid.uu_ID = "+str(sp.simplify(uu_ID_SphericalGaussian - swid.uu_ID))+"\t\t (should be zero)")
sys.exit(1)
if sp.simplify(vv_ID_SphericalGaussian - swid.vv_ID) != 0:
print("TEST FAILED: vv_ID_SphericalGaussian - swid.vv_ID = "+str(sp.simplify(vv_ID_SphericalGaussian - swid.vv_ID))+"\t\t (should be zero)")
sys.exit(1)
print("TESTS PASSED!")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module: PlaneWave Case
TESTS PASSED!
Consistency check between ScalarWave tutorial and NRPy+ module: SphericalGaussian Case
TESTS PASSED!
###Markdown
Step 6: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-ScalarWave.pdf](Tutorial-ScalarWave.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-ScalarWave")
###Output
Created Tutorial-ScalarWave.tex, and compiled LaTeX file to PDF file
Tutorial-ScalarWave.pdf
###Markdown
Generating C Code for the Scalar Wave Equation in Cartesian Coordinates Author: Zach Etienne Formatting improvements courtesy Brandon Clark This module generates the C Code for the Scalarwave in Cartesian coordinates and sets up [Plane Wave Initial Data](https://en.wikipedia.org/wiki/Initial_value_problem). **Notebook Status:** Validated **Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](code_validation). In addition, all expressions have been validated against a trusted code (the [original SENR/NRPy+ code](https://bitbucket.org/zach_etienne/nrpy)). NRPy+ Source Code for this module: * [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py)* [ScalarWave/InitialData_PlaneWave.py](../edit/ScalarWave/InitialData_PlaneWave.py) Introduction: Problem StatementWe wish to numerically solve the scalar wave equation as an [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem) in Cartesian coordinates:$$\partial_t^2 u = c^2 \nabla^2 u \text{,}$$where $u$ (the amplitude of the wave) is a function of time and space: $u = u(t,x,y,...)$ (spatial dimension as-yet unspecified) and $c$ is the wave speed, subject to some initial condition$$u(0,x,y,...) = f(x,y,...)$$and suitable spatial boundary conditions.As described in the next section, we will find it quite useful to define$$v(t,x,y,...) = \partial_t u(t,x,y,...).$$In this way, the second-order PDE is reduced to a set of two coupled first-order PDEs\begin{align}\partial_t u &= v \\\partial_t v &= c^2 \nabla^2 u.\end{align}We will use NRPy+ to generate efficient C codes capable of generating both initial data $u(0,x,y,...) = f(x,y,...)$; $v(0,x,y,...)=g(x,y,...)$, as well as finite-difference expressions for the right-hand sides of the above expressions. These expressions are needed within the *Method of Lines* to "integrate" the solution forward in time. The Method of LinesOnce we have initial data, we "evolve it forward in time", using the [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html). In short, the Method of Lines enables us to handle 1. the **spatial derivatives** of an initial value problem PDE using **standard finite difference approaches**, and2. the **temporal derivatives** of an initial value problem PDE using **standard strategies for solving ordinary differential equations (ODEs)**, so long as the initial value problem PDE can be written in the form$$\partial_t \vec{f} = \mathbf{M}\ \vec{f},$$where $\mathbf{M}$ is an $N\times N$ matrix filled with differential operators that act on the $N$-element column vector $\vec{f}$. $\mathbf{M}$ may not contain $t$ or time derivatives explicitly; only *spatial* partial derivatives are allowed to appear inside $\mathbf{M}$. The scalar wave equation as written in the [previous module](Tutorial-ScalarWave.ipynb)\begin{equation}\partial_t \begin{bmatrix}u \\v \end{bmatrix}=\begin{bmatrix}0 & 1 \\c^2 \nabla^2 & 0 \end{bmatrix}\begin{bmatrix}u \\v \end{bmatrix}\end{equation}satisfies this requirement. Thus we can treat the spatial derivatives $\nabla^2 u$ of the scalar wave equation using **standard finite-difference approaches**, and the temporal derivatives $\partial_t u$ and $\partial_t v$ using **standard approaches for solving ODEs**. 
In [the next module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will apply the highly robust [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4), used widely for numerically solving ODEs, to "march" (integrate) the solution vector $\vec{f}$ forward in time from its initial value ("initial data"). Basic AlgorithmThe basic algorithm for solving the scalar wave equation [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem), based on the Method of Lines (see section above) is outlined below, with NRPy+-based components highlighted in green. We will review how NRPy+ generates these core components in this module.1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. Set gridfunction values to initial data.1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following: 1. Evaluate scalar wave RHS expressions. 1. Apply boundary conditions.**We refer to the right-hand side of the equation $\partial_t \vec{f} = \mathbf{M}\ \vec{f}$ as the RHS. In this case, we refer to the $\mathbf{M}\ \vec{f}$ as the "scalar wave RHSs".** In the following sections we will 1. Use NRPy+ to cast the scalar wave RHS expressions -- in finite difference form -- into highly efficient C code, 1. first in one spatial dimension with fourth-order finite differences, 1. and then in three spatial dimensions with tenth-order finite differences.1. Use NRPy+ to generate monochromatic plane-wave initial data for the scalar wave equation, where the wave propagates in an arbitrary direction.As for the $\nabla^2 u$ term, spatial derivatives are handled in NRPy+ via [finite differencing](https://en.wikipedia.org/wiki/Finite_difference).We will sample the solution $\{u,v\}$ at discrete, uniformly-sampled points in space and time. For simplicity, let's assume that we consider the wave equation in one spatial dimension. Then the solution at any sampled point in space and time is given by$$u^n_i = u(t_n,x_i) = u(t_0 + n \Delta t, x_0 + i \Delta x),$$where $\Delta t$ and $\Delta x$ represent the temporal and spatial resolution, respectively. $v^n_i$ is sampled at the same points in space and time. Table of Contents$$\label{toc}$$1. [Step 1](initializenrpy): Initialize core NRPy+ modules1. [Step 2](rhss1d): Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing1. [Step 3](rhss3d): Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing 1. [Step 3.a](code_validation1): Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module1. [Step 4](planewavesoln): Plane-Wave Solution of the Scalar Wave Equation 1. [Step 4.a](code_validation2): Code Validation against `ScalarWave.InitialData_PlaneWave` NRPy+ module1. [Step 5](latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file Step 1: Initialize core NRPy+ modules \[Back to [top](toc)\]$$\label{initializenrpy}$$Let's start by importing all the needed modules from NRPy+:
###Code
# Step P1: Import needed NRPy+ core modules:
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
import finite_difference as fin
from outputC import *
###Output
_____no_output_____
###Markdown
Step 2: Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing \[Back to [top](toc)\]$$\label{rhss1d}$$To minimize complication, we will first restrict ourselves to solving the wave equation in one spatial dimension, so$$\nabla^2 u = \partial_x^2 u.$$Extension of this operator to higher spatial dimensions is straightforward, particularly when using NRPy+. As was discussed in [the finite difference section of the tutorial](Tutorial-Finite_Difference_Derivatives.ipynb), NRPy+ approximates derivatives using [finite difference methods](https://en.wikipedia.org/wiki/Finite_difference); the second-order derivative $\partial_x^2$, accurate to fourth order in uniform grid spacing $\Delta x$ (obtained by fitting the unique 4th-degree polynomial to 5 sample points of $u$), is given by\begin{equation}\left[\partial_x^2 u(t,x)\right]_j = \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right)+ \mathcal{O}\left((\Delta x)^4\right).\end{equation}
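The coefficients quoted above can be re-derived independently of NRPy+. Here is a minimal sketch using plain SymPy (its `finite_diff_weights` routine; nothing NRPy+-specific) that reproduces the fourth-order weights on the five-point stencil:

```python
# Independent check of the 4th-order second-derivative stencil quoted above.
# Uses only SymPy; nothing here is NRPy+-specific.
import sympy as sp

stencil = [-2, -1, 0, 1, 2]
# finite_diff_weights(deriv_order, x_list, x0) returns weights for every
# derivative order up to deriv_order and every accuracy level; [2][-1]
# selects the 2nd-derivative weights that use the full stencil.
weights = sp.finite_diff_weights(2, stencil, 0)[2][-1]
print(weights)   # expect [-1/12, 4/3, -5/2, 4/3, -1/12], matching the equation above
```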
###Code
# Step P2: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
thismodule = "ScalarWave"
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 1: Set the spatial dimension parameter, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",1)
DIM = par.parval_from_str("grid::DIM")
# Step 2: Set the finite differencing order to 4.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4)
# Step 3: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 4: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 5: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
vv_rhs = sp.simplify(vv_rhs)
# Step 6: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)])
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expression:
* "const double uu_dDD00 = invdx0**2*(-5*uu/2 + 4*uu_i0m1/3 - uu_i0m2/12 + 4*uu_i0p1/3 - uu_i0p2/12)"
*/
const double uu_i0m2 = in_gfs[IDX2(UUGF, i0-2)];
const double uu_i0m1 = in_gfs[IDX2(UUGF, i0-1)];
const double uu = in_gfs[IDX2(UUGF, i0)];
const double uu_i0p1 = in_gfs[IDX2(UUGF, i0+1)];
const double uu_i0p2 = in_gfs[IDX2(UUGF, i0+2)];
const double vv = in_gfs[IDX2(VVGF, i0)];
const double uu_dDD00 = pow(invdx0, 2)*(-5.0/2.0*uu + (4.0/3.0)*uu_i0m1 - 1.0/12.0*uu_i0m2 + (4.0/3.0)*uu_i0p1 - 1.0/12.0*uu_i0p2);
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[rhs_gfs[IDX2(UUGF, i0)] = vv,
* rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*wavespeed**2]"
*/
rhs_gfs[IDX2(UUGF, i0)] = vv;
rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*pow(wavespeed, 2);
}
###Markdown
**Success!** Notice that indeed NRPy+ was able to compute the spatial derivative operator,\begin{equation}\left[\partial_x^2 u(t,x)\right]_j \approx \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right),\end{equation}correctly (this is easiest to read in the "Original SymPy expressions" comment block at the top of the C output). Note that `invdx0`$=1/\Delta x_0$, where $\Delta x_0$ is the (uniform) grid spacing in the zeroth, or $x_0$ direction. Step 3: Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing \[Back to [top](toc)\]$$\label{rhss3d}$$Let's next repeat the same process, only this time at **10th** finite difference order, for the **3-spatial-dimension** scalar wave equation, with SIMD enabled:
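Before wading through the generated SIMD kernel below, it may help to see where coefficients such as $-5269/1800$, $5/3$, $-5/21$, $5/126$, $-5/1008$ and $1/3150$ come from. A minimal SymPy sketch (independent of NRPy+) re-derives the 10th-order second-derivative weights on the 11-point stencil:

```python
# Independent re-derivation of the 10th-order second-derivative weights that
# appear in the generated code below. Plain SymPy; nothing NRPy+-specific.
import sympy as sp

stencil = list(range(-5, 6))                      # offsets -5, ..., +5
weights = sp.finite_diff_weights(2, stencil, 0)[2][-1]
for offset, w in zip(stencil, weights):
    print(offset, w)
# Expected: center -5269/1800; offsets +/-1: 5/3; +/-2: -5/21; +/-3: 5/126;
#           +/-4: -5/1008; +/-5: 1/3150.
```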
###Code
# Step 1: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 2: Set the spatial dimension parameter
#             to *THREE* this time, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")
# Step 3: Set the finite differencing order to 10.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",10)
# Step 4a: Reset gridfunctions registered in 1D case above,
# to avoid NRPy+ throwing an error about double-
# registering gridfunctions, which is not allowed.
gri.glb_gridfcs_list = []
# Step 4b: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 5: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 6: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
# Step 7: Simplify the expression for c^2 \nabla^2 u (a.k.a., vv_rhs):
vv_rhs = sp.simplify(vv_rhs)
# Step 8: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)],params="SIMD_enable=True")
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY uu_dDD00 = invdx0**2*(-5269*uu/1800 + 5*uu_i0m1_i1_i2/3 - 5*uu_i0m2_i1_i2/21 + 5*uu_i0m3_i1_i2/126 - 5*uu_i0m4_i1_i2/1008 + uu_i0m5_i1_i2/3150 + 5*uu_i0p1_i1_i2/3 - 5*uu_i0p2_i1_i2/21 + 5*uu_i0p3_i1_i2/126 - 5*uu_i0p4_i1_i2/1008 + uu_i0p5_i1_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD11 = invdx1**2*(-5269*uu/1800 + 5*uu_i0_i1m1_i2/3 - 5*uu_i0_i1m2_i2/21 + 5*uu_i0_i1m3_i2/126 - 5*uu_i0_i1m4_i2/1008 + uu_i0_i1m5_i2/3150 + 5*uu_i0_i1p1_i2/3 - 5*uu_i0_i1p2_i2/21 + 5*uu_i0_i1p3_i2/126 - 5*uu_i0_i1p4_i2/1008 + uu_i0_i1p5_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD22 = invdx2**2*(-5269*uu/1800 + 5*uu_i0_i1_i2m1/3 - 5*uu_i0_i1_i2m2/21 + 5*uu_i0_i1_i2m3/126 - 5*uu_i0_i1_i2m4/1008 + uu_i0_i1_i2m5/3150 + 5*uu_i0_i1_i2p1/3 - 5*uu_i0_i1_i2p2/21 + 5*uu_i0_i1_i2p3/126 - 5*uu_i0_i1_i2p4/1008 + uu_i0_i1_i2p5/3150)]"
*/
const REAL_SIMD_ARRAY uu_i0_i1_i2m5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-5)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-1)]);
const REAL_SIMD_ARRAY uu_i0_i1m5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-1,i2)]);
const REAL_SIMD_ARRAY uu_i0m5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-1,i1,i2)]);
const REAL_SIMD_ARRAY uu = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+1,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+1)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+5)]);
const REAL_SIMD_ARRAY vv = ReadSIMD(&in_gfs[IDX4(VVGF, i0,i1,i2)]);
const double tmpFD_Rational_1_3150_FDcoeff = 0.0003174603174603174603174603174603175;
const REAL_SIMD_ARRAY _Rational_1_3150_FDcoeff = ConstSIMD(tmpFD_Rational_1_3150_FDcoeff);
const double tmpFD_Rational_5_126_FDcoeff = 0.03968253968253968253968253968253968;
const REAL_SIMD_ARRAY _Rational_5_126_FDcoeff = ConstSIMD(tmpFD_Rational_5_126_FDcoeff);
const double tmpFD_Rational_5_3_FDcoeff = 1.666666666666666666666666666666667;
const REAL_SIMD_ARRAY _Rational_5_3_FDcoeff = ConstSIMD(tmpFD_Rational_5_3_FDcoeff);
const double tmpFD_Rational_m5269_1800_FDcoeff = -2.927222222222222222222222222222222;
const REAL_SIMD_ARRAY _Rational_m5269_1800_FDcoeff = ConstSIMD(tmpFD_Rational_m5269_1800_FDcoeff);
const double tmpFD_Rational_m5_1008_FDcoeff = -0.004960317460317460317460317460317460;
const REAL_SIMD_ARRAY _Rational_m5_1008_FDcoeff = ConstSIMD(tmpFD_Rational_m5_1008_FDcoeff);
const double tmpFD_Rational_m5_21_FDcoeff = -0.2380952380952380952380952380952381;
const REAL_SIMD_ARRAY _Rational_m5_21_FDcoeff = ConstSIMD(tmpFD_Rational_m5_21_FDcoeff);
const REAL_SIMD_ARRAY tmpFD0 = MulSIMD(uu, _Rational_m5269_1800_FDcoeff);
const REAL_SIMD_ARRAY uu_dDD00 = MulSIMD(FusedMulAddSIMD(uu_i0p4_i1_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0p3_i1_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0p2_i1_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0p1_i1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0m5_i1_i2, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0m4_i1_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0m3_i1_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0m2_i1_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0m1_i1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0p5_i1_i2, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx0, invdx0));
const REAL_SIMD_ARRAY uu_dDD11 = MulSIMD(FusedMulAddSIMD(uu_i0_i1p4_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1p3_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1p2_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1p1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1m5_i2, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0_i1m4_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1m3_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1m2_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1m1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1p5_i2, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx1, invdx1));
const REAL_SIMD_ARRAY uu_dDD22 = MulSIMD(FusedMulAddSIMD(uu_i0_i1_i2p4, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p3, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p1, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m5, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m4, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m3, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m1, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p5, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx2, invdx2));
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY __RHS_exp_0 = vv,
* const REAL_SIMD_ARRAY __RHS_exp_1 = wavespeed**2*(uu_dDD00 + uu_dDD11 + uu_dDD22)]"
*/
const REAL_SIMD_ARRAY __RHS_exp_0 = vv;
const REAL_SIMD_ARRAY __RHS_exp_1 = MulSIMD(AddSIMD(uu_dDD11, AddSIMD(uu_dDD00, uu_dDD22)), MulSIMD(wavespeed, wavespeed));
WriteSIMD(&rhs_gfs[IDX4(UUGF, i0, i1, i2)], __RHS_exp_0);
WriteSIMD(&rhs_gfs[IDX4(VVGF, i0, i1, i2)], __RHS_exp_1);
}
###Markdown
Step 3.a: Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation1}$$Here, as a code validation check, we verify agreement in the SymPy expressions for the RHSs of the three-spatial-dimension Scalar Wave equation (i.e., `uu_rhs` and `vv_rhs`) between 1. this tutorial and 2. the [NRPy+ ScalarWave.ScalarWave_RHSs](../edit/ScalarWave/ScalarWave_RHSs.py) module.
###Code
# Step 10: We already have SymPy expressions for uu_rhs and vv_rhs in
# terms of other SymPy variables. Even if we reset the list
# of NRPy+ gridfunctions, these *SymPy* expressions for
# uu_rhs and vv_rhs *will remain unaffected*.
#
# Here, we will use the above-defined uu_rhs and vv_rhs to
# validate against the same expressions in the
# ScalarWave/ScalarWave_RHSs.py module,
# to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the ScalarWave_RHSs.py module itself.
#
# Reset the list of gridfunctions, as registering a gridfunction
# twice will spawn an error.
gri.glb_gridfcs_list = []
# Step 11: Call the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module,
# which should do exactly the same as in Steps 1-10 above.
import ScalarWave.ScalarWave_RHSs as swrhs
swrhs.ScalarWave_RHSs()
# Step 12: Consistency check between the tutorial notebook above
# and the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_rhs - swrhs.uu_rhs = "+str(sp.simplify(uu_rhs - swrhs.uu_rhs))+"\t\t (should be zero)")
print("vv_rhs - swrhs.vv_rhs = "+str(sp.simplify(vv_rhs - swrhs.vv_rhs))+"\t\t (should be zero)")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_rhs - swrhs.uu_rhs = 0 (should be zero)
vv_rhs - swrhs.vv_rhs = 0 (should be zero)
###Markdown
Step 4: Plane-Wave Solution of the Scalar Wave Equation \[Back to [top](toc)\]$$\label{planewavesoln}$$The solution to the scalar wave equation for a monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction is$$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$where $\hat{k}$ is a unit vector. We choose $f(\hat{k}\cdot\vec{x} - c t)$ to take the form$$f(\hat{k}\cdot\vec{x} - c t) = \sin\left(\hat{k}\cdot\vec{x} - c t\right) + 2,$$where we add the $+2$ to ensure that the exact solution never crosses through zero. In places where the exact solution passes through zero, the relative error (i.e., the measure of error to compare numerical with exact results) is undefined. Also, $f(\hat{k}\cdot\vec{x} - c t)$ plus a constant is still a solution to the wave equation.
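For reference, the time derivative that defines $v$ is then$$v(\vec{x},t) = \partial_t u(\vec{x},t) = -c\,\cos\left(\hat{k}\cdot\vec{x} - c t\right),$$which is what the `sp.diff(uu_ID, time)` call in the next cell produces symbolically (the constant $+2$ drops out).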
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
kk = par.Cparameters("REAL", thismodule, ["kk0", "kk1", "kk2"],[1.0,1.0,1.0])
# Step 3: Normalize the k vector
kk_norm = sp.sqrt(kk[0]**2 + kk[1]**2 + kk[2]**2)
# Step 4: Compute k.x
dot_product = sp.sympify(0)
for i in range(DIM):
dot_product += xx[i]*kk[i]
dot_product /= kk_norm
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID = sp.sin(dot_product - wavespeed*time)+2
vv_ID = sp.diff(uu_ID, time)
###Output
_____no_output_____
###Markdown
Next we verify that $f(\hat{k}\cdot\vec{x} - c t)$ satisfies the wave equation, by computing$$\left(c^2 \nabla^2 - \partial_t^2 \right)\ f\left(\hat{k}\cdot\vec{x} - c t\right),$$and confirming the result is exactly zero.
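The check below succeeds for a simple reason: writing $\phi \equiv \hat{k}\cdot\vec{x} - c t$ with $|\hat{k}| = 1$, the chain rule gives $\nabla^2 f(\phi) = |\hat{k}|^2 f''(\phi) = f''(\phi)$ and $\partial_t^2 f(\phi) = c^2 f''(\phi)$, so$$\left(c^2 \nabla^2 - \partial_t^2 \right) f(\phi) = c^2 f''(\phi) - c^2 f''(\phi) = 0.$$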
###Code
sp.simplify(wavespeed**2*(sp.diff(uu_ID,xx[0],2) +
sp.diff(uu_ID,xx[1],2) +
sp.diff(uu_ID,xx[2],2))
- sp.diff(uu_ID,time,2))
###Output
_____no_output_____
###Markdown
Step 4.a: Code Validation against `ScalarWave.InitialData_PlaneWave` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation2}$$As a code validation check, we will verify agreement in the SymPy expressions for plane-wave initial data for the Scalar Wave equation between 1. this tutorial and 2. the NRPy+ [ScalarWave.InitialData_PlaneWave](../edit/ScalarWave/InitialData_PlaneWave.py) module.
###Code
# We just defined SymPy expressions for uu_ID and vv_ID in
# terms of other SymPy variables. Here, we will use the
# above-defined uu_ID and vv_ID to validate against the
# same expressions in the ScalarWave/InitialData_PlaneWave.py
# module, to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the PlaneWave ID module itself.
#
# Step 6: Call the InitialData_PlaneWave() function from within the
# ScalarWave/InitialData_PlaneWave.py module,
# which should do exactly the same as in Steps 1-5 above.
import ScalarWave.InitialData_PlaneWave as swid
swid.InitialData_PlaneWave()
# Step 7: Consistency check between the tutorial notebook above
# and the InitialData_PlaneWave() function from within the
# ScalarWave/InitialData_PlaneWave.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_ID - swid.uu_ID = "+str(sp.simplify(uu_ID - swid.uu_ID))+"\t\t (should be zero)")
print("vv_ID - swid.vv_ID = "+str(sp.simplify(vv_ID - swid.vv_ID))+"\t\t (should be zero)")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_ID - swid.uu_ID = 0 (should be zero)
vv_ID - swid.vv_ID = 0 (should be zero)
###Markdown
Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-ScalarWave.pdf](Tutorial-ScalarWave.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-ScalarWave.ipynb
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!rm -f Tut*.out Tut*.aux Tut*.log
###Output
[NbConvertApp] Converting notebook Tutorial-ScalarWave.ipynb to latex
[NbConvertApp] Writing 64856 bytes to Tutorial-ScalarWave.tex
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
This is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)
restricted \write18 enabled.
entering extended mode
###Markdown
Generating C Code for the Scalar Wave Equation in Cartesian Coordinates NRPy+ Source Code for this module: [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py); [ScalarWave/InitialData_PlaneWave.py](../edit/ScalarWave/InitialData_PlaneWave.py) Problem StatementWe wish to numerically solve the scalar wave equation as an [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem) in Cartesian coordinates:$$\partial_t^2 u = c^2 \nabla^2 u \text{,}$$where $u$ (the amplitude of the wave) is a function of time and space: $u = u(t,x,y,...)$ (spatial dimension as-yet unspecified) and $c$ is the wave speed, subject to some initial condition$$u(0,x,y,...) = f(x,y,...)$$and suitable spatial boundary conditions.As described in the next section, we will find it quite useful to define$$v(t,x,y,...) = \partial_t u(t,x,y,...).$$In this way, the second-order PDE is reduced to a set of two coupled first-order PDEs\begin{align}\partial_t u &= v \\\partial_t v &= c^2 \nabla^2 u.\end{align}We will use NRPy+ to generate efficient C codes capable of generating both initial data $u(0,x,y,...) = f(x,y,...)$; $v(0,x,y,...)=g(x,y,...)$, as well as finite-difference expressions for the right-hand sides of the above expressions. These expressions are needed within the *Method of Lines* to "integrate" the solution forward in time. The Method of LinesOnce we have initial data, we "evolve it forward in time", using the [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html). In short, the Method of Lines enables us to handle 1. the **spatial derivatives** of an initial value problem PDE using **standard finite difference approaches**, and2. the **temporal derivatives** of an initial value problem PDE using **standard strategies for solving ordinary differential equations (ODEs)**, so long as the initial value problem PDE can be written in the form$$\partial_t \vec{f} = \mathbf{M}\ \vec{f},$$where $\mathbf{M}$ is an $N\times N$ matrix filled with differential operators that act on the $N$-element column vector $\vec{f}$. $\mathbf{M}$ may not contain $t$ or time derivatives explicitly; only *spatial* partial derivatives are allowed to appear inside $\mathbf{M}$. The scalar wave equation as written in the [previous module](Tutorial-ScalarWave.ipynb)\begin{equation}\partial_t \begin{bmatrix}u \\v \end{bmatrix}=\begin{bmatrix}0 & 1 \\c^2 \nabla^2 & 0 \end{bmatrix}\begin{bmatrix}u \\v \end{bmatrix}\end{equation}satisfies this requirement. Thus we can treat the spatial derivatives $\nabla^2 u$ of the scalar wave equation using **standard finite-difference approaches**, and the temporal derivatives $\partial_t u$ and $\partial_t v$ using **standard approaches for solving ODEs**. In [the next module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will apply the highly robust [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4), used widely for numerically solving ODEs, to "march" (integrate) the solution vector $\vec{f}$ forward in time from its initial value ("initial data"). Basic AlgorithmThe basic algorithm for solving the scalar wave equation [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem), based on the Method of Lines (see section above) is outlined below, with NRPy+-based components highlighted in green. We will review how NRPy+ generates these core components in this module.1. 
Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. Set gridfunction values to initial data.1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following: 1. Evaluate scalar wave RHS expressions. 1. Apply boundary conditions.**We refer to the right-hand side of the equation $\partial_t \vec{f} = \mathbf{M}\ \vec{f}$ as the RHS. In this case, we refer to the $\mathbf{M}\ \vec{f}$ as the "scalar wave RHSs".** In the following sections we will 1. Use NRPy+ to cast the scalar wave RHS expressions -- in finite difference form -- into highly efficient C code, 1. first in one spatial dimension with fourth-order finite differences, 1. and then in three spatial dimensions with tenth-order finite differences.1. Use NRPy+ to generate monochromatic plane-wave initial data for the scalar wave equation, where the wave propagates in an arbitrary direction.As for the $\nabla^2 u$ term, spatial derivatives are handled in NRPy+ via [finite differencing](https://en.wikipedia.org/wiki/Finite_difference).We will sample the solution $\{u,v\}$ at discrete, uniformly-sampled points in space and time. For simplicity, let's assume that we consider the wave equation in one spatial dimension. Then the solution at any sampled point in space and time is given by$$u^n_i = u(t_n,x_i) = u(t_0 + n \Delta t, x_0 + i \Delta x),$$where $\Delta t$ and $\Delta x$ represent the temporal and spatial resolution, respectively. $v^n_i$ is sampled at the same points in space and time. Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite DifferencingTo minimize complication, we will first restrict ourselves to solving the wave equation in one spatial dimension, so$$\nabla^2 u = \partial_x^2 u.$$Extension of this operator to higher spatial dimensions is straightforward, particularly when using NRPy+.As was discussed in [the finite difference section of the tutorial](Tutorial-Finite_Difference_Derivatives.ipynb), NRPy+ approximates derivatives using [finite difference methods](), the second-order derivative $\partial_x^2$ accurate to fourth-order in uniform grid spacing $\Delta x$ (from fitting the unique 4th-degree polynomial to 5 sample points of $u$) is given by\begin{equation}\left[\partial_x^2 u(t,x)\right]_j = \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right)+ \mathcal{O}\left((\Delta x)^4\right).\end{equation}
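As a quick numerical illustration of that error estimate (a standalone NumPy sketch; the helper function below is ours, not part of NRPy+), applying the five-point stencil to $\sin(x)$ and halving $\Delta x$ should shrink the maximum error by roughly $2^4 = 16$:

```python
# Standalone NumPy check that the 5-point stencil above is 4th-order accurate.
# The helper below is illustrative only and is not part of NRPy+.
import numpy as np

def second_deriv_4th_order(u, dx):
    """Apply the interior 5-point, 4th-order second-derivative stencil to a 1D array."""
    return (-(u[4:] + u[:-4])/12.0 + 4.0*(u[3:-1] + u[1:-3])/3.0 - 2.5*u[2:-2]) / dx**2

for N in [32, 64, 128, 256]:
    x = np.linspace(0.0, 2.0*np.pi, N, endpoint=False)
    dx = x[1] - x[0]
    error = np.max(np.abs(second_deriv_4th_order(np.sin(x), dx) + np.sin(x[2:-2])))
    print(N, error)   # each doubling of N should reduce the error by ~16x
```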
###Code
# Step P1: Import needed NRPy+ core modules:
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
import finite_difference as fin
from outputC import *
# Step P2: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
thismodule = "ScalarWave"
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed")
# Step 1: Set the spatial dimension parameter, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",1)
DIM = par.parval_from_str("grid::DIM")
# Step 2: Set the finite differencing order to 4.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4)
# Step 3: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 4: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 5: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
vv_rhs = sp.simplify(vv_rhs)
# Step 6: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)])
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expression:
* "const double uu_dDD00 = invdx0**2*(-5*uu/2 + 4*uu_i0m1/3 - uu_i0m2/12 + 4*uu_i0p1/3 - uu_i0p2/12)"
*/
const double uu_i0m2 = in_gfs[IDX2(UUGF, i0-2)];
const double uu_i0m1 = in_gfs[IDX2(UUGF, i0-1)];
const double uu = in_gfs[IDX2(UUGF, i0)];
const double uu_i0p1 = in_gfs[IDX2(UUGF, i0+1)];
const double uu_i0p2 = in_gfs[IDX2(UUGF, i0+2)];
const double vv = in_gfs[IDX2(VVGF, i0)];
const double uu_dDD00 = pow(invdx0, 2)*(-5.0/2.0*uu + (4.0/3.0)*uu_i0m1 - 1.0/12.0*uu_i0m2 + (4.0/3.0)*uu_i0p1 - 1.0/12.0*uu_i0p2);
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[rhs_gfs[IDX2(UUGF, i0)] = vv,
* rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*wavespeed**2]"
*/
rhs_gfs[IDX2(UUGF, i0)] = vv;
rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*pow(wavespeed, 2);
}
###Markdown
**Success!** Notice that indeed NRPy+ was able to compute the spatial derivative operator,\begin{equation}\left[\partial_x^2 u(t,x)\right]_j \approx \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right),\end{equation}correctly (this is easiest to read in the "Original SymPy expressions" comment block at the top of the C output). Note that $\texttt{invdx0}=1/\Delta x_0$, where $\Delta x_0$ is the (uniform) grid spacing in the zeroth, or $x_0$ direction. Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing: Let's next repeat the same process, only this time at **10th** finite difference order, for the **3-spatial-dimension** scalar wave equation, with SIMD enabled:
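The SIMD kernel below is harder to read than the plain C above only because each stencil sum is written as a nested chain of fused multiply-adds. As a plain-Python sketch of what those calls compute (scalar stand-ins, purely illustrative; the real `REAL_SIMD_ARRAY` intrinsics operate on vector registers), the nesting is just the ordinary weighted sum:

```python
# Scalar stand-ins for the SIMD intrinsics appearing in the generated code below.
# Illustrative only: the real intrinsics act on SIMD vector registers.
def MulSIMD(a, b):            return a * b
def AddSIMD(a, b):            return a + b
def FusedMulAddSIMD(a, b, c): return a * b + c   # a*b + c, fused into one operation

# The nested FusedMulAddSIMD chain is just a weighted sum c0*u0 + c1*u1 + c2*u2:
u0, u1, u2 = 1.7, -0.3, 2.2
c0, c1, c2 = -2.5, 4.0/3.0, -1.0/12.0
nested = FusedMulAddSIMD(u2, c2, FusedMulAddSIMD(u1, c1, MulSIMD(u0, c0)))
print(nested, c0*u0 + c1*u1 + c2*u2)   # agree up to floating-point rounding
```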
###Code
# Step 1: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed")
# Step 2: Set the spatial dimension parameter
#             to *THREE* this time, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")
# Step 3: Set the finite differencing order to 10.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",10)
# Step 4a: Reset gridfunctions registered in 1D case above,
# to avoid NRPy+ throwing an error about double-
# registering gridfunctions, which is not allowed.
gri.glb_gridfcs_list = []
# Step 4b: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 5: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 6: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
# Step 7: Simplify the expression for c^2 \nabla^2 u (a.k.a., vv_rhs):
vv_rhs = sp.simplify(vv_rhs)
# Step 8: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)],params="SIMD_enable=True")
###Output
initialize_param() minor warning: Did nothing; already initialized parameter ScalarWave::wavespeed
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY uu_dDD00 = invdx0**2*(-5269*uu/1800 + 5*uu_i0m1_i1_i2/3 - 5*uu_i0m2_i1_i2/21 + 5*uu_i0m3_i1_i2/126 - 5*uu_i0m4_i1_i2/1008 + uu_i0m5_i1_i2/3150 + 5*uu_i0p1_i1_i2/3 - 5*uu_i0p2_i1_i2/21 + 5*uu_i0p3_i1_i2/126 - 5*uu_i0p4_i1_i2/1008 + uu_i0p5_i1_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD11 = invdx1**2*(-5269*uu/1800 + 5*uu_i0_i1m1_i2/3 - 5*uu_i0_i1m2_i2/21 + 5*uu_i0_i1m3_i2/126 - 5*uu_i0_i1m4_i2/1008 + uu_i0_i1m5_i2/3150 + 5*uu_i0_i1p1_i2/3 - 5*uu_i0_i1p2_i2/21 + 5*uu_i0_i1p3_i2/126 - 5*uu_i0_i1p4_i2/1008 + uu_i0_i1p5_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD22 = invdx2**2*(-5269*uu/1800 + 5*uu_i0_i1_i2m1/3 - 5*uu_i0_i1_i2m2/21 + 5*uu_i0_i1_i2m3/126 - 5*uu_i0_i1_i2m4/1008 + uu_i0_i1_i2m5/3150 + 5*uu_i0_i1_i2p1/3 - 5*uu_i0_i1_i2p2/21 + 5*uu_i0_i1_i2p3/126 - 5*uu_i0_i1_i2p4/1008 + uu_i0_i1_i2p5/3150)]"
*/
const REAL_SIMD_ARRAY uu_i0_i1_i2m5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-5)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-1)]);
const REAL_SIMD_ARRAY uu_i0_i1m5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-1,i2)]);
const REAL_SIMD_ARRAY uu_i0m5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-1,i1,i2)]);
const REAL_SIMD_ARRAY uu = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+1,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+1)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+5)]);
const REAL_SIMD_ARRAY vv = ReadSIMD(&in_gfs[IDX4(VVGF, i0,i1,i2)]);
const double tmpFD_Rational_1_3150 = 0.0003174603174603174603174603174603175;
const REAL_SIMD_ARRAY _Rational_1_3150 = Set1SIMD(tmpFD_Rational_1_3150);
const double tmpFD_Rational_5_126 = 0.03968253968253968253968253968253968;
const REAL_SIMD_ARRAY _Rational_5_126 = Set1SIMD(tmpFD_Rational_5_126);
const double tmpFD_Rational_5_3 = 1.666666666666666666666666666666667;
const REAL_SIMD_ARRAY _Rational_5_3 = Set1SIMD(tmpFD_Rational_5_3);
const double tmpFD_Rational_m5269_1800 = -2.927222222222222222222222222222222;
const REAL_SIMD_ARRAY _Rational_m5269_1800 = Set1SIMD(tmpFD_Rational_m5269_1800);
const double tmpFD_Rational_m5_1008 = -0.004960317460317460317460317460317460;
const REAL_SIMD_ARRAY _Rational_m5_1008 = Set1SIMD(tmpFD_Rational_m5_1008);
const double tmpFD_Rational_m5_21 = -0.2380952380952380952380952380952381;
const REAL_SIMD_ARRAY _Rational_m5_21 = Set1SIMD(tmpFD_Rational_m5_21);
const REAL_SIMD_ARRAY tmpFD0 = MulSIMD(uu, _Rational_m5269_1800);
const REAL_SIMD_ARRAY uu_dDD00 = MulSIMD(FusedMulAddSIMD(uu_i0p4_i1_i2, _Rational_m5_1008, FusedMulAddSIMD(uu_i0p3_i1_i2, _Rational_5_126, FusedMulAddSIMD(uu_i0p2_i1_i2, _Rational_m5_21, FusedMulAddSIMD(uu_i0p1_i1_i2, _Rational_5_3, FusedMulAddSIMD(uu_i0m5_i1_i2, _Rational_1_3150, FusedMulAddSIMD(uu_i0m4_i1_i2, _Rational_m5_1008, FusedMulAddSIMD(uu_i0m3_i1_i2, _Rational_5_126, FusedMulAddSIMD(uu_i0m2_i1_i2, _Rational_m5_21, FusedMulAddSIMD(uu_i0m1_i1_i2, _Rational_5_3, FusedMulAddSIMD(uu_i0p5_i1_i2, _Rational_1_3150, tmpFD0)))))))))), MulSIMD(invdx0, invdx0));
const REAL_SIMD_ARRAY uu_dDD11 = MulSIMD(FusedMulAddSIMD(uu_i0_i1p4_i2, _Rational_m5_1008, FusedMulAddSIMD(uu_i0_i1p3_i2, _Rational_5_126, FusedMulAddSIMD(uu_i0_i1p2_i2, _Rational_m5_21, FusedMulAddSIMD(uu_i0_i1p1_i2, _Rational_5_3, FusedMulAddSIMD(uu_i0_i1m5_i2, _Rational_1_3150, FusedMulAddSIMD(uu_i0_i1m4_i2, _Rational_m5_1008, FusedMulAddSIMD(uu_i0_i1m3_i2, _Rational_5_126, FusedMulAddSIMD(uu_i0_i1m2_i2, _Rational_m5_21, FusedMulAddSIMD(uu_i0_i1m1_i2, _Rational_5_3, FusedMulAddSIMD(uu_i0_i1p5_i2, _Rational_1_3150, tmpFD0)))))))))), MulSIMD(invdx1, invdx1));
const REAL_SIMD_ARRAY uu_dDD22 = MulSIMD(FusedMulAddSIMD(uu_i0_i1_i2p4, _Rational_m5_1008, FusedMulAddSIMD(uu_i0_i1_i2p3, _Rational_5_126, FusedMulAddSIMD(uu_i0_i1_i2p2, _Rational_m5_21, FusedMulAddSIMD(uu_i0_i1_i2p1, _Rational_5_3, FusedMulAddSIMD(uu_i0_i1_i2m5, _Rational_1_3150, FusedMulAddSIMD(uu_i0_i1_i2m4, _Rational_m5_1008, FusedMulAddSIMD(uu_i0_i1_i2m3, _Rational_5_126, FusedMulAddSIMD(uu_i0_i1_i2m2, _Rational_m5_21, FusedMulAddSIMD(uu_i0_i1_i2m1, _Rational_5_3, FusedMulAddSIMD(uu_i0_i1_i2p5, _Rational_1_3150, tmpFD0)))))))))), MulSIMD(invdx2, invdx2));
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY __RHS_exp_0 = vv,
* const REAL_SIMD_ARRAY __RHS_exp_1 = wavespeed**2*(uu_dDD00 + uu_dDD11 + uu_dDD22)]"
*/
const REAL_SIMD_ARRAY __RHS_exp_0 = vv;
const REAL_SIMD_ARRAY __RHS_exp_1 = MulSIMD(AddSIMD(uu_dDD11, AddSIMD(uu_dDD00, uu_dDD22)), MulSIMD(wavespeed, wavespeed));
WriteSIMD(&rhs_gfs[IDX4(UUGF, i0, i1, i2)], __RHS_exp_0);
WriteSIMD(&rhs_gfs[IDX4(VVGF, i0, i1, i2)], __RHS_exp_1);
}
###Markdown
NRPy+ Module Code Validation: Here, as a code validation check, we verify agreement in the SymPy expressions for the RHSs of the three-spatial-dimension Scalar Wave equation (i.e., uu_rhs and vv_rhs) between 1. this tutorial and 2. the NRPy+ ScalarWave/ScalarWave_RHSs.py module.
###Code
# Step 10: We already have SymPy expressions for uu_rhs and vv_rhs in
# terms of other SymPy variables. Even if we reset the list
# of NRPy+ gridfunctions, these *SymPy* expressions for
# uu_rhs and vv_rhs *will remain unaffected*.
#
# Here, we will use the above-defined uu_rhs and vv_rhs to
# validate against the same expressions in the
# ScalarWave/ScalarWave_RHSs.py module,
# to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the ScalarWave_RHSs.py module itself.
#
# Reset the list of gridfunctions, as registering a gridfunction
# twice will spawn an error.
gri.glb_gridfcs_list = []
# Step 11: Call the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module,
# which should do exactly the same as in Steps 1-10 above.
import ScalarWave.ScalarWave_RHSs as swrhs
swrhs.ScalarWave_RHSs()
# Step 12: Consistency check between the tutorial module above
# and the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module.
#          It is SAFE to ignore the warning from re-initializing the parameter wavespeed.
print("^^^ Ignore the minor warning above. ^^^\n")
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_rhs - swrhs.uu_rhs: Should be zero: ",sp.simplify(uu_rhs - swrhs.uu_rhs))
print("vv_rhs - swrhs.vv_rhs: Should be zero: ",sp.simplify(vv_rhs - swrhs.vv_rhs))
###Output
^^^ Ignore the minor warning above. ^^^
Consistency check between ScalarWave tutorial and NRPy+ module:
('uu_rhs - swrhs.uu_rhs: Should be zero: ', 0)
('vv_rhs - swrhs.vv_rhs: Should be zero: ', 0)
###Markdown
Plane-Wave Initial Data for the Scalar Wave Equation: The solution to the scalar wave equation for a monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction is$$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$where $\hat{k}$ is a unit vector. We choose $f(\hat{k}\cdot\vec{x} - c t)$ to take the form$$f(\hat{k}\cdot\vec{x} - c t) = \sin\left(\hat{k}\cdot\vec{x} - c t\right) + 2,$$where we add the $+2$ to ensure that the exact solution never crosses through zero. In places where the exact solution passes through zero, the relative error (i.e., the measure of error to compare numerical with exact results) is undefined. Also, $f(\hat{k}\cdot\vec{x} - c t)$ plus a constant is still a solution to the wave equation.
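One concrete way to see that this is a wave traveling along $\hat{k}$ at speed $c$: since $\hat{k}$ is a unit vector, $u(\vec{x}, t + \Delta t) = u(\vec{x} - c\,\Delta t\,\hat{k}, t)$. A standalone NumPy spot check of that property (all names below are local to this sketch, not part of the NRPy+ module):

```python
# Standalone spot check that u = sin(khat.x - c*t) + 2 simply translates along
# khat at speed c. Names here are local to this sketch, not NRPy+ quantities.
import numpy as np

khat = np.array([1.0, 1.0, 1.0]) / np.sqrt(3.0)   # unit propagation direction
c = 1.0                                           # wave speed

def u(x, t):
    return np.sin(np.dot(khat, x) - c*t) + 2.0

x = np.array([0.3, -0.7, 0.5])
t, dt = 0.4, 0.25
print(u(x, t + dt), u(x - c*dt*khat, t))   # the two values agree to roundoff
```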
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL",thismodule,"time")
kk = par.Cparameters("REAL",thismodule,["kk0","kk1","kk2"])
# Step 3: Normalize the k vector
kk_norm = sp.sqrt(kk[0]**2 + kk[1]**2 + kk[2]**2)
# Step 4: Compute k.x
dot_product = sp.sympify(0)
for i in range(DIM):
dot_product += xx[i]*kk[i]
dot_product /= kk_norm
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID = sp.sin(dot_product - wavespeed*time)+2
vv_ID = sp.diff(uu_ID, time)
###Output
_____no_output_____
###Markdown
Next we verify that $f(\hat{k}\cdot\vec{x} - c t)$ satisfies the wave equation, by computing$$\left(c^2 \nabla^2 - \partial_t^2 \right)\ f\left(\hat{k}\cdot\vec{x} - c t\right),$$and confirming the result is exactly zero.
###Code
sp.simplify(wavespeed**2*(sp.diff(uu_ID,xx[0],2) +
sp.diff(uu_ID,xx[1],2) +
sp.diff(uu_ID,xx[2],2))
- sp.diff(uu_ID,time,2))
###Output
_____no_output_____
###Markdown
NRPy+ Module Code Validation: As a code validation check, we will verify agreement in the SymPy expressions for plane-wave initial data for the Scalar Wave equation between 1. this tutorial and 2. the NRPy+ ScalarWave/InitialData_PlaneWave.py module.
###Code
# We just defined SymPy expressions for uu_ID and vv_ID in
# terms of other SymPy variables. Here, we will use the
# above-defined uu_ID and vv_ID to validate against the
# same expressions in the ScalarWave/InitialData_PlaneWave.py
# module, to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the PlaneWave ID module itself.
#
# Step 6: Call the InitialData_PlaneWave() function from within the
# ScalarWave/InitialData_PlaneWave.py module,
# which should do exactly the same as in Steps 1-5 above.
import ScalarWave.InitialData_PlaneWave as swid
swid.InitialData_PlaneWave()
# Step 7: Consistency check between the tutorial module above
# and the InitialData_PlaneWave() function from within the
# ScalarWave/InitialData_PlaneWave.py module.
# It is SAFE to ignore the warning from re-initializing the parameter RMAX.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_ID - swid.uu_ID: Should be zero: ",sp.simplify(uu_ID - swid.uu_ID))
print("vv_ID - swid.vv_ID: Should be zero: ",sp.simplify(vv_ID - swid.vv_ID))
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
('uu_ID - swid.uu_ID: Should be zero: ', 0)
('vv_ID - swid.vv_ID: Should be zero: ', 0)
###Markdown
Generating C Code for the Scalar Wave Equation in Cartesian Coordinates Author: Zach Etienne Formatting improvements courtesy Brandon Clark This module generates the C Code for the Scalarwave in Cartesian coordinates and sets up [Plane Wave Initial Data](https://en.wikipedia.org/wiki/Initial_value_problem). **Module Status:** Validated **Validation Notes:** This tutorial module has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](code_validation). In addition, all expressions have been validated against a trusted code (the [original SENR/NRPy+ code](https://bitbucket.org/zach_etienne/nrpy)). NRPy+ Source Code for this module: * [ScalarWave/ScalarWave_RHSs.py](../edit/ScalarWave/ScalarWave_RHSs.py)* [ScalarWave/InitialData_PlaneWave.py](../edit/ScalarWave/InitialData_PlaneWave.py) Introduction: Problem StatementWe wish to numerically solve the scalar wave equation as an [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem) in Cartesian coordinates:$$\partial_t^2 u = c^2 \nabla^2 u \text{,}$$where $u$ (the amplitude of the wave) is a function of time and space: $u = u(t,x,y,...)$ (spatial dimension as-yet unspecified) and $c$ is the wave speed, subject to some initial condition$$u(0,x,y,...) = f(x,y,...)$$and suitable spatial boundary conditions.As described in the next section, we will find it quite useful to define$$v(t,x,y,...) = \partial_t u(t,x,y,...).$$In this way, the second-order PDE is reduced to a set of two coupled first-order PDEs\begin{align}\partial_t u &= v \\\partial_t v &= c^2 \nabla^2 u.\end{align}We will use NRPy+ to generate efficient C codes capable of generating both initial data $u(0,x,y,...) = f(x,y,...)$; $v(0,x,y,...)=g(x,y,...)$, as well as finite-difference expressions for the right-hand sides of the above expressions. These expressions are needed within the *Method of Lines* to "integrate" the solution forward in time. The Method of LinesOnce we have initial data, we "evolve it forward in time", using the [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html). In short, the Method of Lines enables us to handle 1. the **spatial derivatives** of an initial value problem PDE using **standard finite difference approaches**, and2. the **temporal derivatives** of an initial value problem PDE using **standard strategies for solving ordinary differential equations (ODEs)**, so long as the initial value problem PDE can be written in the form$$\partial_t \vec{f} = \mathbf{M}\ \vec{f},$$where $\mathbf{M}$ is an $N\times N$ matrix filled with differential operators that act on the $N$-element column vector $\vec{f}$. $\mathbf{M}$ may not contain $t$ or time derivatives explicitly; only *spatial* partial derivatives are allowed to appear inside $\mathbf{M}$. The scalar wave equation as written in the [previous module](Tutorial-ScalarWave.ipynb)\begin{equation}\partial_t \begin{bmatrix}u \\v \end{bmatrix}=\begin{bmatrix}0 & 1 \\c^2 \nabla^2 & 0 \end{bmatrix}\begin{bmatrix}u \\v \end{bmatrix}\end{equation}satisfies this requirement. Thus we can treat the spatial derivatives $\nabla^2 u$ of the scalar wave equation using **standard finite-difference approaches**, and the temporal derivatives $\partial_t u$ and $\partial_t v$ using **standard approaches for solving ODEs**. 
In [the next module](Tutorial-Start_to_Finish-ScalarWave.ipynb), we will apply the highly robust [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4), used widely for numerically solving ODEs, to "march" (integrate) the solution vector $\vec{f}$ forward in time from its initial value ("initial data"). Basic AlgorithmThe basic algorithm for solving the scalar wave equation [initial value problem](https://en.wikipedia.org/wiki/Initial_value_problem), based on the Method of Lines (see section above) is outlined below, with NRPy+-based components highlighted in green. We will review how NRPy+ generates these core components in this module.1. Allocate memory for gridfunctions, including temporary storage for the RK4 time integration.1. Set gridfunction values to initial data.1. Evolve the system forward in time using RK4 time integration. At each RK4 substep, do the following: 1. Evaluate scalar wave RHS expressions. 1. Apply boundary conditions.**We refer to the right-hand side of the equation $\partial_t \vec{f} = \mathbf{M}\ \vec{f}$ as the RHS. In this case, we refer to the $\mathbf{M}\ \vec{f}$ as the "scalar wave RHSs".** In the following sections we will 1. Use NRPy+ to cast the scalar wave RHS expressions -- in finite difference form -- into highly efficient C code, 1. first in one spatial dimension with fourth-order finite differences, 1. and then in three spatial dimensions with tenth-order finite differences.1. Use NRPy+ to generate monochromatic plane-wave initial data for the scalar wave equation, where the wave propagates in an arbitrary direction.As for the $\nabla^2 u$ term, spatial derivatives are handled in NRPy+ via [finite differencing](https://en.wikipedia.org/wiki/Finite_difference).We will sample the solution $\{u,v\}$ at discrete, uniformly-sampled points in space and time. For simplicity, let's assume that we consider the wave equation in one spatial dimension. Then the solution at any sampled point in space and time is given by$$u^n_i = u(t_n,x_i) = u(t_0 + n \Delta t, x_0 + i \Delta x),$$where $\Delta t$ and $\Delta x$ represent the temporal and spatial resolution, respectively. $v^n_i$ is sampled at the same points in space and time. Table of Contents$$\label{toc}$$1. [Step 1](initializenrpy): Initialize core NRPy+ modules1. [Step 2](step2): Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing1. [Step 3](step3): Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing 1. [Step 3.a](code_validation1): Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module1. [Step 4](step4): Plane-Wave Initial Data for the Scalar Wave Equation 1. [Step 4.a](code_validation2): Code Validation against `ScalarWave.InitialData_PlaneWave` NRPy+ module1. [Step 5](latex_pdf_output): Output this module to $\LaTeX$-formatted PDF file Step 1: Initialize core NRPy+ modules \[Back to [top](toc)\]$$\label{initializenrpy}$$Let's start by importing all the needed modules from NRPy+:
###Code
# Step P1: Import needed NRPy+ core modules:
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
import finite_difference as fin
from outputC import *
###Output
_____no_output_____
###Markdown
Step 2: Scalar Wave RHSs in One Spatial Dimension, Fourth-Order Finite Differencing \[Back to [top](toc)\]$$\label{step2}$$To minimize complication, we will first restrict ourselves to solving the wave equation in one spatial dimension, so$$\nabla^2 u = \partial_x^2 u.$$Extension of this operator to higher spatial dimensions is straightforward, particularly when using NRPy+.As was discussed in [the finite difference section of the tutorial](Tutorial-Finite_Difference_Derivatives.ipynb), NRPy+ approximates derivatives using [finite difference methods](https://en.wikipedia.org/wiki/Finite_difference). The approximation of the second-order derivative $\partial_x^2 u$, accurate to fourth order in the uniform grid spacing $\Delta x$ (obtained by fitting the unique 4th-degree polynomial through 5 sample points of $u$), is given by\begin{equation}\left[\partial_x^2 u(t,x)\right]_j = \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right)+ \mathcal{O}\left((\Delta x)^4\right).\end{equation}
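As a quick, NRPy+-independent sanity check of the fourth-order accuracy claim, we can apply this stencil to $u(x)=\sin(x)$ (whose exact second derivative is $-\sin(x)$) and watch the error shrink by roughly a factor of $2^4=16$ each time $\Delta x$ is halved. The sample point and step sizes below are arbitrary.
###Code
# Numerical check that the 5-point stencil above is fourth-order accurate.
import numpy as np

def stencil_error(dx, x0=0.3):
    u = np.sin
    approx = (-(u(x0 + 2*dx) + u(x0 - 2*dx)) / 12.0
              + 4.0 * (u(x0 + dx) + u(x0 - dx)) / 3.0
              - 2.5 * u(x0)) / dx**2
    return abs(approx - (-np.sin(x0)))

for dx in (0.1, 0.05, 0.025):
    print(dx, stencil_error(dx))
# each halving of dx should reduce the error by roughly a factor of 16
###Output
_____no_output_____
###Markdown
Now let's have NRPy+ generate this stencil for us.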
###Code
# Step P2: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
thismodule = "ScalarWave"
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 1: Set the spatial dimension parameter, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",1)
DIM = par.parval_from_str("grid::DIM")
# Step 2: Set the finite differencing order to 4.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",4)
# Step 3: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 4: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 5: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
vv_rhs = sp.simplify(vv_rhs)
# Step 6: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)])
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expression:
* "const double uu_dDD00 = invdx0**2*(-5*uu/2 + 4*uu_i0m1/3 - uu_i0m2/12 + 4*uu_i0p1/3 - uu_i0p2/12)"
*/
const double uu_i0m2 = in_gfs[IDX2(UUGF, i0-2)];
const double uu_i0m1 = in_gfs[IDX2(UUGF, i0-1)];
const double uu = in_gfs[IDX2(UUGF, i0)];
const double uu_i0p1 = in_gfs[IDX2(UUGF, i0+1)];
const double uu_i0p2 = in_gfs[IDX2(UUGF, i0+2)];
const double vv = in_gfs[IDX2(VVGF, i0)];
const double uu_dDD00 = pow(invdx0, 2)*(-5.0/2.0*uu + (4.0/3.0)*uu_i0m1 - 1.0/12.0*uu_i0m2 + (4.0/3.0)*uu_i0p1 - 1.0/12.0*uu_i0p2);
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[rhs_gfs[IDX2(UUGF, i0)] = vv,
* rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*wavespeed**2]"
*/
rhs_gfs[IDX2(UUGF, i0)] = vv;
rhs_gfs[IDX2(VVGF, i0)] = uu_dDD00*pow(wavespeed, 2);
}
###Markdown
**Success!** Notice that indeed NRPy+ was able to compute the spatial derivative operator,\begin{equation}\left[\partial_x^2 u(t,x)\right]_j \approx \frac{1}{(\Delta x)^2}\left(-\frac{1}{12} \left(u_{j+2} + u_{j-2}\right) + \frac{4}{3} \left(u_{j+1} + u_{j-1}\right)- \frac{5}{2} u_j \right),\end{equation}correctly (this is easier to read in the "Original SymPy expressions" comment block at the top of the C output). Note that `invdx0`$=1/\Delta x_0$, where $\Delta x_0$ is the (uniform) grid spacing in the zeroth, or $x_0$, direction. Step 3: Scalar Wave RHSs in Three Spatial Dimensions, Tenth-Order Finite Differencing \[Back to [top](toc)\]$$\label{step3}$$Let's next repeat the same process, only this time at **10th** finite-difference order, for the **3-spatial-dimension** scalar wave equation, with SIMD enabled:
###Code
# Step 1: Define the C parameter wavespeed. The `wavespeed`
# variable is a proper SymPy variable, so it can be
# used in below expressions. In the C code, it acts
# just like a usual parameter, whose value is
# specified in the parameter file.
wavespeed = par.Cparameters("REAL",thismodule,"wavespeed", 1.0)
# Step 2: Set the spatial dimension parameter
# to *THREE* this time, and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")
# Step 3: Set the finite differencing order to 10.
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER",10)
# Step 4a: Reset gridfunctions registered in 1D case above,
# to avoid NRPy+ throwing an error about double-
# registering gridfunctions, which is not allowed.
gri.glb_gridfcs_list = []
# Step 4b: Register gridfunctions that are needed as input
# to the scalar wave RHS expressions.
uu, vv = gri.register_gridfunctions("EVOL",["uu","vv"])
# Step 5: Declare the rank-2 indexed expression \partial_{ij} u,
# which is symmetric about interchange of indices i and j
# Derivative variables like these must have an underscore
# in them, so the finite difference module can parse the
# variable name properly.
uu_dDD = ixp.declarerank2("uu_dDD","sym01")
# Step 6: Define right-hand sides for the evolution.
uu_rhs = vv
vv_rhs = 0
for i in range(DIM):
vv_rhs += wavespeed*wavespeed*uu_dDD[i][i]
# Step 7: Simplify the expression for c^2 \nabla^2 u (a.k.a., vv_rhs):
vv_rhs = sp.simplify(vv_rhs)
# Step 8: Generate C code for scalarwave evolution equations,
# print output to the screen (standard out, or stdout).
fin.FD_outputC("stdout",
[lhrh(lhs=gri.gfaccess("rhs_gfs","uu"),rhs=uu_rhs),
lhrh(lhs=gri.gfaccess("rhs_gfs","vv"),rhs=vv_rhs)],params="SIMD_enable=True")
###Output
{
/*
* NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY uu_dDD00 = invdx0**2*(-5269*uu/1800 + 5*uu_i0m1_i1_i2/3 - 5*uu_i0m2_i1_i2/21 + 5*uu_i0m3_i1_i2/126 - 5*uu_i0m4_i1_i2/1008 + uu_i0m5_i1_i2/3150 + 5*uu_i0p1_i1_i2/3 - 5*uu_i0p2_i1_i2/21 + 5*uu_i0p3_i1_i2/126 - 5*uu_i0p4_i1_i2/1008 + uu_i0p5_i1_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD11 = invdx1**2*(-5269*uu/1800 + 5*uu_i0_i1m1_i2/3 - 5*uu_i0_i1m2_i2/21 + 5*uu_i0_i1m3_i2/126 - 5*uu_i0_i1m4_i2/1008 + uu_i0_i1m5_i2/3150 + 5*uu_i0_i1p1_i2/3 - 5*uu_i0_i1p2_i2/21 + 5*uu_i0_i1p3_i2/126 - 5*uu_i0_i1p4_i2/1008 + uu_i0_i1p5_i2/3150),
* const REAL_SIMD_ARRAY uu_dDD22 = invdx2**2*(-5269*uu/1800 + 5*uu_i0_i1_i2m1/3 - 5*uu_i0_i1_i2m2/21 + 5*uu_i0_i1_i2m3/126 - 5*uu_i0_i1_i2m4/1008 + uu_i0_i1_i2m5/3150 + 5*uu_i0_i1_i2p1/3 - 5*uu_i0_i1_i2p2/21 + 5*uu_i0_i1_i2p3/126 - 5*uu_i0_i1_i2p4/1008 + uu_i0_i1_i2p5/3150)]"
*/
const REAL_SIMD_ARRAY uu_i0_i1_i2m5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-5)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2m1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2-1)]);
const REAL_SIMD_ARRAY uu_i0_i1m5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1m1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1-1,i2)]);
const REAL_SIMD_ARRAY uu_i0m5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0m1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0-1,i1,i2)]);
const REAL_SIMD_ARRAY uu = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p1_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+1,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p2_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+2,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p3_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+3,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p4_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+4,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0p5_i1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0+5,i1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p1_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+1,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p2_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+2,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p3_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+3,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p4_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+4,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1p5_i2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1+5,i2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p1 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+1)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p2 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+2)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p3 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+3)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p4 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+4)]);
const REAL_SIMD_ARRAY uu_i0_i1_i2p5 = ReadSIMD(&in_gfs[IDX4(UUGF, i0,i1,i2+5)]);
const REAL_SIMD_ARRAY vv = ReadSIMD(&in_gfs[IDX4(VVGF, i0,i1,i2)]);
const double tmpFD_Rational_1_3150_FDcoeff = 0.0003174603174603174603174603174603175;
const REAL_SIMD_ARRAY _Rational_1_3150_FDcoeff = ConstSIMD(tmpFD_Rational_1_3150_FDcoeff);
const double tmpFD_Rational_5_126_FDcoeff = 0.03968253968253968253968253968253968;
const REAL_SIMD_ARRAY _Rational_5_126_FDcoeff = ConstSIMD(tmpFD_Rational_5_126_FDcoeff);
const double tmpFD_Rational_5_3_FDcoeff = 1.666666666666666666666666666666667;
const REAL_SIMD_ARRAY _Rational_5_3_FDcoeff = ConstSIMD(tmpFD_Rational_5_3_FDcoeff);
const double tmpFD_Rational_m5269_1800_FDcoeff = -2.927222222222222222222222222222222;
const REAL_SIMD_ARRAY _Rational_m5269_1800_FDcoeff = ConstSIMD(tmpFD_Rational_m5269_1800_FDcoeff);
const double tmpFD_Rational_m5_1008_FDcoeff = -0.004960317460317460317460317460317460;
const REAL_SIMD_ARRAY _Rational_m5_1008_FDcoeff = ConstSIMD(tmpFD_Rational_m5_1008_FDcoeff);
const double tmpFD_Rational_m5_21_FDcoeff = -0.2380952380952380952380952380952381;
const REAL_SIMD_ARRAY _Rational_m5_21_FDcoeff = ConstSIMD(tmpFD_Rational_m5_21_FDcoeff);
const REAL_SIMD_ARRAY tmpFD0 = MulSIMD(uu, _Rational_m5269_1800_FDcoeff);
const REAL_SIMD_ARRAY uu_dDD00 = MulSIMD(FusedMulAddSIMD(uu_i0p4_i1_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0p3_i1_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0p2_i1_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0p1_i1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0m5_i1_i2, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0m4_i1_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0m3_i1_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0m2_i1_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0m1_i1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0p5_i1_i2, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx0, invdx0));
const REAL_SIMD_ARRAY uu_dDD11 = MulSIMD(FusedMulAddSIMD(uu_i0_i1p4_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1p3_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1p2_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1p1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1m5_i2, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0_i1m4_i2, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1m3_i2, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1m2_i2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1m1_i2, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1p5_i2, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx1, invdx1));
const REAL_SIMD_ARRAY uu_dDD22 = MulSIMD(FusedMulAddSIMD(uu_i0_i1_i2p4, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p3, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p1, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m5, _Rational_1_3150_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m4, _Rational_m5_1008_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m3, _Rational_5_126_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m2, _Rational_m5_21_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2m1, _Rational_5_3_FDcoeff, FusedMulAddSIMD(uu_i0_i1_i2p5, _Rational_1_3150_FDcoeff, tmpFD0)))))))))), MulSIMD(invdx2, invdx2));
/*
* NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:
*/
/*
* Original SymPy expressions:
* "[const REAL_SIMD_ARRAY __RHS_exp_0 = vv,
* const REAL_SIMD_ARRAY __RHS_exp_1 = wavespeed**2*(uu_dDD00 + uu_dDD11 + uu_dDD22)]"
*/
const REAL_SIMD_ARRAY __RHS_exp_0 = vv;
const REAL_SIMD_ARRAY __RHS_exp_1 = MulSIMD(AddSIMD(uu_dDD11, AddSIMD(uu_dDD00, uu_dDD22)), MulSIMD(wavespeed, wavespeed));
WriteSIMD(&rhs_gfs[IDX4(UUGF, i0, i1, i2)], __RHS_exp_0);
WriteSIMD(&rhs_gfs[IDX4(VVGF, i0, i1, i2)], __RHS_exp_1);
}
###Markdown
Step 3.a: Code Validation against `ScalarWave.ScalarWave_RHSs` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation1}$$Here, as a code validation check, we verify agreement in the SymPy expressions for the RHSs of the three-spatial-dimension Scalar Wave equation (i.e., `uu_rhs` and `vv_rhs`) between1. this tutorial and 2. the [NRPy+ ScalarWave.ScalarWave_RHSs](../edit/ScalarWave/ScalarWave_RHSs.py) module.
###Code
# Step 10: We already have SymPy expressions for uu_rhs and vv_rhs in
# terms of other SymPy variables. Even if we reset the list
# of NRPy+ gridfunctions, these *SymPy* expressions for
# uu_rhs and vv_rhs *will remain unaffected*.
#
# Here, we will use the above-defined uu_rhs and vv_rhs to
# validate against the same expressions in the
# ScalarWave/ScalarWave_RHSs.py module,
# to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the ScalarWave_RHSs.py module itself.
#
# Reset the list of gridfunctions, as registering a gridfunction
# twice will spawn an error.
gri.glb_gridfcs_list = []
# Step 11: Call the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module,
# which should do exactly the same as in Steps 1-10 above.
import ScalarWave.ScalarWave_RHSs as swrhs
swrhs.ScalarWave_RHSs()
# Step 12: Consistency check between the tutorial module above
# and the ScalarWave_RHSs() function from within the
# ScalarWave/ScalarWave_RHSs.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_rhs - swrhs.uu_rhs = "+str(sp.simplify(uu_rhs - swrhs.uu_rhs))+"\t\t (should be zero)")
print("vv_rhs - swrhs.vv_rhs = "+str(sp.simplify(vv_rhs - swrhs.vv_rhs))+"\t\t (should be zero)")
###Output
Consistency check between ScalarWave tutorial and NRPy+ module:
uu_rhs - swrhs.uu_rhs = 0 (should be zero)
vv_rhs - swrhs.vv_rhs = 0 (should be zero)
###Markdown
Step 4: Plane-Wave Initial Data for the Scalar Wave Equation \[Back to [top](toc)\]$$\label{step4}$$The solution to the scalar wave equation for a monochromatic (single-wavelength) wave traveling in the $\hat{k}$ direction is$$u(\vec{x},t) = f(\hat{k}\cdot\vec{x} - c t),$$where $\hat{k}$ is a unit vector. We choose $f(\hat{k}\cdot\vec{x} - c t)$ to take the form$$f(\hat{k}\cdot\vec{x} - c t) = \sin\left(\hat{k}\cdot\vec{x} - c t\right) + 2,$$where we add the $+2$ to ensure that the exact solution never crosses through zero. In places where the exact solution passes through zero, the relative error (i.e., the measure of error to compare numerical with exact results) is undefined. Also, $f(\hat{k}\cdot\vec{x} - c t)$ plus a constant is still a solution to the wave equation.
###Code
# Step 1: Set parameters defined in other modules
xx = gri.xx
# Step 2: Declare free parameters intrinsic to these initial data
time = par.Cparameters("REAL", thismodule, "time",0.0)
kk = par.Cparameters("REAL", thismodule, ["kk0", "kk1", "kk2"],[1.0,1.0,1.0])
# Step 3: Normalize the k vector
kk_norm = sp.sqrt(kk[0]**2 + kk[1]**2 + kk[2]**2)
# Step 4: Compute k.x
dot_product = sp.sympify(0)
for i in range(DIM):
dot_product += xx[i]*kk[i]
dot_product /= kk_norm
# Step 5: Set initial data for uu and vv, where vv_ID = \partial_t uu_ID.
uu_ID = sp.sin(dot_product - wavespeed*time)+2
vv_ID = sp.diff(uu_ID, time)
###Output
_____no_output_____
###Markdown
Next we verify that $f(\hat{k}\cdot\vec{x} - c t)$ satisfies the wave equation, by computing$$\left(c^2 \nabla^2 - \partial_t^2 \right)\ f\left(\hat{k}\cdot\vec{x} - c t\right),$$and confirming the result is exactly zero.
###Code
sp.simplify(wavespeed**2*(sp.diff(uu_ID,xx[0],2) +
sp.diff(uu_ID,xx[1],2) +
sp.diff(uu_ID,xx[2],2))
- sp.diff(uu_ID,time,2))
###Output
_____no_output_____
###Markdown
Step 4.a: Code Validation against `ScalarWave.InitialData_PlaneWave` NRPy+ module \[Back to [top](toc)\]$$\label{code_validation2}$$As a code validation check, we will verify agreement in the SymPy expressions for plane-wave initial data for the Scalar Wave equation between1. this tutorial and 2. the NRPy+ [ScalarWave.InitialData_PlaneWave](../edit/ScalarWave/InitialData_PlaneWave.py) module.
###Code
# We just defined SymPy expressions for uu_ID and vv_ID in
# terms of other SymPy variables. Here, we will use the
# above-defined uu_ID and vv_ID to validate against the
# same expressions in the ScalarWave/InitialData_PlaneWave.py
# module, to ensure consistency between this tutorial
# (historically speaking, the tutorial was written first)
# and the PlaneWave ID module itself.
#
# Step 6: Call the InitialData_PlaneWave() function from within the
# ScalarWave/InitialData_PlaneWave.py module,
# which should do exactly the same as in Steps 1-5 above.
import ScalarWave.InitialData_PlaneWave as swid
swid.InitialData_PlaneWave()
# Step 7: Consistency check between the tutorial module above
# and the InitialData_PlaneWave() function from within the
# ScalarWave/InitialData_PlaneWave.py module.
print("Consistency check between ScalarWave tutorial and NRPy+ module:")
print("uu_ID - swid.uu_ID = "+str(sp.simplify(uu_ID - swid.uu_ID))+"\t\t (should be zero)")
print("vv_ID - swid.vv_ID = "+str(sp.simplify(vv_ID - swid.vv_ID))+"\t\t (should be zero)")
###Output
_____no_output_____
###Markdown
Step 5: Output this module to $\LaTeX$-formatted PDF file \[Back to [top](toc)\]$$\label{latex_pdf_output}$$The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename[Tutorial-ScalarWave.pdf](Tutorial-ScalarWave.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
###Code
!jupyter nbconvert --to latex --template latex_nrpy_style.tplx Tutorial-ScalarWave.ipynb
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!pdflatex -interaction=batchmode Tutorial-ScalarWave.tex
!rm -f Tut*.out Tut*.aux Tut*.log
###Output
[NbConvertApp] Converting notebook Tutorial-ScalarWave.ipynb to latex
[NbConvertApp] Writing 65612 bytes to Tutorial-ScalarWave.tex
'pdflatex' is not recognized as an internal or external command,
operable program or batch file.
'pdflatex' is not recognized as an internal or external command,
operable program or batch file.
'pdflatex' is not recognized as an internal or external command,
operable program or batch file.
'rm' is not recognized as an internal or external command,
operable program or batch file.
|
Copy_of_pandas_and_sklearn.ipynb | ###Markdown
Exploring `pandas` and `scikit-learn`In this notebook we'll explore some more advanced features of two popular Python libraries:- [pandas](https://pandas.pydata.org/): **P**ytho**N** **DA**ta **S**cience Library is essentially a wrapper for [numpy](https://numpy.org/) that provides really useful tools for bookkeeping and manipulating data. If you're familiar with [`data.frame`](https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/data.frame) objects or the [`plyr`](https://www.rdocumentation.org/packages/plyr/versions/1.8.6) or [`dplyr`](https://dplyr.tidyverse.org/) libraries in R, you'll see lots of similarities to Pandas `DataFrame` objects and how to work with them. As a historical note, Pandas was inspired by R dataframes and R's "[tidyverse](https://www.tidyverse.org/)" libraries for data science.- [scikit-learn](https://scikit-learn.org/stable/): a machine learning package for Python that implements many of the most widely used algorithms for classification, regression, clustering, dimensionality reduction, model selection, and preprocessing. Scikit-learn is built on top of [numpy](https://numpy.org/), [pandas](https://pandas.pydata.org/), and [matplotlib](https://matplotlib.org/stable/index.html).Before digging into either of these libraries, here are some useful orienting guidelines that might help you to think about what each library does:- Numpy's main feature is that it introduces the `array` datatype. Arrays are like matrices or tensors-- *n*-dimensional tables of numbers.From an implementation standpoint, arrays are just like nested lists, where the outer-most level corresponds to the first dimension, the second outer-most level corresponds to the second dimension, and so on (where the inner-most level corresponds to the last dimension). Because arrays are built on lists, you can often pass nested lists directly into numpy functions (as though they were arrays) and they'll be treated just like arrays. Once you've organized your data into one or more arrays (or array-like objects), numpy allows you to apply a wide variety of linear algebra operations to the data. Numpy is also written in efficient C code, which means that you can work effectively with very large datasets.- Pandas's main feature is the introduction of the `DataFrame` datatype. From an implementation standpoint, DataFrames are like Python dictionaries where one key, called "`values`" points to the data (stored as a 2D numpy array); a second key, called "`index`" points to another numpy array that contains labels for the rows; and a third key, called "`columns`" that points to a numpy array containing labels for the columns. Once you've created a DataFrame, you can mostly treat it just like a 2D numpy array-- many numpy functions "just work" on DataFrames (e.g., it'll usually work fine if you just pass DataFrames directly to numpy methods that are expecting arrays).- Scikit-learn really provides two general tools: - First, the library includes implementations of a wide variety of algorithms for doing the most widely used machine learning tasks. This typically requires your data to be organized into numpy arrays and/or pandas DataFrames and or lists. - Second, the library includes a general framework for *organizing* code for implementing machine learning algorithms. This is arguably the most powerful and furthest-reaching contribution of scikit-learn. 
For example, nearly every model in scikit-learn is implemented as a Python class with a `fit` method (which takes in a training dataset and trains or applies the given model) and a `transform` method (which takes in potentially new data and projects it through the given model). The common structure across models means that it's relatively straightforward to implement new models or features that will then play nicely with the other functionality in the library. - Additional related libraries extend the functionality of scikit-learn even further. For example, [scikit-image](https://scikit-image.org/) adds image processing algorithms; [scikit-network](https://scikit-network.readthedocs.io/en/latest/) adds graph theory algorithms; [scikit-optimize](https://scikit-optimize.github.io/stable/) adds some additional optimization algorithms; and so on. - There is some redundancy between scikit-learn and other popular libraries. For example, scikit-learn includes some deep learning models and tools. However, most of these implementations are less efficient and less flexible than libraries like [tensorflow](https://www.tensorflow.org/) or [pytorch](https://pytorch.org/) that are focused specifically on deep learning, rather than on "machine learning" in general. - A reasonable rule of thumb might be to implement basic ideas using scikit-learn as a way to get things "up and running" on a test dataset or application. But then if you want to scale things up to a much larger dataset you may want to port things over to another library. Library imports
###Code
import numpy as np
import pandas as pd
import sklearn as skl
import matplotlib as mpl
import seaborn as sns
###Output
_____no_output_____
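###Markdown
Before loading the real datasets, here is a tiny, self-contained illustration of the two ideas sketched above: the anatomy of a pandas `DataFrame` (its `values`, `index`, and `columns`) and scikit-learn's `fit`/`transform` pattern. The toy numbers and column names are made up purely for illustration; `StandardScaler` is just one example of a transformer, and most scikit-learn estimators follow the same interface.
###Code
from sklearn.preprocessing import StandardScaler

# A DataFrame wraps a 2D numpy array (.values) plus row labels (.index)
# and column labels (.columns)
toy = pd.DataFrame(np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
                   index=['a', 'b', 'c'],
                   columns=['height', 'weight'])
print(toy.values)    # the underlying 2D numpy array
print(toy.index)     # row labels
print(toy.columns)   # column labels

# The scikit-learn pattern: fit() learns parameters from the data,
# transform() applies them (here: z-scoring each column)
scaler = StandardScaler()
scaler.fit(toy)                # learns the per-column mean and std
print(scaler.transform(toy))   # returns a numpy array of z-scores
###Output
_____no_output_____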
###Markdown
DatasetsWe'll play around with a "toy" dataset included with Seaborn: - A list of 891 Titanic passengers and various compiled pieces of information about them.We'll also look at two datasets from [fivethirtyeight](https://fivethirtyeight.com/): - Guests that appeared on Jon Stewart's 'The Daily Show' (inspired by [this article](https://fivethirtyeight.com/features/every-guest-jon-stewart-ever-had-on-the-daily-show/)) - Superbowl commercials (inspired by [this article](https://projects.fivethirtyeight.com/super-bowl-ads/))We can use the pandas [read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html) function to load in data stored in a CSV file. Analogous pandas functions can read in data stored in a wide variety of formats including Excel, JSON, HDF, SAS, SPSS, SQL, BigQuery, STATA, [and more](https://pandas.pydata.org/pandas-docs/stable/reference/io.html). Most of these functions support reading both from locally stored files *or* directly from a remote URL.
###Code
titanic = sns.load_dataset('titanic')
daily_show_guests = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/daily-show-guests/daily_show_guests.csv', header=0)
superbowl_ads = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/superbowl-ads/main/superbowl-ads.csv', header=0)
###Output
_____no_output_____
###Markdown
It's always good to check that the dataset was loaded in correctly; I like to use the `head` function (which prints out the first 5 rows of the DataFrame by default; you can customize how many lines are printed by passing in any non-negative integer). The `tail` function behaves similarly, but it prints out the *last* rows of the table.
###Code
titanic.head()
daily_show_guests.head(20)
superbowl_ads.head(10)
###Output
_____no_output_____ |
docs/tutorials/regression.ipynb | ###Markdown
Regression with DLT In this notebook, we want to demonstrate how to use different arguments in **DLT** to train a model with various regression settings. We continue to use *iclaims* data for demo purposes:1. regular regression2. regression with specific signs and priors for regression coefficientsFinally, we will also use a simulated dataset to illustrate different types of regression penalties:1. `fixed-ridge`2. `auto-ridge`3. `lasso`Generally speaking, regression coefficients are more robust under full Bayesian sampling and estimation. Hence, we will use `estimator=stan-mcmc` (the default) in this session.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import orbit
from orbit.utils.dataset import load_iclaims
from orbit.models import DLT
from orbit.diagnostics.plot import plot_predicted_data
from orbit.constants.palette import OrbitPalette
print(orbit.__version__)
###Output
1.1.1dev
###Markdown
US Weekly Initial Claims Recall the *iclaims* dataset from the previous section. In order to use this data to nowcast the US unemployment claims during the COVID-19 period, we extended the dataset to Jan 2021 and added the [S&P 500 (^GSPC)](https://finance.yahoo.com/quote/%5EGSPC/history?period1=1264032000&period2=1611187200&interval=1wk&filter=history&frequency=1wk&includeAdjustedClose=true) and [VIX](https://finance.yahoo.com/quote/%5EVIX/history?p=%5EVIX) Index historical data for the same period.The data is standardized and log-transformed for model fitting purposes.
###Code
# load data
df = load_iclaims(end_date='2021-01-03')
date_col = 'week'
response_col = 'claims'
df.dtypes
df.head(5)
###Output
_____no_output_____
###Markdown
We can see from the plot below that there are seasonality, trend, as well as a huge changepoint due to the impact of COVID-19.
###Code
fig, axs = plt.subplots(2, 2,figsize=(20,8))
axs[0, 0].plot(df['week'], df['claims'])
axs[0, 0].set_title('Unemployment Claims')
axs[0, 1].plot(df['week'], df['trend.unemploy'], 'tab:orange')
axs[0, 1].set_title('Google trend - unemploy')
axs[1, 0].plot(df['week'], df['vix'], 'tab:green')
axs[1, 0].set_title('VIX')
axs[1, 1].plot(df['week'], df['sp500'], 'tab:red')
axs[1, 1].set_title('S&P500')
# using relatively updated data
df = df[df['week'] > '2018-01-01'].reset_index(drop=True)
test_size = 26
train_df = df[:-test_size]
test_df = df[-test_size:]
###Output
_____no_output_____
###Markdown
Naive Model Here we will use DLT models to compare the model performance with vs. without regression.
###Code
%%time
dlt = DLT(
response_col=response_col,
date_col=date_col,
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt.fit(df=train_df)
predicted_df = dlt.predict(df=test_df)
###Output
WARNING:pystan:n_eff / iter below 0.001 indicates that the effective sample size has likely been overestimated
WARNING:pystan:Rhat above 1.1 or below 0.9 indicates that the chains very likely have not mixed
###Markdown
DLT With Regression The regressor columns can be supplied via argument `regressor_col`. Recall the regression formula in **DLT**:$$\hat{y}_t =\mu_t + s_t + r_t \\r_t = \sum_{j}\beta_j x_{jt} \\\beta_j ~\sim \mathcal{N}(\mu_j, \sigma_j^2)$$Let's use the default where $\mu_j = 0$ and $\sigma_j = 1$. In addition, we can set a *sign* constraint for each coefficient $\beta_j$. This can be done by supplying `regressor_sign` as a list whose elements are one of the following:* '=': $\beta_j ~\sim \mathcal{N}(0, \sigma_j^2)$ i.e. $\beta_j \in (-\inf, \inf)$* '+': $\beta_j ~\sim \mathcal{N}^+(0, \sigma_j^2)$ i.e. $\beta_j \in [0, \inf)$* '-': $\beta_j ~\sim \mathcal{N}^-(0, \sigma_j^2)$ i.e. $\beta_j \in (-\inf, 0]$Based on some intuition, it's reasonable to assume search terms such as "unemployment", "filling" and the **VIX** index to be positively correlated, and a stock index such as **SP500** to be negatively correlated, with the outcome. We leave any regressor whose sign we are unsure about as a regular regressor (see the short illustration below).
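To build intuition for what the sign options mean, the small stand-alone NumPy snippet below (not orbit code) draws from the three implied priors with $\mu_j=0$ and $\sigma_j=1$: the '+' and '-' options simply restrict the Gaussian prior to a half-normal on the corresponding half-line.
###Code
# Stand-alone illustration of the '=', '+', and '-' prior options
import numpy as np

rng = np.random.default_rng(8888)
draws = rng.normal(0.0, 1.0, size=100_000)
prior_regular = draws            # '=' : unconstrained N(0, 1)
prior_positive = np.abs(draws)   # '+' : half-normal on [0, inf)
prior_negative = -np.abs(draws)  # '-' : half-normal on (-inf, 0]
print(prior_regular.min(), prior_positive.min(), prior_negative.max())
###Output
_____no_output_____
###Markdown
With those sign constraints in mind, let's fit the regression model.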
###Code
%%time
dlt_reg = DLT(
response_col=response_col,
date_col=date_col,
regressor_col=['trend.unemploy', 'trend.filling', 'trend.job', 'sp500', 'vix'],
regressor_sign=["+", '+', '=', '-', '+'],
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt_reg.fit(df=train_df)
predicted_df_reg = dlt_reg.predict(test_df)
###Output
WARNING:pystan:n_eff / iter below 0.001 indicates that the effective sample size has likely been overestimated
WARNING:pystan:Rhat above 1.1 or below 0.9 indicates that the chains very likely have not mixed
###Markdown
The estimated regressor coefficients can be retrieved via `.get_regression_coefs()`.
###Code
dlt_reg.get_regression_coefs()
###Output
_____no_output_____
###Markdown
DLT with Regression and Informative Priors Assuming users obtain further knowledge on some of the regressors, they could use informative priors ($\mu$, $\sigma$) by replacing the defaults. This can be done via the arguments `regressor_beta_prior` and `regressor_sigma_prior`. These two lists should be of the same length as `regressor_col`.
###Code
dlt_reg_adjust = DLT(
response_col=response_col,
date_col=date_col,
regressor_col=['trend.unemploy', 'trend.filling', 'trend.job', 'sp500', 'vix'],
regressor_sign=["+", '+', '=', '-', '+'],
regressor_beta_prior=[0.5, 0.25, 0.07, -0.3, 0.03],
regressor_sigma_prior=[0.1] * 5,
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt_reg_adjust.fit(df=train_df)
predicted_df_reg_adjust = dlt_reg_adjust.predict(test_df)
dlt_reg_adjust.get_regression_coefs()
###Output
_____no_output_____
###Markdown
Let's compare the holdout performance by using the built-in function `smape()` .
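For reference, a common definition of the symmetric mean absolute percentage error is$$\text{SMAPE} = \frac{1}{n}\sum_{t=1}^{n}\frac{2\,\lvert \hat{y}_t - y_t \rvert}{\lvert \hat{y}_t \rvert + \lvert y_t \rvert},$$so lower values are better; orbit's `smape()` implements this style of metric (normalization conventions can differ slightly between libraries). Because the model was fit on log-transformed claims, we exponentiate both the predictions and the actuals before computing it below.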
###Code
import numpy as np
from orbit.diagnostics.metrics import smape
# to reverse the log-transformation
def smape_adjusted(x, y):
x = np.exp(x)
y = np.exp(y)
return smape(x, y)
naive_smape = smape_adjusted(predicted_df['prediction'].values, test_df['claims'].values)
reg_smape = smape_adjusted(predicted_df_reg['prediction'].values, test_df['claims'].values)
reg_adjust_smape = smape_adjusted(predicted_df_reg_adjust['prediction'].values, test_df['claims'].values)
print('Naive Model: {:.3f}\nRegression Model: {:.3f}\nRefined Regression Model: {:.3f}'.format(
naive_smape, reg_smape, reg_adjust_smape
))
###Output
Naive Model: 0.205
Regression Model: 0.153
Refined Regression Model: 0.089
###Markdown
Regression on Simulated DatasetLet's use a simulated dataset to demonstrate sparse regression.
###Code
import pandas as pd
from orbit.utils.simulation import make_trend, make_regression
from orbit.diagnostics.metrics import mse
###Output
_____no_output_____
###Markdown
We have developed a few utilities to generate simulated data. For details, please refer to our API doc. In brief, we are generating observations $y$ such that$$y_t = l_t + r_t $$where$$r_t = \sum_p^{P} \beta_p x_{p,t}$$ Regular RegressionLet's start with a small number of regressors with $P=10$ and $T=50$.
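Before calling the orbit helpers, the snippet below sketches this generative process in plain NumPy: a random-walk trend plus a linear regression term. It is only a rough illustration of the model written above; the exact internals of `make_trend` and `make_regression` (e.g. how the regressor matrix and relevance are drawn) may differ, and the data actually used in this section are generated by the orbit utilities in the next cell.
###Code
# Rough NumPy sketch of y_t = l_t + r_t (illustration only)
import numpy as np

rng = np.random.default_rng(20210101)
T, P = 50, 10
betas_sketch = rng.uniform(-1, 1, P)                  # regression coefficients
X_sketch = rng.normal(0.0, 1.0, size=(T, P))          # regressor matrix
trend_sketch = np.cumsum(rng.normal(0.01, 0.1, T))    # random-walk trend l_t
y_sketch = trend_sketch + X_sketch @ betas_sketch     # observations y_t
print(y_sketch.shape)
###Output
_____no_output_____
###Markdown
Now let's generate the data with the actual orbit utilities.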
###Code
NUM_OF_REGRESSORS = 10
SERIES_LEN = 50
SEED = 20210101
# sample some coefficients
COEFS = np.random.default_rng(SEED).uniform(-1, 1, NUM_OF_REGRESSORS)
trend = make_trend(SERIES_LEN, rw_loc=0.01, rw_scale=0.1)
x, regression, coefs = make_regression(series_len=SERIES_LEN, coefs=COEFS)
print(regression.shape, x.shape)
# combine trend and the regression
y = trend + regression
x_cols = [f"x{x}" for x in range(1, NUM_OF_REGRESSORS + 1)]
response_col = "y"
dt_col = "date"
obs_matrix = np.concatenate([y.reshape(-1, 1), x], axis=1)
# make a data frame for orbit inputs
df = pd.DataFrame(obs_matrix, columns=[response_col] + x_cols)
# make some dummy date stamp
dt = pd.date_range(start='2016-01-04', periods=SERIES_LEN, freq="1W")
df['date'] = dt
df.shape
###Output
_____no_output_____
###Markdown
Let's take a peek at the coefficients.
###Code
coefs
###Output
_____no_output_____
###Markdown
Now, let's run a regression with the defaults where we have constant `regressor_sigma_prior` and `regression_penalty` set as `fixed-ridge`. Fixed Ridge Penalty
###Code
%%time
dlt_fridge = DLT(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
# this is default
regression_penalty='fixed_ridge',
# fixing the smoothing parameters to learn regression coefficients more effectively
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=4000,
)
dlt_fridge.fit(df=df)
coef_fridge = np.quantile(dlt_fridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=20)
plt.plot(idx, coef_fridge[1], color=OrbitPalette.GREEN.value, linewidth=lw, drawstyle='steps', label='Fixed-Ridge', alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_fridge[0], coef_fridge[2], step='pre', alpha=0.3, color=OrbitPalette.GREEN.value)
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.ylim(1, -1)
plt.legend(prop={'size': 20})
plt.grid()
###Output
_____no_output_____
###Markdown
We can also set the `regression_penalty` to be `auto-ridge` in case we are not sure what to set for the `regressor_sigma_prior`. Auto-Ridge Penalty Instead of using a fixed scale in the coefficient priors, a hyperprior can be assigned to them, i.e.$$ \sigma_j \sim \text{Cauchy}^{+} {(0, \alpha)} $$This can be done by setting `regression_penalty="auto_ridge"`, with the argument `auto_ridge_scale` (default of `0.5`) setting the hyperprior $\alpha$. We can also supply stan config such as `adapt_delta` to reduce divergence. Check [here](https://mc-stan.org/rstanarm/reference/adapt_delta.html) for details of `adapt_delta`.
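As a stand-alone illustration (not orbit code) of what this hyperprior implies: the half-Cauchy has a heavy right tail, so most $\sigma_j$ draws are small (strong shrinkage) while occasional large draws let individual coefficients escape shrinkage when the data demand it.
###Code
# Draws from a Half-Cauchy(0, 0.5) hyperprior on sigma_j (illustration only)
import numpy as np

rng = np.random.default_rng(20210101)
sigma_draws = np.abs(0.5 * rng.standard_cauchy(100_000))
print('median sigma_j draw: {:.3f}'.format(np.median(sigma_draws)))
print('95th percentile:     {:.3f}'.format(np.quantile(sigma_draws, 0.95)))
###Output
_____no_output_____
###Markdown
Now let's fit the model with the auto-ridge penalty.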
###Code
%%time
dlt_auto_ridge = DLT(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
# this is default
regression_penalty='auto_ridge',
# fixing the smoothing parameters to learn regression coefficients more effectively
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=4000,
# reduce divergence
stan_mcmc_control={'adapt_delta':0.9},
)
dlt_auto_ridge.fit(df=df)
coef_auto_ridge = np.quantile(dlt_auto_ridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(idx, coef_auto_ridge[1], color=OrbitPalette.GREEN.value, linewidth=lw, drawstyle='steps', label='Auto-Ridge', alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_auto_ridge[0], coef_auto_ridge[2], step='pre', alpha=0.3, color=OrbitPalette.GREEN.value)
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.ylim(1, -1)
plt.legend(prop={'size': 20})
plt.grid();
print('Fixed Ridge MSE:{:.3f}\nAuto Ridge MSE:{:.3f}'.format(
mse(coef_fridge[1], coefs), mse(coef_auto_ridge[1], coefs)
))
###Output
Fixed Ridge MSE:0.082
Auto Ridge MSE:0.079
###Markdown
Sparse RegressionNow, let's move to a challenging problem with a much higher $P$ to $N$ ratio, with sparsity specified by the parameter `relevance=0.5` in the simulation process.
###Code
NUM_OF_REGRESSORS = 50
SERIES_LEN = 50
SEED = 20210101
COEFS = np.random.default_rng(SEED).uniform(0.3, 0.5, NUM_OF_REGRESSORS)
SIGNS = np.random.default_rng(SEED).choice([1, -1], NUM_OF_REGRESSORS)
# to mimic a either zero or relative observable coefficients
COEFS = COEFS * SIGNS
trend = make_trend(SERIES_LEN, rw_loc=0.01, rw_scale=0.1)
x, regression, coefs = make_regression(series_len=SERIES_LEN, coefs=COEFS, relevance=0.5)
print(regression.shape, x.shape)
# generated sparsed coefficients
coefs
# combine trend and the regression
y = trend + regression
x_cols = [f"x{x}" for x in range(1, NUM_OF_REGRESSORS + 1)]
response_col = "y"
dt_col = "date"
obs_matrix = np.concatenate([y.reshape(-1, 1), x], axis=1)
# make a data frame for orbit inputs
df = pd.DataFrame(obs_matrix, columns=[response_col] + x_cols)
# make some dummy date stamp
dt = pd.date_range(start='2016-01-04', periods=SERIES_LEN, freq="1W")
df['date'] = dt
df.shape
###Output
_____no_output_____
###Markdown
Fixed Ridge Penalty
###Code
dlt_fridge = DLT(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=8000,
)
dlt_fridge.fit(df=df)
coef_fridge = np.quantile(dlt_fridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(coef_fridge[1], color=OrbitPalette.GREEN.value, linewidth=lw, drawstyle='steps', label="Ridge", alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_fridge[0], coef_fridge[2], step='pre', alpha=0.3, color=OrbitPalette.GREEN.value)
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.legend(prop={'size': 20})
plt.grid();
###Output
_____no_output_____
###Markdown
LASSO PenaltyIn high $P$ to $N$ problems, the *LASSO* penalty usually shines compared to the *Ridge* penalty.
###Code
dlt_lasso = DLT(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
regression_penalty='lasso',
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=8000,
)
dlt_lasso.fit(df=df)
coef_lasso = np.quantile(dlt_lasso._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(coef_lasso[1], color=OrbitPalette.GREEN.value, linewidth=lw, drawstyle='steps', label="Lasso", alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_lasso[0], coef_lasso[2], step='pre', alpha=0.3, color=OrbitPalette.GREEN.value)
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.legend(prop={'size': 20})
plt.grid();
print('Fixed Ridge MSE:{:.3f}\nLASSO MSE:{:.3f}'.format(
mse(coef_fridge[1], coefs), mse(coef_lasso[1], coefs)
))
###Output
Fixed Ridge MSE:0.162
LASSO MSE:0.102
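###Markdown
As a quick frequentist cross-check outside orbit, we can also fit scikit-learn's `Ridge` and `Lasso` on the same simulated design matrix `x` and response `y`. Note that these penalized least-squares fits ignore the random-walk trend entirely and the penalty strengths (`alpha`) below are arbitrary, so this is only a rough illustration that an L1 penalty recovers the sparsity pattern better than an L2 penalty in this high $P$ to $N$ setting.
###Code
from sklearn.linear_model import Lasso, Ridge

ridge_fit = Ridge(alpha=1.0).fit(x, y)   # L2 penalty
lasso_fit = Lasso(alpha=0.1).fit(x, y)   # L1 penalty
print('sklearn Ridge coefficient MSE: {:.3f}'.format(mse(ridge_fit.coef_, coefs)))
print('sklearn Lasso coefficient MSE: {:.3f}'.format(mse(lasso_fit.coef_, coefs)))
print('non-zero Lasso coefficients:', int(np.sum(lasso_fit.coef_ != 0)))
###Output
_____no_output_____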
###Markdown
Regression with DLT In this notebook, we want to demonstrate how to use different arguments in **DLT** to train a model with various regression settings. We continue to use *iclaims* data for demo purposes:1. regular regression2. regression with specific signs and priors for regression coefficientsFinally, we will also use a simulated dataset to illustrate different types of regression penalties:1. `fixed-ridge`2. `auto-ridge`3. `lasso`Generally speaking, regression coefficients are more robust under full Bayesian sampling and estimation. Hence, we will use `DLTFull` in this session.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import orbit
from orbit.utils.dataset import load_iclaims
from orbit.models.dlt import DLTFull, DLTMAP
from orbit.diagnostics.plot import plot_predicted_data
from orbit.constants.palette import QualitativePalette
from pylab import rcParams
rcParams['figure.figsize'] = 14, 8
plt.style.use('fivethirtyeight')
assert orbit.__version__ == '1.0.14dev'
###Output
_____no_output_____
###Markdown
US Weekly Initial Claims Recall the *iclaims* dataset from the previous section. In order to use this data to nowcast the US unemployment claims during the COVID-19 period, we extended the dataset to Jan 2021 and added the [S&P 500 (^GSPC)](https://finance.yahoo.com/quote/%5EGSPC/history?period1=1264032000&period2=1611187200&interval=1wk&filter=history&frequency=1wk&includeAdjustedClose=true) and [VIX](https://finance.yahoo.com/quote/%5EVIX/history?p=%5EVIX) Index historical data for the same period.The data is standardized and log-transformed for model fitting purposes.
###Code
# load data
df = load_iclaims(end_date='2021-01-03')
date_col = 'week'
response_col = 'claims'
df.dtypes
df.head(5)
###Output
_____no_output_____
###Markdown
We can see from the plot below that there are seasonality, trend, as well as a huge changepoint due to the impact of COVID-19.
###Code
fig, axs = plt.subplots(2, 2,figsize=(20,8))
axs[0, 0].plot(df['week'], df['claims'])
axs[0, 0].set_title('Unemployment Claims')
axs[0, 1].plot(df['week'], df['trend.unemploy'], 'tab:orange')
axs[0, 1].set_title('Google trend - unemploy')
axs[1, 0].plot(df['week'], df['vix'], 'tab:green')
axs[1, 0].set_title('VIX')
axs[1, 1].plot(df['week'], df['sp500'], 'tab:red')
axs[1, 1].set_title('S&P500')
# using relatively updated data
df = df[df['week'] > '2018-01-01'].reset_index(drop=True)
test_size = 26
train_df = df[:-test_size]
test_df = df[-test_size:]
###Output
_____no_output_____
###Markdown
Naive Model Here we will use DLT models to compare the model performance with vs. without regression.
###Code
%%time
dlt = DLTFull(
response_col=response_col,
date_col=date_col,
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt.fit(df=train_df)
predicted_df = dlt.predict(df=test_df)
###Output
WARNING:pystan:n_eff / iter below 0.001 indicates that the effective sample size has likely been overestimated
WARNING:pystan:Rhat above 1.1 or below 0.9 indicates that the chains very likely have not mixed
###Markdown
DLT With Regression The regressor columns can be supplied via argument `regressor_col`. Recall the regression formula in **DLT**:$$\hat{y}_t =\mu_t + s_t + r_t \\r_t = \sum_{j}\beta_j x_{jt} \\\beta_j ~\sim \mathcal{N}(\mu_j, \sigma_j^2)$$Let's use the default where $\mu_j = 0$ and $\sigma_j = 1$. In addition, we can set a *sign* constraint for each coefficient $\beta_j$. This can be done by supplying `regressor_sign` as a list whose elements are one of the following:* '=': $\beta_j ~\sim \mathcal{N}(0, \sigma_j^2)$ i.e. $\beta_j \in (-\inf, \inf)$* '+': $\beta_j ~\sim \mathcal{N}^+(0, \sigma_j^2)$ i.e. $\beta_j \in [0, \inf)$* '-': $\beta_j ~\sim \mathcal{N}^-(0, \sigma_j^2)$ i.e. $\beta_j \in (-\inf, 0]$Based on some intuition, it's reasonable to assume search terms such as "unemployment", "filling" and the **VIX** index to be positively correlated, and a stock index such as **SP500** to be negatively correlated, with the outcome. We leave any regressor whose sign we are unsure about as a regular regressor.
###Code
%%time
dlt_reg = DLTFull(
response_col=response_col,
date_col=date_col,
regressor_col=['trend.unemploy', 'trend.filling', 'trend.job', 'sp500', 'vix'],
regressor_sign=["+", '+', '=', '-', '+'],
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt_reg.fit(df=train_df)
predicted_df_reg = dlt_reg.predict(test_df)
###Output
WARNING:pystan:n_eff / iter below 0.001 indicates that the effective sample size has likely been overestimated
WARNING:pystan:Rhat above 1.1 or below 0.9 indicates that the chains very likely have not mixed
###Markdown
The estimated regressor coefficients can be retrieved via `.get_regression_coefs()`.
###Code
dlt_reg.get_regression_coefs()
###Output
_____no_output_____
###Markdown
DLT with Regression and Informative Priors Assuming users obtain further knowledge on some of the regressors, they could use informative priors ($\mu$, $\sigma$) by replacing the defaults. This can be done via the arguments `regressor_beta_prior` and `regressor_sigma_prior`. These two lists should be of the same length as `regressor_col`.
###Code
dlt_reg_adjust = DLTFull(
response_col=response_col,
date_col=date_col,
regressor_col=['trend.unemploy', 'trend.filling', 'trend.job', 'sp500', 'vix'],
regressor_sign=["+", '+', '=', '-', '+'],
regressor_beta_prior=[0.5, 0.25, 0.07, -0.3, 0.03],
regressor_sigma_prior=[0.1] * 5,
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt_reg_adjust.fit(df=train_df)
predicted_df_reg_adjust = dlt_reg_adjust.predict(test_df)
dlt_reg_adjust.get_regression_coefs()
###Output
_____no_output_____
###Markdown
Let's compare the holdout performance by using the built-in function `smape()` .
###Code
import numpy as np
from orbit.diagnostics.metrics import smape
# to reverse the log-transformation
def smape_adjusted(x, y):
x = np.exp(x)
y = np.exp(y)
return smape(x, y)
naive_smape = smape_adjusted(predicted_df['prediction'].values, test_df['claims'].values)
reg_smape = smape_adjusted(predicted_df_reg['prediction'].values, test_df['claims'].values)
reg_adjust_smape = smape_adjusted(predicted_df_reg_adjust['prediction'].values, test_df['claims'].values)
print('Naive Model: {:.3f}\nRegression Model: {:.3f}\nRefined Regression Model: {:.3f}'.format(
naive_smape, reg_smape, reg_adjust_smape
))
###Output
Naive Model: 0.205
Regression Model: 0.153
Refined Regression Model: 0.089
###Markdown
Regression on Simulated DatasetLet's use a simulated dataset to demonstrate sparse regression.
###Code
import pandas as pd
from orbit.constants.palette import QualitativePalette
from orbit.utils.simulation import make_trend, make_regression
from orbit.diagnostics.metrics import mse
###Output
_____no_output_____
###Markdown
We have developed a few utilities to generate simulated data. For details, please refer to our API doc. In brief, we are generating observations $y$ such that$$y_t = l_t + r_t $$where$$r_t = \sum_p^{P} \beta_p x_{p,t}$$ Regular RegressionLet's start with a small number of regressors with $P=10$ and $T=50$.
###Code
NUM_OF_REGRESSORS = 10
SERIES_LEN = 50
SEED = 20210101
# sample some coefficients
COEFS = np.random.default_rng(SEED).uniform(-1, 1, NUM_OF_REGRESSORS)
trend = make_trend(SERIES_LEN, rw_loc=0.01, rw_scale=0.1)
x, regression, coefs = make_regression(series_len=SERIES_LEN, coefs=COEFS)
print(regression.shape, x.shape)
# combine trend and the regression
y = trend + regression
x_cols = [f"x{x}" for x in range(1, NUM_OF_REGRESSORS + 1)]
response_col = "y"
dt_col = "date"
obs_matrix = np.concatenate([y.reshape(-1, 1), x], axis=1)
# make a data frame for orbit inputs
df = pd.DataFrame(obs_matrix, columns=[response_col] + x_cols)
# make some dummy date stamp
dt = pd.date_range(start='2016-01-04', periods=SERIES_LEN, freq="1W")
df['date'] = dt
df.shape
###Output
_____no_output_____
###Markdown
Let's take a peek at the coefficients.
###Code
coefs
###Output
_____no_output_____
###Markdown
Now, let's run a regression with the defaults where we have constant `regressor_sigma_prior` and `regression_penalty` set as `fixed-ridge`. Fixed Ridge Penalty
###Code
%%time
dlt_fridge = DLTFull(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
# this is default
regression_penalty='fixed_ridge',
# fixing the smoothing parameters to learn regression coefficients more effectively
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=4000,
)
dlt_fridge.fit(df=df)
coef_fridge = np.quantile(dlt_fridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=20)
plt.plot(idx, coef_fridge[1], color=QualitativePalette.Line4.value[2], linewidth=lw, drawstyle='steps', label='Fixed-Ridge', alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_fridge[0], coef_fridge[2], step='pre', alpha=0.3, color=QualitativePalette.Line4.value[2])
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.ylim(1, -1)
plt.legend(prop={'size': 20})
plt.grid()
###Output
_____no_output_____
###Markdown
We can also set the `regression_penalty` to be `auto-ridge` in case we are not sure what to set for the `regressor_sigma_prior`. Auto-Ridge Penalty Instead of using a fixed scale in the coefficient priors, a hyperprior can be assigned to them, i.e.$$ \sigma_j \sim \text{Cauchy}^{+} {(0, \alpha)} $$This can be done by setting `regression_penalty="auto_ridge"`, with the argument `auto_ridge_scale` (default of `0.5`) setting the hyperprior $\alpha$. We can also supply stan config such as `adapt_delta` to reduce divergence. Check [here](https://mc-stan.org/rstanarm/reference/adapt_delta.html) for details of `adapt_delta`.
###Code
%%time
dlt_auto_ridge = DLTFull(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
# this is default
regression_penalty='auto_ridge',
# fixing the smoothing parameters to learn regression coefficients more effectively
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=4000,
# reduce divergence
stan_mcmc_control={'adapt_delta':0.9},
)
dlt_auto_ridge.fit(df=df)
coef_auto_ridge = np.quantile(dlt_auto_ridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(idx, coef_auto_ridge[1], color=QualitativePalette.Line4.value[2], linewidth=lw, drawstyle='steps', label='Auto-Ridge', alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_auto_ridge[0], coef_auto_ridge[2], step='pre', alpha=0.3, color=QualitativePalette.Line4.value[2])
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.ylim(1, -1)
plt.legend(prop={'size': 20})
plt.grid();
print('Fixed Ridge MSE:{:.3f}\nAuto Ridge MSE:{:.3f}'.format(
mse(coef_fridge[1], coefs), mse(coef_auto_ridge[1], coefs)
))
###Output
Fixed Ridge MSE:0.082
Auto Ridge MSE:0.079
###Markdown
Sparse RegressionNow, let's move to a challenging problem with a much higher $P$ to $N$ ratio, with sparsity specified by the parameter `relevance=0.5` in the simulation process.
###Code
NUM_OF_REGRESSORS = 50
SERIES_LEN = 50
SEED = 20210101
COEFS = np.random.default_rng(SEED).uniform(0.3, 0.5, NUM_OF_REGRESSORS)
SIGNS = np.random.default_rng(SEED).choice([1, -1], NUM_OF_REGRESSORS)
# to mimic coefficients that are either zero or of observable magnitude
COEFS = COEFS * SIGNS
trend = make_trend(SERIES_LEN, rw_loc=0.01, rw_scale=0.1)
x, regression, coefs = make_regression(series_len=SERIES_LEN, coefs=COEFS, relevance=0.5)
print(regression.shape, x.shape)
# generated sparsed coefficients
coefs
# combine trend and the regression
y = trend + regression
x_cols = [f"x{x}" for x in range(1, NUM_OF_REGRESSORS + 1)]
response_col = "y"
dt_col = "date"
obs_matrix = np.concatenate([y.reshape(-1, 1), x], axis=1)
# make a data frame for orbit inputs
df = pd.DataFrame(obs_matrix, columns=[response_col] + x_cols)
# make some dummy date stamp
dt = pd.date_range(start='2016-01-04', periods=SERIES_LEN, freq="1W")
df['date'] = dt
df.shape
###Output
_____no_output_____
###Markdown
Fixed Ridge Penalty
###Code
dlt_fridge = DLTFull(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=8000,
)
dlt_fridge.fit(df=df)
coef_fridge = np.quantile(dlt_fridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(coef_fridge[1], color=QualitativePalette.Line4.value[2], linewidth=lw, drawstyle='steps', label="Ridge", alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_fridge[0], coef_fridge[2], step='pre', alpha=0.3, color=QualitativePalette.Line4.value[2])
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.legend(prop={'size': 20})
plt.grid();
###Output
_____no_output_____
###Markdown
LASSO PenaltyIn high $P$ to $N$ problems, the *LASSO* penalty usually shines compared to the *Ridge* penalty.
###Code
dlt_lasso = DLTFull(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
regression_penalty='lasso',
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=8000,
)
dlt_lasso.fit(df=df)
coef_lasso = np.quantile(dlt_lasso._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(coef_lasso[1], color=QualitativePalette.Line4.value[2], linewidth=lw, drawstyle='steps', label="Lasso", alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_lasso[0], coef_lasso[2], step='pre', alpha=0.3, color=QualitativePalette.Line4.value[2])
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.legend(prop={'size': 20})
plt.grid();
print('Fixed Ridge MSE:{:.3f}\nLASSO MSE:{:.3f}'.format(
mse(coef_fridge[1], coefs), mse(coef_lasso[1], coefs)
))
###Output
Fixed Ridge MSE:0.177
LASSO MSE:0.107
###Markdown
Regression with Orbit In this demo, we want to demonstrate how to use the different arguments in the model classes (LGT or DLT) to realize different setups for the regressors. These could be very useful in practice when tuning the models.
###Code
import pandas as pd
import numpy as np
from orbit.models.lgt import LGTMAP, LGTAggregated, LGTFull
from orbit.models.dlt import DLTMAP, DLTAggregated, DLTFull
from orbit.diagnostics.plot import plot_predicted_data
from orbit.diagnostics.plot import plot_predicted_components
from orbit.utils.dataset import load_iclaims
###Output
_____no_output_____
###Markdown
load data
###Code
df = load_iclaims()
df[['claims', 'trend.unemploy', 'trend.filling', 'trend.job']] = \
np.log(df[['claims', 'trend.unemploy', 'trend.filling', 'trend.job']])
###Output
_____no_output_____
###Markdown
Use regressors and specify their signs The regressor columns can be supplied via argument `regressor_col`. Their signs can be specified via `regressor_sign`, with values either '=' (regular, no restriction) or '+' (positive). These two lists should be of the same length. The default value of `regressor_sign` is all '='.
###Code
DATE_COL="week"
RESPONSE_COL="claims"
REGRESSOR_COL=['trend.unemploy', 'trend.filling', 'trend.job']
lgt_mod=LGTAggregated(
response_col=RESPONSE_COL,
date_col=DATE_COL,
regressor_col=REGRESSOR_COL,
regressor_sign=["+", '+', '='],
seasonality=52,
seed=1,
)
lgt_mod.fit(df=df)
###Output
WARNING:pystan:Maximum (flat) parameter count (1000) exceeded: skipping diagnostic tests for n_eff and Rhat.
To run all diagnostics call pystan.check_hmc_diagnostics(fit)
WARNING:pystan:4 of 100 iterations ended with a divergence (4 %).
WARNING:pystan:Try running with adapt_delta larger than 0.8 to remove the divergences.
###Markdown
The estimated regressor coefficients can be retrieved via `.get_regression_coefs()`.
###Code
lgt_mod.get_regression_coefs()
###Output
_____no_output_____
###Markdown
Regression Types In orbit, we have different prior types for the regression coefficients:* Fixed Ridge* Lasso* Auto RidgeIn **Fixed Ridge**, it is assumed that $$\beta \sim Gaussian(\beta_{prior}, \sigma_{prior})$$In **Lasso**, it is assumed that $$\beta \sim Laplace(\beta_{prior}, \sigma_{prior})$$In **Auto Ridge**, it is assumed that $$\beta \sim Gaussian(\beta_{prior}, \sigma_{\beta})$$, $$\sigma_\beta \sim \text{Half-Cauchy}(0, \text{ridge_scale})$$ Fixed Ridge
###Code
lgt_mod = LGTAggregated(
response_col=RESPONSE_COL,
date_col=DATE_COL,
regressor_col=REGRESSOR_COL,
regressor_sign=["+", '+', '='],
seasonality=52,
seed=1,
regression_penalty="fixed_ridge",
)
lgt_mod.fit(df=df)
lgt_mod.get_regression_coefs()
###Output
_____no_output_____
###Markdown
Lasso
###Code
lgt_mod = LGTAggregated(
response_col=RESPONSE_COL,
date_col=DATE_COL,
regressor_col=REGRESSOR_COL,
regressor_sign=["+", '+', '='],
seasonality=52,
seed=1,
regression_penalty="lasso",
)
lgt_mod.fit(df=df)
lgt_mod.get_regression_coefs()
###Output
_____no_output_____
###Markdown
Auto Ridge
###Code
lgt_mod = LGTAggregated(
response_col=RESPONSE_COL,
date_col=DATE_COL,
regressor_col=REGRESSOR_COL,
regressor_sign=["+", '+', '='],
seasonality=52,
seed=1,
regression_penalty="auto_ridge",
auto_ridge_scale=0.5,
)
lgt_mod.fit(df=df)
lgt_mod.get_regression_coefs()
###Output
_____no_output_____
###Markdown
Adjust priors for regressor beta and regressor standard deviation In the model, it is assumed that $$\beta \sim Gaussian(\beta_{prior}, \sigma_{prior})$$The default values for $\beta_{prior}$ and $\sigma_{prior}$ are 0 and 1, respectively. Users can adjust them via the arguments `regressor_beta_prior` and `regressor_sigma_prior`. These two lists should be of the same length as `regressor_col`.
###Code
lgt_mod = LGTAggregated(
response_col=RESPONSE_COL,
date_col=DATE_COL,
regressor_col=REGRESSOR_COL,
regressor_sign=["+", '+', '='],
regressor_beta_prior=[0.05, 0.05, 0],
regressor_sigma_prior=[0.1, 0.1, 0.1],
seasonality=52,
seed=1,
)
lgt_mod.regression_penalty
lgt_mod.fit(df=df)
###Output
WARNING:pystan:Maximum (flat) parameter count (1000) exceeded: skipping diagnostic tests for n_eff and Rhat.
To run all diagnostics call pystan.check_hmc_diagnostics(fit)
###Markdown
Regression with DLT In this notebook, we want to demonstrate how to use different arguments in **DLT** to train a model with various regression settings. We continue to use the *iclaims* data for demo purposes:1. regular regression2. regression with specific signs and priors for regression coefficientsFinally, we will also use a simulated dataset to illustrate different types of regression penalties:1. `fixed-ridge`2. `auto-ridge`3. `lasso`Generally speaking, regression coefficients are more robust under full Bayesian sampling and estimation. Hence, we will use `DLTFull` in this session.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import orbit
from orbit.utils.dataset import load_iclaims
from orbit.models.dlt import DLTFull, DLTMAP
from orbit.diagnostics.plot import plot_predicted_data
from orbit.constants.palette import QualitativePalette
from orbit.utils.plot import get_orbit_style
plt.style.use(get_orbit_style())
print(orbit.__version__)
###Output
1.0.14dev
###Markdown
US Weekly Initial Claims Recall the *iclaims* dataset from the previous section. In order to use this data to nowcast US unemployment claims during the COVID-19 period, we extended the dataset to Jan 2021 and added the [S&P 500 (^GSPC)](https://finance.yahoo.com/quote/%5EGSPC/history?period1=1264032000&period2=1611187200&interval=1wk&filter=history&frequency=1wk&includeAdjustedClose=true) and [VIX](https://finance.yahoo.com/quote/%5EVIX/history?p=%5EVIX) Index historical data for the same period. The data is standardized and log-transformed for model fitting purposes.
###Code
# load data
df = load_iclaims(end_date='2021-01-03')
date_col = 'week'
response_col = 'claims'
df.dtypes
df.head(5)
###Output
_____no_output_____
###Markdown
We can see from the plot below that there are seasonality, trend, as well as a huge changepoint due to the impact of COVID-19.
###Code
fig, axs = plt.subplots(2, 2,figsize=(20,8))
axs[0, 0].plot(df['week'], df['claims'])
axs[0, 0].set_title('Unemployment Claims')
axs[0, 1].plot(df['week'], df['trend.unemploy'], 'tab:orange')
axs[0, 1].set_title('Google trend - unemploy')
axs[1, 0].plot(df['week'], df['vix'], 'tab:green')
axs[1, 0].set_title('VIX')
axs[1, 1].plot(df['week'], df['sp500'], 'tab:red')
axs[1, 1].set_title('S&P500')
# using relatively updated data
df = df[df['week'] > '2018-01-01'].reset_index(drop=True)
test_size = 26
train_df = df[:-test_size]
test_df = df[-test_size:]
###Output
_____no_output_____
###Markdown
Naive Model Here we will use DLT models to compare the model performance with vs. without regression.
###Code
%%time
dlt = DLTFull(
response_col=response_col,
date_col=date_col,
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt.fit(df=train_df)
predicted_df = dlt.predict(df=test_df)
###Output
WARNING:pystan:n_eff / iter below 0.001 indicates that the effective sample size has likely been overestimated
WARNING:pystan:Rhat above 1.1 or below 0.9 indicates that the chains very likely have not mixed
###Markdown
DLT With Regression The regressor columns can be supplied via the argument `regressor_col`. Recall the regression formula in **DLT**:$$\hat{y}_t =\mu_t + s_t + r_t \\r_t = \sum_{j}\beta_j x_{jt} \\\beta_j \sim \mathcal{N}(\mu_j, \sigma_j^2)$$Let's use the default where $\mu_j = 0$ and $\sigma_j = 1$. In addition, we can set a *sign* constraint for each coefficient $\beta_j$. This can be done by supplying `regressor_sign` as a list where each element is one of the following:* '=': $\beta_j \sim \mathcal{N}(0, \sigma_j^2)$ i.e. $\beta_j \in (-\infty, \infty)$* '+': $\beta_j \sim \mathcal{N}^+(0, \sigma_j^2)$ i.e. $\beta_j \in [0, \infty)$* '-': $\beta_j \sim \mathcal{N}^-(0, \sigma_j^2)$ i.e. $\beta_j \in (-\infty, 0]$Based on some intuition, it's reasonable to assume search terms such as "unemployment", "filling" and the **VIX** index to be positively correlated and a stock index such as **SP500** to be negatively correlated with the outcome. We will leave whatever we are unsure about as a regular regressor.
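As a rough illustration of what these sign constraints mean for the prior (a sketch only, not how Orbit implements them internally), a '+' coefficient behaves like the absolute value of a Gaussian draw and a '-' coefficient like its negative:

```python
import numpy as np

rng = np.random.default_rng(8888)
sigma_j = 1.0                        # assumed prior scale
draws = rng.normal(0.0, sigma_j, 10000)

beta_regular  = draws                # '=' : unrestricted Gaussian prior
beta_positive = np.abs(draws)        # '+' : half-normal, support [0, inf)
beta_negative = -np.abs(draws)       # '-' : half-normal mirrored to (-inf, 0]

print(beta_positive.min() >= 0, beta_negative.max() <= 0)
```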
###Code
%%time
dlt_reg = DLTFull(
response_col=response_col,
date_col=date_col,
regressor_col=['trend.unemploy', 'trend.filling', 'trend.job', 'sp500', 'vix'],
regressor_sign=["+", '+', '=', '-', '+'],
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt_reg.fit(df=train_df)
predicted_df_reg = dlt_reg.predict(test_df)
###Output
WARNING:pystan:n_eff / iter below 0.001 indicates that the effective sample size has likely been overestimated
WARNING:pystan:Rhat above 1.1 or below 0.9 indicates that the chains very likely have not mixed
###Markdown
The estimated regressor coefficients can be retrieved via `.get_regression_coefs()`.
###Code
dlt_reg.get_regression_coefs()
###Output
_____no_output_____
###Markdown
DLT with Regression and Informative Priors Assuming users obtain further knowledge about some of the regressors, they can use informative priors ($\mu$, $\sigma$) by replacing the defaults. This can be done via the arguments `regressor_beta_prior` and `regressor_sigma_prior`. These two lists should be of the same length as `regressor_col`.
###Code
dlt_reg_adjust = DLTFull(
response_col=response_col,
date_col=date_col,
regressor_col=['trend.unemploy', 'trend.filling', 'trend.job', 'sp500', 'vix'],
regressor_sign=["+", '+', '=', '-', '+'],
regressor_beta_prior=[0.5, 0.25, 0.07, -0.3, 0.03],
regressor_sigma_prior=[0.1] * 5,
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt_reg_adjust.fit(df=train_df)
predicted_df_reg_adjust = dlt_reg_adjust.predict(test_df)
dlt_reg_adjust.get_regression_coefs()
###Output
_____no_output_____
###Markdown
Let's compare the holdout performance using the built-in function `smape()`.
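For reference, `smape` here is the symmetric mean absolute percentage error. A hedged sketch of the usual definition is given below; the exact implementation in `orbit.diagnostics.metrics` may differ in details such as the denominator convention:

```python
import numpy as np

def smape_sketch(actual, predicted):
    # symmetric MAPE: mean of 2|y - yhat| / (|y| + |yhat|)
    actual, predicted = np.asarray(actual), np.asarray(predicted)
    return np.mean(2.0 * np.abs(predicted - actual) / (np.abs(actual) + np.abs(predicted)))
```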
###Code
import numpy as np
from orbit.diagnostics.metrics import smape
# to reverse the log-transformation
def smape_adjusted(x, y):
x = np.exp(x)
y = np.exp(y)
return smape(x, y)
naive_smape = smape_adjusted(predicted_df['prediction'].values, test_df['claims'].values)
reg_smape = smape_adjusted(predicted_df_reg['prediction'].values, test_df['claims'].values)
reg_adjust_smape = smape_adjusted(predicted_df_reg_adjust['prediction'].values, test_df['claims'].values)
print('Naive Model: {:.3f}\nRegression Model: {:.3f}\nRefined Regression Model: {:.3f}'.format(
naive_smape, reg_smape, reg_adjust_smape
))
###Output
Naive Model: 0.205
Regression Model: 0.153
Refined Regression Model: 0.089
###Markdown
Regression on Simulated DatasetLet's use a simulated dataset to demonstrate sparse regression.
###Code
import pandas as pd
from orbit.constants.palette import QualitativePalette
from orbit.utils.simulation import make_trend, make_regression
from orbit.diagnostics.metrics import mse
###Output
_____no_output_____
###Markdown
We have developed a few utilities to generate simulated data. For details, please refer to our API doc. In brief, we are generating observations $y$ such that$$y_t = l_t + r_t $$where$$r_t = \sum_{p=1}^{P} \beta_p x_{p,t}$$ Regular RegressionLet's start with a small number of regressors with $P=10$ and $T=50$.
###Code
NUM_OF_REGRESSORS = 10
SERIES_LEN = 50
SEED = 20210101
# sample some coefficients
COEFS = np.random.default_rng(SEED).uniform(-1, 1, NUM_OF_REGRESSORS)
trend = make_trend(SERIES_LEN, rw_loc=0.01, rw_scale=0.1)
x, regression, coefs = make_regression(series_len=SERIES_LEN, coefs=COEFS)
print(regression.shape, x.shape)
# combine trend and the regression
y = trend + regression
x_cols = [f"x{x}" for x in range(1, NUM_OF_REGRESSORS + 1)]
response_col = "y"
dt_col = "date"
obs_matrix = np.concatenate([y.reshape(-1, 1), x], axis=1)
# make a data frame for orbit inputs
df = pd.DataFrame(obs_matrix, columns=[response_col] + x_cols)
# make some dummy date stamp
dt = pd.date_range(start='2016-01-04', periods=SERIES_LEN, freq="1W")
df['date'] = dt
df.shape
###Output
_____no_output_____
###Markdown
Let's take a peek at the coefficients.
###Code
coefs
###Output
_____no_output_____
###Markdown
Now, let's run a regression with the defaults where we have constant `regressor_sigma_prior` and `regression_penalty` set as `fixed-ridge`. Fixed Ridge Penalty
###Code
%%time
dlt_fridge = DLTFull(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
# this is default
regression_penalty='fixed_ridge',
# fixing the smoothing parameters to learn regression coefficients more effectively
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=4000,
)
dlt_fridge.fit(df=df)
coef_fridge = np.quantile(dlt_fridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=20)
plt.plot(idx, coef_fridge[1], color=QualitativePalette.Line4.value[2], linewidth=lw, drawstyle='steps', label='Fixed-Ridge', alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_fridge[0], coef_fridge[2], step='pre', alpha=0.3, color=QualitativePalette.Line4.value[2])
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.ylim(1, -1)
plt.legend(prop={'size': 20})
plt.grid()
###Output
_____no_output_____
###Markdown
We can also set the `regression_penalty` to be `auto-ridge` in case we are unsure what to set for the `regressor_sigma_prior`. Auto-Ridge Penalty Instead of using a fixed scale in the coefficient priors, a hyperprior can be assigned to them, i.e.$$ \sigma_j \sim \text{Cauchy}^{+} {(0, \alpha)} $$This can be done by setting `regression_penalty="auto_ridge"`, where the argument `auto_ridge_scale` (default `0.5`) sets the hyperprior $\alpha$. We can also supply Stan config such as `adapt_delta` to reduce divergences. Check [here](https://mc-stan.org/rstanarm/reference/adapt_delta.html) for details of `adapt_delta`.
###Code
%%time
dlt_auto_ridge = DLTFull(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
# this is default
regression_penalty='auto_ridge',
# fixing the smoothing parameters to learn regression coefficients more effectively
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=4000,
# reduce divergence
stan_mcmc_control={'adapt_delta':0.9},
)
dlt_auto_ridge.fit(df=df)
coef_auto_ridge = np.quantile(dlt_auto_ridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(idx, coef_auto_ridge[1], color=QualitativePalette.Line4.value[2], linewidth=lw, drawstyle='steps', label='Auto-Ridge', alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_auto_ridge[0], coef_auto_ridge[2], step='pre', alpha=0.3, color=QualitativePalette.Line4.value[2])
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.ylim(1, -1)
plt.legend(prop={'size': 20})
plt.grid();
print('Fixed Ridge MSE:{:.3f}\nAuto Ridge MSE:{:.3f}'.format(
mse(coef_fridge[1], coefs), mse(coef_auto_ridge[1], coefs)
))
###Output
Fixed Ridge MSE:0.082
Auto Ridge MSE:0.079
###Markdown
Sparse RegressionNow, let's move to a more challenging problem with a much higher $P$ to $N$ ratio and a sparsity specified by the parameter `relevance=0.5` in the simulation process.
###Code
NUM_OF_REGRESSORS = 50
SERIES_LEN = 50
SEED = 20210101
COEFS = np.random.default_rng(SEED).uniform(0.3, 0.5, NUM_OF_REGRESSORS)
SIGNS = np.random.default_rng(SEED).choice([1, -1], NUM_OF_REGRESSORS)
# to mimic coefficients that are either zero or have a clearly observable magnitude
COEFS = COEFS * SIGNS
trend = make_trend(SERIES_LEN, rw_loc=0.01, rw_scale=0.1)
x, regression, coefs = make_regression(series_len=SERIES_LEN, coefs=COEFS, relevance=0.5)
print(regression.shape, x.shape)
# generated sparse coefficients
coefs
# combine trend and the regression
y = trend + regression
x_cols = [f"x{x}" for x in range(1, NUM_OF_REGRESSORS + 1)]
response_col = "y"
dt_col = "date"
obs_matrix = np.concatenate([y.reshape(-1, 1), x], axis=1)
# make a data frame for orbit inputs
df = pd.DataFrame(obs_matrix, columns=[response_col] + x_cols)
# make some dummy date stamp
dt = pd.date_range(start='2016-01-04', periods=SERIES_LEN, freq="1W")
df['date'] = dt
df.shape
###Output
_____no_output_____
###Markdown
Fixed Ridge Penalty
###Code
dlt_fridge = DLTFull(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=8000,
)
dlt_fridge.fit(df=df)
coef_fridge = np.quantile(dlt_fridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(coef_fridge[1], color=QualitativePalette.Line4.value[2], linewidth=lw, drawstyle='steps', label="Ridge", alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_fridge[0], coef_fridge[2], step='pre', alpha=0.3, color=QualitativePalette.Line4.value[2])
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.legend(prop={'size': 20})
plt.grid();
###Output
_____no_output_____
###Markdown
LASSO PenaltyIn high $P$ to $N$ problems, the *LASSO* penalty usually shines compared to the *Ridge* penalty.
###Code
dlt_lasso = DLTFull(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
regression_penalty='lasso',
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=8000,
)
dlt_lasso.fit(df=df)
coef_lasso = np.quantile(dlt_lasso._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(coef_lasso[1], color=QualitativePalette.Line4.value[2], linewidth=lw, drawstyle='steps', label="Lasso", alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_lasso[0], coef_lasso[2], step='pre', alpha=0.3, color=QualitativePalette.Line4.value[2])
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.legend(prop={'size': 20})
plt.grid();
print('Fixed Ridge MSE:{:.3f}\nLASSO MSE:{:.3f}'.format(
mse(coef_fridge[1], coefs), mse(coef_lasso[1], coefs)
))
###Output
Fixed Ridge MSE:0.162
LASSO MSE:0.102
###Markdown
Regression with DLT In this notebook, we want to demonstrate how to use different arguments in **DLT** to train a model with various regression settings. We continue to use the *iclaims* data for demo purposes:1. regular regression2. regression with specific signs and priors for regression coefficientsFinally, we will also use a simulated dataset to illustrate different types of regression penalties:1. `fixed-ridge`2. `auto-ridge`3. `lasso`Generally speaking, regression coefficients are more robust under full Bayesian sampling and estimation. Hence, we will use `estimator=stan-mcmc` (the default) in this session.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import orbit
from orbit.utils.dataset import load_iclaims
from orbit.models import DLT
from orbit.diagnostics.plot import plot_predicted_data
from orbit.constants.palette import OrbitPalette
print(orbit.__version__)
###Output
1.1.0dev
###Markdown
US Weekly Initial Claims Recall the *iclaims* dataset from the previous section. In order to use this data to nowcast US unemployment claims during the COVID-19 period, we extended the dataset to Jan 2021 and added the [S&P 500 (^GSPC)](https://finance.yahoo.com/quote/%5EGSPC/history?period1=1264032000&period2=1611187200&interval=1wk&filter=history&frequency=1wk&includeAdjustedClose=true) and [VIX](https://finance.yahoo.com/quote/%5EVIX/history?p=%5EVIX) Index historical data for the same period. The data is standardized and log-transformed for model fitting purposes.
###Code
# load data
df = load_iclaims(end_date='2021-01-03')
date_col = 'week'
response_col = 'claims'
df.dtypes
df.head(5)
###Output
_____no_output_____
###Markdown
We can see from the plot below that there are seasonality, trend, as well as a huge changepoint due to the impact of COVID-19.
###Code
fig, axs = plt.subplots(2, 2,figsize=(20,8))
axs[0, 0].plot(df['week'], df['claims'])
axs[0, 0].set_title('Unemployment Claims')
axs[0, 1].plot(df['week'], df['trend.unemploy'], 'tab:orange')
axs[0, 1].set_title('Google trend - unemploy')
axs[1, 0].plot(df['week'], df['vix'], 'tab:green')
axs[1, 0].set_title('VIX')
axs[1, 1].plot(df['week'], df['sp500'], 'tab:red')
axs[1, 1].set_title('S&P500')
# using relatively updated data
df = df[df['week'] > '2018-01-01'].reset_index(drop=True)
test_size = 26
train_df = df[:-test_size]
test_df = df[-test_size:]
###Output
_____no_output_____
###Markdown
Naive Model Here we will use DLT models to compare the model performance with vs. without regression.
###Code
%%time
dlt = DLT(
response_col=response_col,
date_col=date_col,
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt.fit(df=train_df)
predicted_df = dlt.predict(df=test_df)
###Output
WARNING:pystan:n_eff / iter below 0.001 indicates that the effective sample size has likely been overestimated
WARNING:pystan:Rhat above 1.1 or below 0.9 indicates that the chains very likely have not mixed
###Markdown
DLT With Regression The regressor columns can be supplied via the argument `regressor_col`. Recall the regression formula in **DLT**:$$\hat{y}_t =\mu_t + s_t + r_t \\r_t = \sum_{j}\beta_j x_{jt} \\\beta_j \sim \mathcal{N}(\mu_j, \sigma_j^2)$$Let's use the default where $\mu_j = 0$ and $\sigma_j = 1$. In addition, we can set a *sign* constraint for each coefficient $\beta_j$. This can be done by supplying `regressor_sign` as a list where each element is one of the following:* '=': $\beta_j \sim \mathcal{N}(0, \sigma_j^2)$ i.e. $\beta_j \in (-\infty, \infty)$* '+': $\beta_j \sim \mathcal{N}^+(0, \sigma_j^2)$ i.e. $\beta_j \in [0, \infty)$* '-': $\beta_j \sim \mathcal{N}^-(0, \sigma_j^2)$ i.e. $\beta_j \in (-\infty, 0]$Based on some intuition, it's reasonable to assume search terms such as "unemployment", "filling" and the **VIX** index to be positively correlated and a stock index such as **SP500** to be negatively correlated with the outcome. We will leave whatever we are unsure about as a regular regressor.
###Code
%%time
dlt_reg = DLT(
response_col=response_col,
date_col=date_col,
regressor_col=['trend.unemploy', 'trend.filling', 'trend.job', 'sp500', 'vix'],
regressor_sign=["+", '+', '=', '-', '+'],
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt_reg.fit(df=train_df)
predicted_df_reg = dlt_reg.predict(test_df)
###Output
WARNING:pystan:n_eff / iter below 0.001 indicates that the effective sample size has likely been overestimated
WARNING:pystan:Rhat above 1.1 or below 0.9 indicates that the chains very likely have not mixed
###Markdown
The estimated regressor coefficients can be retrieved via `.get_regression_coefs()`.
###Code
dlt_reg.get_regression_coefs()
###Output
_____no_output_____
###Markdown
DLT with Regression and Informative Priors Assuming users obtain further knowledge about some of the regressors, they can use informative priors ($\mu$, $\sigma$) by replacing the defaults. This can be done via the arguments `regressor_beta_prior` and `regressor_sigma_prior`. These two lists should be of the same length as `regressor_col`.
###Code
dlt_reg_adjust = DLT(
response_col=response_col,
date_col=date_col,
regressor_col=['trend.unemploy', 'trend.filling', 'trend.job', 'sp500', 'vix'],
regressor_sign=["+", '+', '=', '-', '+'],
regressor_beta_prior=[0.5, 0.25, 0.07, -0.3, 0.03],
regressor_sigma_prior=[0.1] * 5,
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt_reg_adjust.fit(df=train_df)
predicted_df_reg_adjust = dlt_reg_adjust.predict(test_df)
dlt_reg_adjust.get_regression_coefs()
###Output
_____no_output_____
###Markdown
Let's compare the holdout performance using the built-in function `smape()`.
###Code
import numpy as np
from orbit.diagnostics.metrics import smape
# to reverse the log-transformation
def smape_adjusted(x, y):
x = np.exp(x)
y = np.exp(y)
return smape(x, y)
naive_smape = smape_adjusted(predicted_df['prediction'].values, test_df['claims'].values)
reg_smape = smape_adjusted(predicted_df_reg['prediction'].values, test_df['claims'].values)
reg_adjust_smape = smape_adjusted(predicted_df_reg_adjust['prediction'].values, test_df['claims'].values)
print('Naive Model: {:.3f}\nRegression Model: {:.3f}\nRefined Regression Model: {:.3f}'.format(
naive_smape, reg_smape, reg_adjust_smape
))
###Output
Naive Model: 0.205
Regression Model: 0.153
Refined Regression Model: 0.089
###Markdown
Regression on Simulated DatasetLet's use a simulated dataset to demonstrate sparse regression.
###Code
import pandas as pd
from orbit.utils.simulation import make_trend, make_regression
from orbit.diagnostics.metrics import mse
###Output
_____no_output_____
###Markdown
We have developed a few utilities to generate simulated data. For details, please refer to our API doc. In brief, we are generating observations $y$ such that$$y_t = l_t + r_t $$where$$r_t = \sum_{p=1}^{P} \beta_p x_{p,t}$$ Regular RegressionLet's start with a small number of regressors with $P=10$ and $T=50$.
###Code
NUM_OF_REGRESSORS = 10
SERIES_LEN = 50
SEED = 20210101
# sample some coefficients
COEFS = np.random.default_rng(SEED).uniform(-1, 1, NUM_OF_REGRESSORS)
trend = make_trend(SERIES_LEN, rw_loc=0.01, rw_scale=0.1)
x, regression, coefs = make_regression(series_len=SERIES_LEN, coefs=COEFS)
print(regression.shape, x.shape)
# combine trend and the regression
y = trend + regression
x_cols = [f"x{x}" for x in range(1, NUM_OF_REGRESSORS + 1)]
response_col = "y"
dt_col = "date"
obs_matrix = np.concatenate([y.reshape(-1, 1), x], axis=1)
# make a data frame for orbit inputs
df = pd.DataFrame(obs_matrix, columns=[response_col] + x_cols)
# make some dummy date stamp
dt = pd.date_range(start='2016-01-04', periods=SERIES_LEN, freq="1W")
df['date'] = dt
df.shape
###Output
_____no_output_____
###Markdown
Let's take a peek at the coefficients.
###Code
coefs
###Output
_____no_output_____
###Markdown
Now, let's run a regression with the defaults where we have constant `regressor_sigma_prior` and `regression_penalty` set as `fixed-ridge`. Fixed Ridge Penalty
###Code
%%time
dlt_fridge = DLT(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
# this is default
regression_penalty='fixed_ridge',
# fixing the smoothing parameters to learn regression coefficients more effectively
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=4000,
)
dlt_fridge.fit(df=df)
coef_fridge = np.quantile(dlt_fridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=20)
plt.plot(idx, coef_fridge[1], color=OrbitPalette.GREEN.value, linewidth=lw, drawstyle='steps', label='Fixed-Ridge', alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_fridge[0], coef_fridge[2], step='pre', alpha=0.3, color=OrbitPalette.GREEN.value)
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.ylim(1, -1)
plt.legend(prop={'size': 20})
plt.grid()
###Output
_____no_output_____
###Markdown
We can also set the `regression_penalty` to be `auto-ridge` in case we are unsure what to set for the `regressor_sigma_prior`. Auto-Ridge Penalty Instead of using a fixed scale in the coefficient priors, a hyperprior can be assigned to them, i.e.$$ \sigma_j \sim \text{Cauchy}^{+} {(0, \alpha)} $$This can be done by setting `regression_penalty="auto_ridge"`, where the argument `auto_ridge_scale` (default `0.5`) sets the hyperprior $\alpha$. We can also supply Stan config such as `adapt_delta` to reduce divergences. Check [here](https://mc-stan.org/rstanarm/reference/adapt_delta.html) for details of `adapt_delta`.
###Code
%%time
dlt_auto_ridge = DLT(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
# this is default
regression_penalty='auto_ridge',
# fixing the smoothing parameters to learn regression coefficients more effectively
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=4000,
# reduce divergence
stan_mcmc_control={'adapt_delta':0.9},
)
dlt_auto_ridge.fit(df=df)
coef_auto_ridge = np.quantile(dlt_auto_ridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(idx, coef_auto_ridge[1], color=OrbitPalette.GREEN.value, linewidth=lw, drawstyle='steps', label='Auto-Ridge', alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_auto_ridge[0], coef_auto_ridge[2], step='pre', alpha=0.3, color=OrbitPalette.GREEN.value)
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.ylim(1, -1)
plt.legend(prop={'size': 20})
plt.grid();
print('Fixed Ridge MSE:{:.3f}\nAuto Ridge MSE:{:.3f}'.format(
mse(coef_fridge[1], coefs), mse(coef_auto_ridge[1], coefs)
))
###Output
Fixed Ridge MSE:0.082
Auto Ridge MSE:0.079
###Markdown
Sparse RegressionNow, let's move to a more challenging problem with a much higher $P$ to $N$ ratio and a sparsity specified by the parameter `relevance=0.5` in the simulation process.
###Code
NUM_OF_REGRESSORS = 50
SERIES_LEN = 50
SEED = 20210101
COEFS = np.random.default_rng(SEED).uniform(0.3, 0.5, NUM_OF_REGRESSORS)
SIGNS = np.random.default_rng(SEED).choice([1, -1], NUM_OF_REGRESSORS)
# to mimic coefficients that are either zero or have a clearly observable magnitude
COEFS = COEFS * SIGNS
trend = make_trend(SERIES_LEN, rw_loc=0.01, rw_scale=0.1)
x, regression, coefs = make_regression(series_len=SERIES_LEN, coefs=COEFS, relevance=0.5)
print(regression.shape, x.shape)
# generated sparse coefficients
coefs
# combine trend and the regression
y = trend + regression
x_cols = [f"x{x}" for x in range(1, NUM_OF_REGRESSORS + 1)]
response_col = "y"
dt_col = "date"
obs_matrix = np.concatenate([y.reshape(-1, 1), x], axis=1)
# make a data frame for orbit inputs
df = pd.DataFrame(obs_matrix, columns=[response_col] + x_cols)
# make some dummy date stamp
dt = pd.date_range(start='2016-01-04', periods=SERIES_LEN, freq="1W")
df['date'] = dt
df.shape
###Output
_____no_output_____
###Markdown
Fixed Ridge Penalty
###Code
dlt_fridge = DLT(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=8000,
)
dlt_fridge.fit(df=df)
coef_fridge = np.quantile(dlt_fridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(coef_fridge[1], color=OrbitPalette.GREEN.value, linewidth=lw, drawstyle='steps', label="Ridge", alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_fridge[0], coef_fridge[2], step='pre', alpha=0.3, color=OrbitPalette.GREEN.value)
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.legend(prop={'size': 20})
plt.grid();
###Output
_____no_output_____
###Markdown
LASSO PenaltyIn high $P$ to $N$ problems, the *LASSO* penalty usually shines compared to the *Ridge* penalty.
###Code
dlt_lasso = DLT(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
regression_penalty='lasso',
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=8000,
)
dlt_lasso.fit(df=df)
coef_lasso = np.quantile(dlt_lasso._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(coef_lasso[1], color=OrbitPalette.GREEN.value, linewidth=lw, drawstyle='steps', label="Lasso", alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_lasso[0], coef_lasso[2], step='pre', alpha=0.3, color=OrbitPalette.GREEN.value)
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.legend(prop={'size': 20})
plt.grid();
print('Fixed Ridge MSE:{:.3f}\nLASSO MSE:{:.3f}'.format(
mse(coef_fridge[1], coefs), mse(coef_lasso[1], coefs)
))
###Output
Fixed Ridge MSE:0.162
LASSO MSE:0.102
###Markdown
Regression with DLT In this notebook, we want to demonstrate how to use different arguments in **DLT** to train a model with various regression settings. We continue to use the *iclaims* data for demo purposes:1. regular regression2. regression with specific signs and priors for regression coefficientsFinally, we will also use a simulated dataset to illustrate different types of regression penalties:1. `fixed-ridge`2. `auto-ridge`3. `lasso`Generally speaking, regression coefficients are more robust under full Bayesian sampling and estimation. Hence, we will use `DLTFull` in this session.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import orbit
from orbit.utils.dataset import load_iclaims
from orbit.models.dlt import DLTFull, DLTMAP
from orbit.diagnostics.plot import plot_predicted_data
from orbit.constants.palette import QualitativePalette
from orbit.utils.plot import get_orbit_style
plt.style.use(get_orbit_style())
print(orbit.__version__)
###Output
1.0.17
###Markdown
US Weekly Initial Claims Recall the *iclaims* dataset from the previous section. In order to use this data to nowcast US unemployment claims during the COVID-19 period, we extended the dataset to Jan 2021 and added the [S&P 500 (^GSPC)](https://finance.yahoo.com/quote/%5EGSPC/history?period1=1264032000&period2=1611187200&interval=1wk&filter=history&frequency=1wk&includeAdjustedClose=true) and [VIX](https://finance.yahoo.com/quote/%5EVIX/history?p=%5EVIX) Index historical data for the same period. The data is standardized and log-transformed for model fitting purposes.
###Code
# load data
df = load_iclaims(end_date='2021-01-03')
date_col = 'week'
response_col = 'claims'
df.dtypes
df.head(5)
###Output
_____no_output_____
###Markdown
We can see from the plot below that there are seasonality, trend, as well as a huge changepoint due to the impact of COVID-19.
###Code
fig, axs = plt.subplots(2, 2,figsize=(20,8))
axs[0, 0].plot(df['week'], df['claims'])
axs[0, 0].set_title('Unemployment Claims')
axs[0, 1].plot(df['week'], df['trend.unemploy'], 'tab:orange')
axs[0, 1].set_title('Google trend - unemploy')
axs[1, 0].plot(df['week'], df['vix'], 'tab:green')
axs[1, 0].set_title('VIX')
axs[1, 1].plot(df['week'], df['sp500'], 'tab:red')
axs[1, 1].set_title('S&P500')
# using relatively updated data
df = df[df['week'] > '2018-01-01'].reset_index(drop=True)
test_size = 26
train_df = df[:-test_size]
test_df = df[-test_size:]
###Output
_____no_output_____
###Markdown
Naive Model Here we will use DLT models to compare the model performance with vs. without regression.
###Code
%%time
dlt = DLTFull(
response_col=response_col,
date_col=date_col,
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt.fit(df=train_df)
predicted_df = dlt.predict(df=test_df)
###Output
WARNING:pystan:n_eff / iter below 0.001 indicates that the effective sample size has likely been overestimated
WARNING:pystan:Rhat above 1.1 or below 0.9 indicates that the chains very likely have not mixed
###Markdown
DLT With Regression The regressor columns can be supplied via the argument `regressor_col`. Recall the regression formula in **DLT**:$$\hat{y}_t =\mu_t + s_t + r_t \\r_t = \sum_{j}\beta_j x_{jt} \\\beta_j \sim \mathcal{N}(\mu_j, \sigma_j^2)$$Let's use the default where $\mu_j = 0$ and $\sigma_j = 1$. In addition, we can set a *sign* constraint for each coefficient $\beta_j$. This can be done by supplying `regressor_sign` as a list where each element is one of the following:* '=': $\beta_j \sim \mathcal{N}(0, \sigma_j^2)$ i.e. $\beta_j \in (-\infty, \infty)$* '+': $\beta_j \sim \mathcal{N}^+(0, \sigma_j^2)$ i.e. $\beta_j \in [0, \infty)$* '-': $\beta_j \sim \mathcal{N}^-(0, \sigma_j^2)$ i.e. $\beta_j \in (-\infty, 0]$Based on some intuition, it's reasonable to assume search terms such as "unemployment", "filling" and the **VIX** index to be positively correlated and a stock index such as **SP500** to be negatively correlated with the outcome. We will leave whatever we are unsure about as a regular regressor.
###Code
%%time
dlt_reg = DLTFull(
response_col=response_col,
date_col=date_col,
regressor_col=['trend.unemploy', 'trend.filling', 'trend.job', 'sp500', 'vix'],
regressor_sign=["+", '+', '=', '-', '+'],
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt_reg.fit(df=train_df)
predicted_df_reg = dlt_reg.predict(test_df)
###Output
WARNING:pystan:n_eff / iter below 0.001 indicates that the effective sample size has likely been overestimated
WARNING:pystan:Rhat above 1.1 or below 0.9 indicates that the chains very likely have not mixed
###Markdown
The estimated regressor coefficients can be retrieved via `.get_regression_coefs()`.
###Code
dlt_reg.get_regression_coefs()
###Output
_____no_output_____
###Markdown
DLT with Regression and Informative Priors Assuming users obtain further knowledge about some of the regressors, they can use informative priors ($\mu$, $\sigma$) by replacing the defaults. This can be done via the arguments `regressor_beta_prior` and `regressor_sigma_prior`. These two lists should be of the same length as `regressor_col`.
###Code
dlt_reg_adjust = DLTFull(
response_col=response_col,
date_col=date_col,
regressor_col=['trend.unemploy', 'trend.filling', 'trend.job', 'sp500', 'vix'],
regressor_sign=["+", '+', '=', '-', '+'],
regressor_beta_prior=[0.5, 0.25, 0.07, -0.3, 0.03],
regressor_sigma_prior=[0.1] * 5,
seasonality=52,
seed=8888,
num_warmup=4000,
)
dlt_reg_adjust.fit(df=train_df)
predicted_df_reg_adjust = dlt_reg_adjust.predict(test_df)
dlt_reg_adjust.get_regression_coefs()
###Output
_____no_output_____
###Markdown
Let's compare the holdout performance using the built-in function `smape()`.
###Code
import numpy as np
from orbit.diagnostics.metrics import smape
# to reverse the log-transformation
def smape_adjusted(x, y):
x = np.exp(x)
y = np.exp(y)
return smape(x, y)
naive_smape = smape_adjusted(predicted_df['prediction'].values, test_df['claims'].values)
reg_smape = smape_adjusted(predicted_df_reg['prediction'].values, test_df['claims'].values)
reg_adjust_smape = smape_adjusted(predicted_df_reg_adjust['prediction'].values, test_df['claims'].values)
print('Naive Model: {:.3f}\nRegression Model: {:.3f}\nRefined Regression Model: {:.3f}'.format(
naive_smape, reg_smape, reg_adjust_smape
))
###Output
Naive Model: 0.205
Regression Model: 0.153
Refined Regression Model: 0.089
###Markdown
Regression on Simulated DatasetLet's use a simulated dataset to demonstrate sparse regression.
###Code
import pandas as pd
from orbit.constants.palette import QualitativePalette
from orbit.utils.simulation import make_trend, make_regression
from orbit.diagnostics.metrics import mse
###Output
_____no_output_____
###Markdown
We have developed a few utilities to generate simulated data. For details, please refer to our API doc. In brief, we are generating observations $y$ such that$$y_t = l_t + r_t $$where$$r_t = \sum_{p=1}^{P} \beta_p x_{p,t}$$ Regular RegressionLet's start with a small number of regressors with $P=10$ and $T=50$.
###Code
NUM_OF_REGRESSORS = 10
SERIES_LEN = 50
SEED = 20210101
# sample some coefficients
COEFS = np.random.default_rng(SEED).uniform(-1, 1, NUM_OF_REGRESSORS)
trend = make_trend(SERIES_LEN, rw_loc=0.01, rw_scale=0.1)
x, regression, coefs = make_regression(series_len=SERIES_LEN, coefs=COEFS)
print(regression.shape, x.shape)
# combine trend and the regression
y = trend + regression
x_cols = [f"x{x}" for x in range(1, NUM_OF_REGRESSORS + 1)]
response_col = "y"
dt_col = "date"
obs_matrix = np.concatenate([y.reshape(-1, 1), x], axis=1)
# make a data frame for orbit inputs
df = pd.DataFrame(obs_matrix, columns=[response_col] + x_cols)
# make some dummy date stamp
dt = pd.date_range(start='2016-01-04', periods=SERIES_LEN, freq="1W")
df['date'] = dt
df.shape
###Output
_____no_output_____
###Markdown
Let's take a peek at the coefficients.
###Code
coefs
###Output
_____no_output_____
###Markdown
Now, let's run a regression with the defaults where we have constant `regressor_sigma_prior` and `regression_penalty` set as `fixed-ridge`. Fixed Ridge Penalty
###Code
%%time
dlt_fridge = DLTFull(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
# this is default
regression_penalty='fixed_ridge',
# fixing the smoothing parameters to learn regression coefficients more effectively
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=4000,
)
dlt_fridge.fit(df=df)
coef_fridge = np.quantile(dlt_fridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=20)
plt.plot(idx, coef_fridge[1], color=QualitativePalette.Line4.value[2], linewidth=lw, drawstyle='steps', label='Fixed-Ridge', alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_fridge[0], coef_fridge[2], step='pre', alpha=0.3, color=QualitativePalette.Line4.value[2])
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.ylim(1, -1)
plt.legend(prop={'size': 20})
plt.grid()
###Output
_____no_output_____
###Markdown
We can also set the `regression_penalty` to be `auto-ridge` in case we are unsure what to set for the `regressor_sigma_prior`. Auto-Ridge Penalty Instead of using a fixed scale in the coefficient priors, a hyperprior can be assigned to them, i.e.$$ \sigma_j \sim \text{Cauchy}^{+} {(0, \alpha)} $$This can be done by setting `regression_penalty="auto_ridge"`, where the argument `auto_ridge_scale` (default `0.5`) sets the hyperprior $\alpha$. We can also supply Stan config such as `adapt_delta` to reduce divergences. Check [here](https://mc-stan.org/rstanarm/reference/adapt_delta.html) for details of `adapt_delta`.
###Code
%%time
dlt_auto_ridge = DLTFull(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
# this is default
regression_penalty='auto_ridge',
# fixing the smoothing parameters to learn regression coefficients more effectively
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=4000,
# reduce divergence
stan_mcmc_control={'adapt_delta':0.9},
)
dlt_auto_ridge.fit(df=df)
coef_auto_ridge = np.quantile(dlt_auto_ridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(idx, coef_auto_ridge[1], color=QualitativePalette.Line4.value[2], linewidth=lw, drawstyle='steps', label='Auto-Ridge', alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_auto_ridge[0], coef_auto_ridge[2], step='pre', alpha=0.3, color=QualitativePalette.Line4.value[2])
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.ylim(1, -1)
plt.legend(prop={'size': 20})
plt.grid();
print('Fixed Ridge MSE:{:.3f}\nAuto Ridge MSE:{:.3f}'.format(
mse(coef_fridge[1], coefs), mse(coef_auto_ridge[1], coefs)
))
###Output
Fixed Ridge MSE:0.082
Auto Ridge MSE:0.079
###Markdown
Sparse RegressionNow, let's move to a more challenging problem with a much higher $P$ to $N$ ratio and a sparsity specified by the parameter `relevance=0.5` in the simulation process.
###Code
NUM_OF_REGRESSORS = 50
SERIES_LEN = 50
SEED = 20210101
COEFS = np.random.default_rng(SEED).uniform(0.3, 0.5, NUM_OF_REGRESSORS)
SIGNS = np.random.default_rng(SEED).choice([1, -1], NUM_OF_REGRESSORS)
# to mimic coefficients that are either zero or have a clearly observable magnitude
COEFS = COEFS * SIGNS
trend = make_trend(SERIES_LEN, rw_loc=0.01, rw_scale=0.1)
x, regression, coefs = make_regression(series_len=SERIES_LEN, coefs=COEFS, relevance=0.5)
print(regression.shape, x.shape)
# generated sparse coefficients
coefs
# combine trend and the regression
y = trend + regression
x_cols = [f"x{x}" for x in range(1, NUM_OF_REGRESSORS + 1)]
response_col = "y"
dt_col = "date"
obs_matrix = np.concatenate([y.reshape(-1, 1), x], axis=1)
# make a data frame for orbit inputs
df = pd.DataFrame(obs_matrix, columns=[response_col] + x_cols)
# make some dummy date stamp
dt = pd.date_range(start='2016-01-04', periods=SERIES_LEN, freq="1W")
df['date'] = dt
df.shape
###Output
_____no_output_____
###Markdown
Fixed Ridge Penalty
###Code
dlt_fridge = DLTFull(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=8000,
)
dlt_fridge.fit(df=df)
coef_fridge = np.quantile(dlt_fridge._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(coef_fridge[1], color=QualitativePalette.Line4.value[2], linewidth=lw, drawstyle='steps', label="Ridge", alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_fridge[0], coef_fridge[2], step='pre', alpha=0.3, color=QualitativePalette.Line4.value[2])
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.legend(prop={'size': 20})
plt.grid();
###Output
_____no_output_____
###Markdown
LASSO PenaltyIn high $P$ to $N$ problems, the *LASSO* penalty usually shines compared to the *Ridge* penalty.
###Code
dlt_lasso = DLTFull(
response_col=response_col,
date_col=dt_col,
regressor_col=x_cols,
seed=SEED,
regression_penalty='lasso',
level_sm_input=0.01,
slope_sm_input=0.01,
num_warmup=8000,
)
dlt_lasso.fit(df=df)
coef_lasso = np.quantile(dlt_lasso._posterior_samples['beta'], q=[0.05, 0.5, 0.95], axis=0 )
lw=3
idx = np.arange(NUM_OF_REGRESSORS)
plt.figure(figsize=(20, 8))
plt.title("Weights of the model", fontsize=24)
plt.plot(coef_lasso[1], color=QualitativePalette.Line4.value[2], linewidth=lw, drawstyle='steps', label="Lasso", alpha=0.5, linestyle='--')
plt.fill_between(idx, coef_lasso[0], coef_lasso[2], step='pre', alpha=0.3, color=QualitativePalette.Line4.value[2])
plt.plot(coefs, color="black", linewidth=lw, drawstyle='steps', label="Ground truth")
plt.legend(prop={'size': 20})
plt.grid();
print('Fixed Ridge MSE:{:.3f}\nLASSO MSE:{:.3f}'.format(
mse(coef_fridge[1], coefs), mse(coef_lasso[1], coefs)
))
###Output
Fixed Ridge MSE:0.162
LASSO MSE:0.102
|
notebooks/example_NGC1275_test_env.ipynb | ###Markdown
Example to calculate photon-ALP oscillations from NGC 1275 This notebook demonstrates how to calculate the photon-ALP transition probability for NGC 1275, the central AGN of the Perseus cluster. The assumed B-field environments are the same as in Ajello et al. (2016), http://inspirehep.net/record/1432667, and include the cluster field and the magnetic field of the Milky Way.
###Code
from gammaALPs.core import Source, ALP, ModuleList
from gammaALPs.base import environs, transfer
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patheffects import withStroke
from ebltable.tau_from_model import OptDepth
from astropy import constants as c
%matplotlib inline
###Output
_____no_output_____
###Markdown
Set the ALP Initialize an ALP object that stores the ALP mass $m$ (in neV) and the coupling $g$ (in $10^{-11}\mathrm{GeV}^{-1}$).
###Code
m, g = 1.,1.
alp = ALP(m,g)
###Output
_____no_output_____
###Markdown
Set the source Set the source properties (redshift and sky coordinates) in the ```Source``` container
###Code
ngc1275 = Source(z = 0.017559, ra = '03h19m48.1s', dec = '+41d30m42s')
print (ngc1275.z)
print (ngc1275.ra, ngc1275.dec)
print (ngc1275.l, ngc1275.b)
###Output
0.017559
49.950416666666655 41.51166666666666
150.57567432060083 -13.261343544296357
###Markdown
Init the module list Initialize the list of transfer modules that will store the different magnetic field environments. Energies are supplied in GeV as ```numpy.ndarray```
###Code
EGeV = np.logspace(1.,3.5,250)
###Output
_____no_output_____
###Markdown
Now initialize the initial photon polarization. Since we are dealing with a gamma-ray source, no ALPs are initially present in the beam (third diagonal element is zero). The polarization density matrix is normalized such that its trace is equal to one, $\mathrm{Tr}(\rho_\mathrm{in}) = 1$.
###Code
pin = np.diag((1.,1.,0.)) * 0.5
m = ModuleList(alp, ngc1275, pin = pin, EGeV = EGeV)
###Output
_____no_output_____
###Markdown
Add modules: Now we add propagation modules for the cluster, the EBL, and the Galactic magnetic field.
###Code
m.add_propagation("ICMGaussTurb",
0, # position of module counted from the source.
nsim = 10, # number of random B-field realizations
B0 = 10., # rms of B field
n0 = 3.9e-2, # normalization of electron density
n2 = 4.05e-3, # second normalization of electron density, see Churazov et al. 2003, Eq. 4
r_abell = 500., # extension of the cluster
r_core = 80., # electron density parameter, see Churazov et al. 2003, Eq. 4
r_core2 = 280., # electron density parameter, see Churazov et al. 2003, Eq. 4
beta = 1.2, # electron density parameter, see Churazov et al. 2003, Eq. 4
beta2= 0.58, # electron density parameter, see Churazov et al. 2003, Eq. 4
eta = 0.5, # scaling of B-field with electron density
kL = 0.18, # maximum turbulence scale in kpc^-1, taken from A2199 cool-core cluster, see Vacca et al. 2012
kH = 9., # minimum turbulence scale, taken from A2199 cool-core cluster, see Vacca et al. 2012
q = -2.1, # turbulence spectral index, taken from A2199 cool-core cluster, see Vacca et al. 2012
seed=0 # random seed for reproducibility; set to None for a random seed.
)
m.add_propagation("EBL",1, model = 'dominguez') # EBL attenuation comes second, after beam has left cluster
m.add_propagation("GMF",2, model = 'jansson12', model_sum = 'ASS') # finally, the beam enters the Milky Way Field
###Output
[0;36menvirons.py:[0;35m 431[0;0m --- [1;36mINFO[1;0m: Using inputted chi
[0;36menvirons.py:[0;35m1039[0;0m --- [1;36mINFO[1;0m: Using inputted chi
###Markdown
List the module names:
###Code
print(m.modules.keys())
###Output
['MixICMGaussTurb', 'OptDepth', 'MixGMF']
###Markdown
We can also change the ALP parameters before running the modules:
###Code
m.alp.m = 30.
m.alp.g = 0.5
###Output
_____no_output_____
###Markdown
Test the new F_q implementation
###Code
bfield = m.modules[0].Bfield_model
k = np.logspace(np.log10(bfield.kMin), np.log10(bfield.kH), bfield.dkSteps)
plt.semilogx(k, bfield.Fq(k) / bfield.Fq_old(k), ls='-')
plt.axvline(bfield.kL, ls='--')
plt.semilogx(k, bfield.Fq(k))
plt.semilogx(k, bfield.Fq_longitudinal(k))
###Output
_____no_output_____
###Markdown
Run all modules Now we run the modules. If the ```multiprocess``` keyword is larger than one, the calculation will be split onto multiple cores with python's ```multiprocess``` module. The ```px,py,pa``` variables contain the mixing probability into the two photon polarization states (x,y) and into the axion state (a).
###Code
px,py,pa = m.run(multiprocess=2)
###Output
[0;36m core.py:[0;35m 639[0;0m --- [1;36mINFO[1;0m: Running Module 0: <class 'gammaALPs.base.environs.MixICMGaussTurb'>
[0;36m core.py:[0;35m 639[0;0m --- [1;36mINFO[1;0m: Running Module 2: <class 'gammaALPs.base.environs.MixGMF'>
###Markdown
Test the matmul multiplication routine
###Code
for i, T in enumerate(m._Tenv):
print(i, T.shape)
from gammaALPs.base.transfer import calc_conv_prob
def calc_conv_prob_new(pin, pout, T):
    # vectorized version of calc_conv_prob: Tr(pout @ T @ pin @ T^dagger) for each energy;
    # the conjugate transpose must swap only the matrix axes, hence axes=(0, 2, 1)
    return np.squeeze(np.real(np.trace(
        np.matmul(pout,
                  np.matmul(T,
                            np.matmul(pin, np.transpose(T.conjugate(), axes=(0, 2, 1)))
                            )
                  ),
        axis1=1, axis2=2)))
def calc_conv_prob_by_hand(pin, pout, T):
# gives the same result as calc_conv_prob
# loop over energies
    result = np.zeros(T.shape[0], dtype=float)  # note: np.float is deprecated in newer NumPy versions
for ie in range(T.shape[0]):
Tdagger = np.transpose(np.conjugate(T[ie]))
inner_most = np.dot(pin, Tdagger)
inner = np.dot(T[ie], inner_most)
outer = np.dot(pout, inner)
result[ie] = np.real(np.trace(outer))
return result
## wrong order
Tfinal_wrong = np.matmul(
np.matmul(m._Tenv[0][0], m._Tenv[1][0]),
m._Tenv[2][0]
)
## right order
Tfinal_right = np.matmul(
np.matmul(m._Tenv[2][0], m._Tenv[1][0]),
m._Tenv[0][0]
)
print (Tfinal_wrong.shape)
px_wrong = calc_conv_prob(m.pin, m.px, Tfinal_wrong)
px_also_wrong = calc_conv_prob_new(m.pin, m.px, Tfinal_wrong)
py_wrong = calc_conv_prob(m.pin, m.py, Tfinal_wrong)
py_also_wrong = calc_conv_prob_new(m.pin, m.py, Tfinal_wrong)
px_maybe_right = calc_conv_prob(m.pin, m.px, Tfinal_right)
px_also_maybe_right = calc_conv_prob_by_hand(m.pin, m.px, Tfinal_right)
py_maybe_right = calc_conv_prob(m.pin, m.py, Tfinal_right)
py_also_maybe_right = calc_conv_prob_by_hand(m.pin, m.py, Tfinal_right)
plt.figure(dpi=150)
plt.semilogx(m.EGeV, py_wrong + px_wrong)
plt.semilogx(m.EGeV, px_also_wrong + py_also_wrong, ls='--')
plt.semilogx(m.EGeV, py_maybe_right + px_maybe_right, ls='-.')
#plt.semilogx(m.EGeV, px_also_maybe_right, ls=':')
###Output
_____no_output_____
###Markdown
Plot the output
###Code
pgg = px + py # the total photon survival probability
print (pgg.shape)
print (np.min(np.median(pgg, axis = 0)))
print (np.min(np.max(pgg, axis = 0)))
effect = dict(path_effects=[withStroke(foreground="w", linewidth=2)])
for p in pgg: # plot all realizations
plt.semilogx(m.EGeV, p)
plt.xlabel('Energy (GeV)')
plt.ylabel('Photon survival probability')
plt.legend(loc = 0, fontsize = 'medium')
plt.annotate(r'$m_a = {0:.1f}\,\mathrm{{neV}}, g_{{a\gamma}} = {1:.1f} \times 10^{{-11}}\,\mathrm{{GeV}}^{{-1}}$'.format(m.alp.m,m.alp.g),
xy = (0.95,0.1), size = 'x-large', xycoords = 'axes fraction', ha = 'right',**effect)
plt.gca().set_xscale('log')
plt.gca().set_yscale('log')
plt.subplots_adjust(left = 0.2)
plt.savefig("pgg.png", dpi = 150)
###Output
[0;36m legend.py:[0;35m1193[0;0m --- [1;31mWARNING[1;0m: No handles with labels found to put in legend.
###Markdown
Save results Save the results in an astropy table.
###Code
from astropy.table import Table
c = {}
c['pgg'] = np.vstack((EGeV, pgg))
t = Table(c)
t.write('ngc1275.fits', overwrite = True)
t1 = Table.read('ngc1275.fits')
t1
###Output
_____no_output_____
###Markdown
Plot the magnetic field of the cluster, stored in module 0
###Code
plt.plot(m.modules["ICMGaussTurb"].r,m.modules["ICMGaussTurb"].B * np.sin(m.modules["ICMGaussTurb"].psi),
lw=1)
plt.plot(m.modules["ICMGaussTurb"].r,m.modules["ICMGaussTurb"].B * np.cos(m.modules["ICMGaussTurb"].psi),
lw=1, ls = '--')
plt.ylabel('$B$ field ($\mu$G)')
plt.xlabel('$r$ (kpc)')
###Output
_____no_output_____
###Markdown
And plot the electron density:
###Code
plt.loglog(m.modules["ICMGaussTurb"].r,m.modules[0].nel * 1e-3)
plt.ylabel('$n_\mathrm{el}$ (cm$^{-3}$)')
plt.xlabel('$r$ (kpc)')
###Output
_____no_output_____
###Markdown
You can also manipulate the magnetic field and electron density at run time Calculate the coherence length of the transversal component $B$ field It is also possible to compute the spatial correlation $C(x_3) = \langle B_\perp(\vec{x}) B_\perp(\vec{x} + x_3 \vec{e}_3)\rangle$ of the transversal magnetic field along the line of sight $x_3$:
###Code
x3 = np.linspace(0.,50.,1000) # distance in kpc from cluster center
c = m.modules["ICMGaussTurb"].Bfield_model.spatialCorr(x3)
plt.plot(x3,c / c[0])
plt.xlabel("$x_3$ (kpc)")
plt.ylabel("$C(x_3) / C(0)$")
plt.grid(True)
###Output
_____no_output_____
###Markdown
This in turn can be used to calculate the coherence length of the field, $$ \Lambda_C = \frac{1}{C(0)} \int\limits_0^\infty C(x_3)dx_3. $$
###Code
from scipy.integrate import simps
x3 = np.linspace(0.,1e3,1000) # distance in kpc from cluster center
c = m.modules["ICMGaussTurb"].Bfield_model.spatialCorr(x3)
Lambda_c = simps(c, x3) / c[0]
print ("Coherence length of the field is Lambda_C = {0:.3e} kpc".format(Lambda_c))
###Output
_____no_output_____
###Markdown
Calculate the rotation measure of the field
###Code
m.modules["ICMGaussTurb"].Bfield_model.seed = 0 # or None
rm = m.modules["ICMGaussTurb"].Bfield_model.rotation_measure(m.modules["ICMGaussTurb"].r,
n_el=m.modules["ICMGaussTurb"].nel * 1e-3,
nsim=1000)
###Output
_____no_output_____
###Markdown
Taylor et al. (2006) found RM values between 6500 and 7500 rad m^-2. Comparing B-field realizations to that number:
###Code
from scipy.stats import norm
n, bins, _ = plt.hist(np.sort((rm)), bins=30, density=True, label="Simulated RM")
plt.xlabel("Rotation Measure (rad m${}^{-2}$)")
plt.ylabel("Density")
mean = np.mean(rm)
var = np.var(rm)
print ("RM mean +/- sqrt(var) in rad m^-2: {0:.2f} +/- {1:.2f}".format(mean, np.sqrt(var)))
plt.plot(bins, norm.pdf(bins, loc=mean, scale=np.sqrt(var)),
lw=2,
label="Gaussian Fit\n$\mu = {0:.2f}$\n$\sigma={1:.2f}$".format(mean, np.sqrt(var)))
print ("{0:.3f}% of B field realizations have |RM| > 7500 rad m^-2".format((np.abs(rm) > 7500).sum() / rm.size * 100.))
plt.legend()
plt.gca().tick_params(labelleft=False, left=False, right=False)
plt.savefig("sim_rm_perseus.png", dpi=150)
###Output
_____no_output_____
###Markdown
Plot the magnetic field of the Milky Way
###Code
plt.plot(m.modules["GMF"].r, m.modules["GMF"].B * np.sin(m.modules["GMF"].psi),
lw = 1)
plt.plot(m.modules["GMF"].r, m.modules["GMF"].B * np.cos(m.modules["GMF"].psi),
lw = 1)
plt.ylabel('$B$ field ($\mu$G)')
plt.xlabel('$r$ (kpc)')
###Output
_____no_output_____ |
signals/sp-m1-1-sounds-signals.ipynb | ###Markdown
_Speech Processing Labs 2020: Signals: Module 1_ 1 Sounds and Signals Learning Objectives: * Identify periodic signals from a time vs amplitude (time domain) plot* Identify vocal pulses in a time vs amplitude graph, and how this relates to the concepts of period and frequency.* Identify differences in speech sounds based on a spectrogram Need to know: * Topic Videos: Time domain, Sound source, Periodic signal, Pitch* [How to use Praat](../phon/phon-0-getPraat.ipynb) * How to open and view a recording, pitch and spectrum plots and create spectral slices 1.1 Visualizing speech in the time domain Exercises* Record the following sentences in Praat: * 'Say writer for me' * 'Say rider for me' * From the time vs amplitude graph, what differences are there between: * the 's' and the 'a' in 'say' * the 's' in 'say' and the 'f' in 'for' * Looking at recordings of 'writer' and 'rider': are there any differences in your tutorial group in how you pronounce the following? Can you see evidence of this in the speech wave? * 't' vs 'd' * 'r' * 'i' Notes
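The exercises above are meant to be done in Praat, but if you also want to inspect a recording programmatically, here is a minimal sketch; the file name `say_writer.wav` is a placeholder for a mono recording you have exported from Praat yourself:

```python
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import wavfile

# 'say_writer.wav' is a hypothetical name for your own exported mono recording
sample_rate, samples = wavfile.read('say_writer.wav')
times = np.arange(len(samples)) / sample_rate   # time axis in seconds

plt.plot(times, samples)
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.title('Time-domain waveform')
plt.show()
```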
###Code
#### Add your notes here. Press Esc-m to change this code cell to a markdown cell (or select Markdown in the menu at the top)
###Output
_____no_output_____
Task-1 TSF.ipynb | ###Markdown
The Sparks Foundation, Data Science and Business Analytics Internship. Task 1: Prediction Using Supervised ML. Predict the percentage of a student based on the no. of study hours. In this task I'll present how the Python Scikit-Learn library for machine learning can be used to implement regression functions. I will predict the percentage of marks that a student is expected to score based upon the number of hours they studied. This is a simple linear regression task as it involves just two variables. By: Riddhi Chodvadiya. Import essential libraries: Pandas to manage the dataframes and read the dataset into a data frame; NumPy to store the Hours and Score values in arrays; Scikit-learn to split the data into two sets: 1) training set, 2) test set; Matplotlib to represent the trained model graphically.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Read the data from the given CSV file, and assign it to variable "df"
###Code
df = pd.read_csv('http://bit.ly/w-data')
###Output
_____no_output_____
###Markdown
Show the first 5 rows using the dataframe.head() method
###Code
print("The first 5 rows of the dataframe")
df.head(5)
###Output
The first 5 rows of the dataframe
###Markdown
Data Types: The main types stored in Pandas dataframes are object, float, int, bool and datetime64.
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
Describe: It gives a statistical summary of each column, such as count, column mean value, column standard deviation, etc. We use the describe method:
###Code
df.describe()
###Output
_____no_output_____
###Markdown
shape: To get the number of rows and columns in the dataframe we use the shape attribute. It returns a tuple where the first value is the number of records (rows) and the second value is the number of fields (columns).
###Code
df.shape
###Output
_____no_output_____
###Markdown
Data Visualization: Correlation. Correlation is a measure of the extent of interdependence between variables. We can calculate the correlation between variables of type "int64" or "float64" using the "corr" method:
###Code
df.corr()
# Assign both of the fields(Hours, Scores) and store it to two different variable named x and y respectively.
x=df[['Hours']]
y=df[['Scores']]
###Output
_____no_output_____
###Markdown
Plot the data as a graph
###Code
# Give appropriate labels for the axes and a title for the graph
plt.scatter(x,y)
plt.title("No. of Hours Vs Scores")
plt.xlabel("No. of Hours Studied by student")
plt.ylabel("Score of the student")
plt.show()
###Output
_____no_output_____
###Markdown
The graph above shows that the variables x and y have a HIGH POSITIVE LINEAR CORRELATION (high positive because all the points lie close to a straight line). Training and Testing
###Code
X = df.iloc[:, :-1].values
Y = df.iloc[:, 1].values
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.20, random_state=0)
print("number of test samples :", x_test.shape[0])
print("number of training samples:",x_train.shape[0])
###Output
number of test samples : 5
number of training samples: 20
###Markdown
The test_size parameter sets the proportion of data that goes into the testing set. Above, the testing set is set to 20% of the total dataset. If random_state is not specified, a new random seed is used on every run, so the train and test datasets contain different values each time. However, if a particular value is used for random_state (random_state = 1 or any other value), the result will be the same every time, i.e. the same values end up in the train and test datasets; a short sketch of this is shown below. Training the Model
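A minimal sketch of this reproducibility check (reusing the `X` and `Y` arrays defined above; the `a_*`/`b_*` names are introduced here just for the comparison):

```python
# Two splits with the same random_state return exactly the same rows
a_train, a_test, a_ytrain, a_ytest = train_test_split(X, Y, test_size=0.20, random_state=0)
b_train, b_test, b_ytrain, b_ytest = train_test_split(X, Y, test_size=0.20, random_state=0)
print((a_test == b_test).all(), (a_ytest == b_ytest).all())  # True True -> reproducible split
```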
###Code
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(x_train, y_train)
###Output
_____no_output_____
###Markdown
Plotting the regression line
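The line computed in the next cell is just the fitted simple linear regression model

$$\hat{y} = m\,x + c$$

where the slope $m$ is `lm.coef_`, the intercept $c$ is `lm.intercept_`, $x$ is the number of hours studied and $\hat{y}$ is the predicted score.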
###Code
line = lm.coef_ * X + lm.intercept_
# Plotting for the test data
plt.scatter(X, Y)
plt.plot(X, line);
plt.show()
###Output
_____no_output_____
###Markdown
When evaluating our model, not only do we want to visualize the results, but we also want a quantitative measure of how accurate the model is. To determine the accuracy of the model we use the score() method, which returns the R squared measure. R squared, also known as the coefficient of determination, indicates how close the data is to the fitted regression line.
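As a sanity check, $R^2$ can also be computed by hand as $1 - SS_{res}/SS_{tot}$; a small sketch using the fitted model and the test split from above (the result should match the `score()` value computed in the next cell):

```python
import numpy as np

y_pred = lm.predict(x_test)
ss_res = np.sum((y_test - y_pred) ** 2)           # residual sum of squares
ss_tot = np.sum((y_test - np.mean(y_test)) ** 2)  # total sum of squares
print(1 - ss_res / ss_tot)                        # R squared, computed manually
```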
###Code
acc=lm.score(x_test,y_test)
print("The accuracy of the Linear Regression Model created above is: ",acc)
###Output
The accuracy of the Linear Regression Model created above is: 0.9454906892105354
###Markdown
Prediction of Output
###Code
Yhat=lm.predict(x_test)
print('The output of predicted value is: ',Yhat)
#Comparing Actual score vs Predicted score
df1= pd.DataFrame({'ACTUAL SCORE':y_test,'PREDICTED SCORE':Yhat})
df1
###Output
_____no_output_____
###Markdown
Testing with your own data
###Code
hours = np.array([9.25])
own_pred = lm.predict(hours.reshape(-1,1))
print("No of Hours = {}".format(hours))
print("Predicted Score = {}".format(own_pred[0]))
###Output
No of Hours = [9.25]
Predicted Score = 93.69173248737539
###Markdown
Evaluation of Model
###Code
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, Yhat))
from sklearn.metrics import r2_score
print(' R-Squared :\t',metrics.r2_score(y_test, Yhat))
from sklearn.metrics import mean_squared_error
print(' Mean Squared Error :\t',mean_squared_error(y_test, Yhat))
###Output
Mean Squared Error : 21.598769307217456
|
DataAnalysis/Notebooks/.ipynb_checkpoints/User networks-checkpoint.ipynb | ###Markdown
For identifying networks of users via retweets
###Code
import pandas as pd
import networkx as nx
#Plotting
%matplotlib inline
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(style="whitegrid")
%cd twitterproject
# inject config value (on command line would've been --config=data-analysis)
import sys
# args = ['--config', 'data-analysis']
args = ['--config', 'laptop-mining']
old_sys_argv = sys.argv
sys.argv = [old_sys_argv[0]] + args
import environment
from TwitterDatabase.Repositories import DataRepositories as DR
from TwitterDatabase.DatabaseAccessObjects import DataConnections as DC
from TwitterDatabase.Models.WordORM import Word
from TwitterDatabase.Models.TweetORM import Users as User
from TwitterDatabase.Models.TweetORM import Tweet
from DataAnalysis.SearchTools.WordMaps import get_adjacent_word_counts, get_adjacent_words, get_user_ids_for_word
retweetIdQuery = """
SELECT
u.userID AS toId,
r.referring AS fromId,
u.screen_name,
r.tid
FROM
twitter_miner_laptop.users u
INNER JOIN
(SELECT
t.tweetID as tid,
t.in_reply_to_screen_name AS sn,
t.userID AS referring
FROM
twitter_miner_laptop.tweets t
WHERE
t.in_reply_to_screen_name IS NOT NULL) AS r
WHERE
u.screen_name = r.sn
"""
dao = DC.MySqlConnection(environment.CREDENTIAL_FILE)
data = pd.read_sql_query(retweetIdQuery, dao.engine) #, index_col='tweetID')
print("Loaded %s record" % len(data))
nodes = data.drop(['screen_name', 'tid'], axis=1)
nodes[-5:]
G = nx.DiGraph()
G.add_edges_from([(a.toId, a.fromId) for a in nodes.itertuples() if a.toId is not a.fromId])
G.size()
GRAPHS_FOLDER = "%s/temp_output/graphs" % environment.LOG_FOLDER_PATH
filepath = "%s/user-retweet.gexf" % GRAPHS_FOLDER
nx.write_gexf(G, filepath)
dc = nx.in_degree_centrality(G)
degreeCentrality = [{'id': k, 'degree_centrality' : dc[k] } for k in dc.keys()]
degreeCentrality = pd.DataFrame(degreeCentrality)
len(dc.keys())
dc = pd.DataFrame([{'id': k, 'degree_centrality' : dc[k] } for k in dc.keys()])
dc[:5]
dc[49113]
r[:2]
dc['49113']
# This produces a graph
# NOTE: `edges` is not defined in this checkpoint; it is presumably an iterable of
# (from_node, to_node, weight) triples built from the reply pairs loaded above.
G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
for n1, n2, degree in edges:
G.add_edges_from([(n1, n2) for i in range(0, degree)])
G.size()
nx.Graph()
# Retweet and quote tweets
###Output
_____no_output_____ |
content/lessons/09/Now-You-Code/NYC1-Address.ipynb | ###Markdown
Now You Code 1: AddressWrite a Python program to input elements of your postal address and then output them as if they were an address label. The program should use a dictionary to store the address and complete two function defintions one for inputting the address and one for printing the address.**NOTE:** While you most certainly can write this program without using dictionaries or functions, the point of the exercise is to get used to using them!!!Sample Run:```Enter Street: 314 Hinds HallEnter City: SyracuseEnter State: NYEnter Postal Zip Code: 13244Mailing Address:314 Hinds HallSyracuse , NY 13244``` Step 1: Problem Analysis `input_address` functionThis function should get input from the user at run time and return the input address.Inputs: None (gets input from user)Outputs: a Python dictionary of address info (street, city, state, postal_code)Algorithm (Steps in Program):
###Code
## Step 2: Write input_address_ function
#input: None (inputs from console)
#output: dictionary of the address
def input_address():
# todo: write code here to input the street, city, state and zip code and add to dictionary at runtime and store in a dictionary
street= input("Enter your street and house number: ")
address['street']=street
city= input("Enter your city : ")
address['city']=city
state= input("Enter your state: ")
address['state']=state
zipcode= input("Enter your zip code: ")
address['zipcode']= zipcode
return address
###Output
_____no_output_____
###Markdown
Step 3: Problem Analysis `print_address` functionThis function should display a mailing address using the dictionary variableInputs: dictionary variable of address information (street, city, state, postal_code)Outputs: None (prints to screen)Algorithm (Steps in Program):
###Code
## Step 4: write code
# input: address dictionary
# output: none (outputs to console)
def print_address(address):
# todo: write code to print the address (leave the empty return at the end)
print("MAILING ADDRESS:")
print(address['street'])
print(address['city'],",", address['state'], address['zipcode'])
return
###Output
_____no_output_____
###Markdown
Step 5: Problem Analysis main programShould be trivial at this point. Inputs: Outputs: Algorithm (Steps in Program):
###Code
## Step 6: write main program, use other 2 functions you made to solve this problem.
# main program
# todo: call input_address, then print_address
address= {}
input_address()
print_address(address)
###Output
Enter your street and house number: 383 W. Shore DR
Enter your city : Carmel
Enter your state: New York
Enter your zip code: 10512
MAILING ADDRESS:
383 W. Shore DR
Carmel , New York 10512
###Markdown
Now You Code 1: AddressWrite a Python program to input elements of your postal address and then output them as if they were an address label. The program should use a dictionary to store the address and complete two function defintions one for inputting the address and one for printing the address.**NOTE:** While you most certainly can write this program without using dictionaries or functions, the point of the exercise is to get used to using them!!!Sample Run:```Enter Street: 314 Hinds HallEnter City: SyracuseEnter State: NYEnter Postal Zip Code: 13244Mailing Address:314 Hinds HallSyracuse , NY 13244``` Step 1: Problem Analysis `input_address` functionThis function should get input from the user at run time and return the input address.Inputs: None (gets input from user)Outputs: a Python dictionary of address info (street, city, state, postal_code)Algorithm (Steps in Program):
###Code
## Step 2: Write input_address_ function
#input: None (inputs from console)
#output: dictionary of the address
def input_address():
address= {}
# todo: write code here to input the street, city, state and zip code and add to dictionary at runtime and store in a dictionary
return address
###Output
_____no_output_____
###Markdown
Step 3: Problem Analysis `print_address` functionThis function should display a mailing address using the dictionary variableInputs: dictionary variable of address information (street, city, state, postal_code)Outputs: None (prints to screen)Algorithm (Steps in Program):
###Code
## Step 4: write code
# input: address dictionary
# output: none (outputs to console)
def print_address(address):
return
###Output
_____no_output_____
###Markdown
Step 5: Problem Analysis main programShould be trivial at this point. Inputs: Outputs: Algorithm (Steps in Program):
###Code
## Step 6: write main program, use other 2 functions you made to solve this problem.
# main program
# todo: call input_address, then print_address
words = ['i','the','boy','went','to','the','store','like','dog']
text = input("enter some text: ")
def word_check(text):
for word in text.casefold().split():
if word not in words:
print(word)
print("Possible misspellings:")
print(word_check(text))
###Output
enter some text: xxx
Possible misspellings:
xxx
None
###Markdown
Now You Code 1: AddressWrite a Python program to input elements of your postal address and then output them as if they were an address label. The program should use a dictionary to store the address and complete two function defintions one for inputting the address and one for printing the address.**NOTE:** While you most certainly can write this program without using dictionaries or functions, the point of the exercise is to get used to using them!!!Sample Run:```Enter Street: 314 Hinds HallEnter City: SyracuseEnter State: NYEnter Postal Zip Code: 13244Mailing Address:314 Hinds HallSyracuse , NY 13244``` Step 1: Problem Analysis `input_address` functionThis function should get input from the user at run time and return the input address.Inputs: None (gets input from user)Outputs: a Python dictionary of address info (street, city, state, postal_code)Algorithm (Steps in Program):
###Code
## Step 2: Write input_address_ function
#input: None (inputs from console)
#output: dictionary of the address
def input_address():
address = {'street':'314 Hinds Hall','City':'Syracuse','State':'NY','Zip Code':13244}
# todo: write code here to input the street, city, state and zip code and add to dictionary at runtime and store in a dictionary
print(address)  # print the dictionary itself, not the string 'address'
return address
###Output
_____no_output_____
###Markdown
Step 3: Problem Analysis `print_address` functionThis function should display a mailing address using the dictionary variableInputs: dictionary variable of address information (street, city, state, postal_code)Outputs: None (prints to screen)Algorithm (Steps in Program):
###Code
## Step 4: write code
# input: address dictionary
# output: none (outputs to console)
def print_address(address):
# todo: write code to print the address (leave the empty return at the end)
return
###Output
_____no_output_____
###Markdown
Step 5: Problem Analysis main programShould be trivial at this point. Inputs: Outputs: Algorithm (Steps in Program):
###Code
## Step 6: write main program, use other 2 functions you made to solve this problem.
# main program
# todo: call input_address, then print_address
###Output
_____no_output_____
###Markdown
Now You Code 1: AddressWrite a Python program to input elements of your postal address and then output them as if they were an address label. The program should use a dictionary to store the address and complete two function defintions one for inputting the address and one for printing the address.**NOTE:** While you most certainly can write this program without using dictionaries or functions, the point of the exercise is to get used to using them!!!Sample Run:```Enter Street: 314 Hinds HallEnter City: SyracuseEnter State: NYEnter Postal Zip Code: 13244Mailing Address:314 Hinds HallSyracuse , NY 13244``` Step 1: Problem Analysis `input_address` functionThis function should get input from the user at run time and return the input address.Inputs: None (gets input from user)Outputs: a Python dictionary of address info (street, city, state, postal_code)Algorithm (Steps in Program):
###Code
## Step 2: Write input_address_ function
#input: None (inputs from console)
#output: dictionary of the address
def input_address():
address= {}
# todo: write code here to input the street, city, state and zip code and add to dictionary at runtime and store in a dictionary
street = input("Enter street: ")
address['street'] = street
city = input("Enter city: ")
address['city'] = city
state = input("Enter state: ")
address['state'] = state
postal_code = input("Enter Postal Zip Code: ")
address['postal_code'] = postal_code
return address
###Output
_____no_output_____
###Markdown
Step 3: Problem Analysis `print_address` functionThis function should display a mailing address using the dictionary variableInputs: dictionary variable of address information (street, city, state, postal_code)Outputs: None (prints to screen)Algorithm (Steps in Program):
###Code
## Step 4: write code
# input: address dictionary
# output: none (outputs to console)
def print_address(address):
# todo: write code to print the address (leave the empty return at the end)
print("Mailing Address: %s %s ,%s %s" % (address['street'], address['city'], address['state'], address['postal_code']))
return
###Output
_____no_output_____
###Markdown
Step 5: Problem Analysis main programShould be trivial at this point. Inputs: Outputs: Algorithm (Steps in Program):
###Code
## Step 6: write main program, use other 2 functions you made to solve this problem.
address = input_address()
print_address(address)
# main program
# todo: call input_address, then print_address
###Output
Enter street: 38 Rockledge Rd
Enter city: Mahopac
Enter state: NY
Enter Postal Zip Code: 10541
Mailing Address: 38 Rockledge Rd Mahopac ,NY 10541
###Markdown
Now You Code 1: AddressWrite a Python program to input elements of your postal address and then output them as if they were an address label. The program should use a dictionary to store the address and complete two function defintions one for inputting the address and one for printing the address.**NOTE:** While you most certainly can write this program without using dictionaries or functions, the point of the exercise is to get used to using them!!!Sample Run:```Enter Street: 314 Hinds HallEnter City: SyracuseEnter State: NYEnter Postal Zip Code: 13244Mailing Address:314 Hinds HallSyracuse , NY 13244``` Step 1: Problem Analysis `input_address` functionThis function should get input from the user at run time and return the input address.Inputs: None (gets input from user)Outputs: a Python dictionary of address info (street, city, state, postal_code)Algorithm (Steps in Program):
###Code
## Step 2: Write input_address_ function
#input: None (inputs from console)
#output: dictionary of the address
def input_address():
address= {}
# todo: write code here to input the street, city, state and zip code and add to dictionary at runtime and store in a dictionary
return address
###Output
_____no_output_____
###Markdown
Step 3: Problem Analysis `print_address` functionThis function should display a mailing address using the dictionary variableInputs: dictionary variable of address information (street, city, state, postal_code)Outputs: None (prints to screen)Algorithm (Steps in Program):
###Code
## Step 4: write code
# input: address dictionary
# output: none (outputs to console)
def print_address(address):
# todo: write code to print the address (leave the empty return at the end)
return
###Output
_____no_output_____
###Markdown
Step 5: Problem Analysis main programShould be trivial at this point. Inputs: Outputs: Algorithm (Steps in Program):
###Code
## Step 6: write main program, use other 2 functions you made to solve this problem.
# main program
# todo: call input_address, then print_address
###Output
_____no_output_____ |
tutorials/Tutorial.ipynb | ###Markdown
```pirel``` Tutorial : introduction and ```pcells```This is a tutorial for the [```pirel```](https://github.com/giumc/pirel) Python3 module.```pirel``` stands for PIezoelectric REsonator Layout, and it is based on the module [```phidl```](https://github.com/amccaugh/phidl) There are ***four*** packages within ```pirel```: ```pcells``` , ```modifiers``` , ```sweeps``` and ```tools``` . Let's start with ```pcells```.
###Code
import pirel.pcells as pc
###Output
_____no_output_____
###Markdown
```pcells``` is a collection of classes that are commonly required in piezoelectric resonator design. All the cells defined here are derived from ```pirel.tools.LayoutPart``` and they share: * ```name``` attribute * ```set_params``` method * ```get_params``` method * ```view``` method * ```draw``` method * ```get_components``` method. In general, these modules are growing pretty fast in size, so it wouldn't make sense to go through all the pcells/modifiers/sweeps/tools. Instead, try ```help(pirel.pcells)``` or ```help(pirel.tools)``` when you are looking for information! An example of a layout class is the InterDigiTated fingers class ```pc.IDT```.
###Code
idt=pc.IDT(name='TutorialIDT')
###Output
_____no_output_____
###Markdown
You can get the parameters available to ```idt``` by just printing it!
###Code
idt
###Output
_____no_output_____
###Markdown
You can get these parameters in a ```dict``` by using ```get_params```. You can modify any of the parameters and then load them back into the object.
###Code
idt_params=idt.get_params()
idt_params["N"]=4
idt.set_params(idt_params)
idt
###Output
_____no_output_____
###Markdown
At any point, you can visualize the layout cell by calling ```view()```
###Code
import matplotlib.pyplot as plt
idt.view(blocking=True)
###Output
_____no_output_____
###Markdown
The output shows a ```phidl.Device```. These ```Device``` instances are powered-up versions of ```gdspy.Cell``` instances. Refer to [```phidl```](https://github.com/amccaugh/phidl) if you want to learn how many cool things you can do with them. You can explicitly get this ```Device``` instance by calling the ```draw()``` method. At that point, you can play around with the cells by using the powerful tools in ```phidl```. In this example, we will align and distribute two ```idt``` Devices using the ```phidl``` module.
###Code
idt.coverage=0.3
cell1=idt.draw()
idt.coverage=0.8 ### yes you can set attributes like this, but you will have to find variable names from typing help(idt)
cell2=idt.draw()
import phidl
import phidl.device_layout as dl
from phidl import quickplot as qp
g=dl.Group([cell1,cell2])
g.distribute(direction='x',spacing=30)
g.align(alignment='y')
cell_tot=phidl.Device()
cell_tot<<cell1
cell_tot<<cell2
qp(cell_tot)
###Output
_____no_output_____
###Markdown
Feel free to look at ```help(pc)``` to figure out all the classes implemented in this module. Some classes in ```pc``` are created by subclassing, some others by composition of *unit* classes. For example, a Lateral Field Excitation RESonator (```pc.LFERes```) is built starting from some components: * ```pc.IDT``` * ```pc.Bus``` * ```pc.EtchPit``` * ```pc.Anchor```. For any class in ```pc```, you can find the components by querying the ```get_components()``` method:
###Code
via=pc.Via(name='TutorialVia')
res=pc.LFERes(name='TutorialLFERes')
via.get_components()
res.get_components()
###Output
_____no_output_____
###Markdown
Note that ```via``` has no components, while ```res``` has four. All layout parameters of each component are also layout parameters of the composed class. For example, this is the list of parameters that define ```LFERes```:
###Code
lfe_params=res.get_params()
res.view()
lfe_params
###Output
_____no_output_____
###Markdown
Classes built from ```components``` can also have parameters of their own: the class ```FBERes``` (Floating Bottom Electrode Resonators) has a parameter that sets the margin of the floating bottom electrode:
###Code
fbe=pc.FBERes(name="TutorialFBE")
params=fbe.get_params()
params["PlatePosition"]='in, long'
fbe.set_params(params)
fbe.view()
###Output
_____no_output_____
###Markdown
A useful feature of ```set_params``` is that functions can be passed. For example, when setting a resonator anchor, it might happen that some dimensions have to be derived from others. The overall resonator width can be found by querying the ```active_area``` attribute of the ```idt``` component of ```fbe```, as in the next cell:
###Code
fbe.idt.active_area
###Output
_____no_output_____
###Markdown
If you want to set a pitch, ```active_area``` will be updated automatically. If you want to keep the anchor size a third of the active area, you can simply write
###Code
params['AnchorSizeX']=lambda x : x.idt.active_area.x/3
params['AnchorMetalizedX'] = lambda x : x.anchor.size.x*0.8
fbe.set_params(params)
fbe.view()
###Output
_____no_output_____
###Markdown
Note that ```idt.active_area``` is a ```pt.Point``` instance. To check out what you can do with ```pt.Point```, try ```help(pt.Point)```! Now, scaling ```IDTPitch``` will scale ```AnchorSizeX``` and ```AnchorMetalizedX``` accordingly...
###Code
params['IDTPitch']=40
fbe.set_params(params)
fbe.view()
###Output
_____no_output_____
###Markdown
If you are bothered by the ```phidl.Port``` labels, just pass the optional ```joined=True``` to the ```check``` method in ```pirel.tools```
###Code
import pirel.tools as pt
fbe_cell=fbe.draw()
pt.check(fbe_cell,joined=True)
###Output
_____no_output_____
###Markdown
SCDCdm - Compositional analysis of single-cell data This notebook serves as a tutorial for using the *SCDCdm* package to analyze changes in cell composition data. The package is intended to be used with data coming from single-cell RNA-seq experiments; however, there are no restrictions that prevent the use of data from other sources. The data we use in the following example comes from [*Haber et al. [2017]*](https://www.nature.com/articles/nature24489). It contains samples from the small intestinal epithelium of mice with different conditions.
###Code
# Setup
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import arviz as az
import pickle as pkl
from scdcdm.util import comp_ana as mod
from scdcdm.util import cell_composition_data as dat
###Output
_____no_output_____
###Markdown
Data preparation
###Code
# Load data
cell_counts = pd.read_csv("../data/haber_counts.csv")
print(cell_counts)
###Output
Mouse Endocrine Enterocyte Enterocyte.Progenitor Goblet Stem \
0 Control_1 36 59 136 36 239
1 Control_2 5 46 23 20 50
2 Control_3 45 98 188 124 250
3 Control_4 26 221 198 36 131
4 H.poly.Day10_1 42 71 203 147 271
5 H.poly.Day10_2 40 57 383 170 321
6 H.poly.Day3_1 52 75 347 66 323
7 H.poly.Day3_2 65 126 115 33 65
8 Salm_1 37 332 113 59 90
9 Salm_2 32 373 116 67 117
TA TA.Early Tuft
0 125 191 18
1 11 40 5
2 155 365 33
3 130 196 4
4 109 180 146
5 244 256 71
6 263 313 51
7 39 129 59
8 47 132 10
9 65 168 12
###Markdown
Looking at the data, we see that we have 4 control samples, and 3 conditions with 2 samples each. To use the models in *SCDCdm*, we first have to convert the data into an [anndata](https://github.com/theislab/anndata) object. This object separates our data components: cell counts are stored in `data.X`, covariates in `data.obs`. For our first example, we want to look at how the Salmonella infection influences the cell composition.
###Code
# Convert data to anndata object
# Filter out control and salmonella data
salm_indices = [0, 1, 2, 3, 8, 9]
salm_df = cell_counts.iloc[salm_indices, :]
# Convert to a CompositionalData object
data_salm = dat.from_pandas(salm_df, covariate_columns=["Mouse"])
# Extract condition from mouse name and add it as an extra column to the covariates
data_salm.obs["Condition"] = data_salm.obs["Mouse"].str.replace(r"_[0-9]", "")
print(data_salm.X)
print(data_salm.obs)
###Output
Transforming to str index.
###Markdown
Plotting the data, we can see that there is a large increase of Enterocytes in the infected samples, while most other cell types slightly decrease. Since scRNA-seq experiments are limited in the number of cells per sample, the count data is compositional, which leads to negative correlations between the cell types. Thus, the slight decreases in many cell types might be fully caused by the increase in Enterocytes.
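A quick way to see this compositional constraint in the numbers themselves (a small sketch reusing the `salm_df` frame defined above) is to normalize every sample to proportions: the Enterocyte share rises to roughly 40% in the Salmonella samples, so all the other shares must shrink even where the raw counts barely change.

```python
# Per-sample proportions: each row sums to 1, so a rise in one share forces the others down
props = salm_df.set_index("Mouse")
props = props.div(props.sum(axis=1), axis=0)
print(props[["Enterocyte", "Stem", "TA.Early"]].round(3))
```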
###Code
fig, ax = plt.subplots(figsize=(12,5))
df = pd.melt(salm_df, id_vars=['Mouse'], value_vars=salm_df.columns[1:])
sns.set_context('notebook')
sns.set_style('ticks')
d = sns.barplot(x='variable', y = 'value', hue=df["Mouse"].str.replace(r"_[0-9]", ""), data=df)
d.set_ylabel('Cell Count')
loc, labels = plt.xticks()
d.set_xticklabels(labels, rotation=90)
d.set_xlabel('Cell type')
plt.show()
###Output
_____no_output_____
###Markdown
*Note that the use of* anndata *in* SCDCdm *is different from the use in scRNA-seq pipelines, e.g.* scanpy. *To convert* scanpy *objects to a SCDCdm dataset, have a look at `dat.from_scanpy_list`.* Model setup and inference: We can now create the model and run inference on it. The `mod.CompositionalAnalysis` class takes our data object and performs parameter inference on it. The `formula` parameter specifies how the covariates are used in the model. It can process R-style formulas via the [patsy](https://patsy.readthedocs.io/en/latest/) package, e.g. `formula="Cov1 + Cov2 + Cov3"`. The `baseline_index` parameter is used to specify a cell type that is left unchanged by the covariates. This feature is optional; a baseline index of `None` specifies the model without a baseline. For now, we will use no baseline index.
###Code
model_salm = mod.CompositionalAnalysis(data_salm, formula="Condition", baseline_index=None)
###Output
_____no_output_____
###Markdown
HMC sampling can be performed by calling `sample_hmc()` on the model, which produces a `scdcdm.util.result_classes.CAResult` object.
###Code
# Run MCMC
sim_results = model_salm.sample_hmc()
###Output
MCMC sampling finished. (122.948 sec)
Acceptance rate: 60.7%
###Markdown
Result interpretation: Calling `summary()` on the results object, we can see all relevant information for further analysis:
###Code
sim_results.summary()
###Output
Compositional Analysis summary:
Data: 6 samples, 8 cell types
Baseline index: None
Formula: Condition
Intercepts:
Final Parameter Expected Sample
Cell Type
Endocrine 1.087 33.992407
Enterocyte 2.318 116.412180
Enterocyte.Progenitor 2.516 141.902070
Goblet 1.664 60.529882
Stem 2.699 170.398050
TA 2.103 93.891247
TA.Early 2.860 200.164028
Tuft 0.435 17.710137
Effects:
Final Parameter Expected Sample \
Covariate Cell Type
Condition[T.Salm] Endocrine 0.000000 23.425261
Enterocyte 1.443536 339.798016
Enterocyte.Progenitor 0.000000 97.789281
Goblet 0.000000 41.713089
Stem 0.000000 117.426778
TA 0.000000 64.703479
TA.Early 0.000000 137.939471
Tuft 0.000000 12.204625
log2-fold change
Covariate Cell Type
Condition[T.Salm] Endocrine -0.537147
Enterocyte 1.545435
Enterocyte.Progenitor -0.537147
Goblet -0.537147
Stem -0.537147
TA -0.537147
TA.Early -0.537147
Tuft -0.537147
###Markdown
**Model properties** First, the summary shows an overview of the model properties: * Number of samples/cell types * Index of the baseline cell type, starting at 0. * The formula used. The model has two types of parameters that are relevant for analysis - intercepts and effects. These can be interpreted like in a standard regression model: intercepts show how the cell types are distributed without any active covariates, effects show how the covariates influence the cell types. **Intercepts** The first column of the intercept summary shows the parameters determined by the MCMC inference. The "Expected sample" column gives some context to the numerical values. If we take the mean number of cells over all samples from our dataset, then the model expects a new sample with exactly that many cells to look like this. **Effects** For the effect summary, the first column again shows the inferred parameters for all combinations of covariates and cell types. A value of zero means that no significant effect was detected. For a value other than zero, a significant change was detected. A positive sign indicates an increase, a negative sign a decrease. The "Expected sample" and "log2-fold change" columns give us an idea of the magnitude of this increase. The expected sample is calculated for each covariate separately (covariate value = 1), with the same method as for the intercepts. The log-fold change is then calculated between this column and the expected intercept sample. Since the data is compositional, cell types for which no significant change was detected are expected to change as well. **Interpretation** In the salmonella case, we see only a significant increase of Enterocytes, while all other cell types are unaffected by the disease. The log-fold change of Enterocytes between control and infected samples with the same total cell count lies at about 1.54. Diagnostics and plotting: For further analysis, `summary_extended()` gives us more information about the model, as shown after the short numerical aside below:
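To make the "Expected Sample" and "log2-fold change" columns concrete, here is a minimal numpy sketch that reproduces them from the parameter values printed above. The arrays below are simply copied from the summary, and `n_mean` is the mean total cell count of the six samples; this is an illustration of the arithmetic, not of SCDCdm internals.

```python
import numpy as np

# Intercepts (alpha) and the single non-zero effect (beta), copied from the summary above
alpha = np.array([1.087, 2.318, 2.516, 1.664, 2.699, 2.103, 2.860, 0.435])
beta = np.array([0.0, 1.443536, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

# Mean total cell count per sample in the salmonella subset
n_mean = salm_df.iloc[:, 1:].sum(axis=1).mean()

expected_control = np.exp(alpha) / np.exp(alpha).sum() * n_mean
expected_salm = np.exp(alpha + beta) / np.exp(alpha + beta).sum() * n_mean

print(np.round(expected_control, 1))                           # matches the intercept "Expected Sample" column
print(np.round(np.log2(expected_salm / expected_control), 3))  # matches the "log2-fold change" column
```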
###Code
sim_results.summary_extended()
###Output
Compositional Analysis summary (extended):
Data: 6 samples, 8 cell types
Baseline index: None
Formula: Condition
Spike-and-slab threshold: 0.692
MCMC Sampling: Sampled 20000 chain states (5000 burnin samples) in 122.948 sec. Acceptance rate: 60.7%
Intercepts:
Final Parameter HDI 3% HDI 97% SD \
Cell Type
Endocrine 1.087 0.363 1.845 0.402
Enterocyte 2.318 1.723 2.863 0.315
Enterocyte.Progenitor 2.516 1.934 3.126 0.323
Goblet 1.664 1.007 2.333 0.360
Stem 2.699 2.085 3.227 0.312
TA 2.103 1.473 2.671 0.329
TA.Early 2.860 2.270 3.411 0.313
Tuft 0.435 -0.347 1.138 0.393
Expected Sample
Cell Type
Endocrine 33.992407
Enterocyte 116.412180
Enterocyte.Progenitor 141.902070
Goblet 60.529882
Stem 170.398050
TA 93.891247
TA.Early 200.164028
Tuft 17.710137
Effects:
Final Parameter HDI 3% HDI 97% \
Covariate Cell Type
Condition[T.Salm] Endocrine 0.000000 -0.099 1.226
Enterocyte 1.443536 0.991 1.951
Enterocyte.Progenitor 0.000000 -0.039 0.638
Goblet 0.000000 -0.055 1.082
Stem 0.000000 -0.424 0.139
TA 0.000000 -0.473 0.307
TA.Early 0.000000 -0.150 0.503
Tuft 0.000000 -0.338 1.061
SD Inclusion probability \
Covariate Cell Type
Condition[T.Salm] Endocrine 0.418 0.472800
Enterocyte 0.268 1.000000
Enterocyte.Progenitor 0.219 0.299933
Goblet 0.390 0.489000
Stem 0.167 0.254333
TA 0.197 0.279933
TA.Early 0.181 0.284667
Tuft 0.333 0.324733
Expected Sample log2-fold change
Covariate Cell Type
Condition[T.Salm] Endocrine 23.425261 -0.537147
Enterocyte 339.798016 1.545435
Enterocyte.Progenitor 97.789281 -0.537147
Goblet 41.713089 -0.537147
Stem 117.426778 -0.537147
TA 64.703479 -0.537147
TA.Early 137.939471 -0.537147
Tuft 12.204625 -0.537147
###Markdown
The spike-and-slab threshold value depends on the number of cell types and determines the inclusion probability cutoff for significant effects. Further, the extended summary includes some information on the MCMC sampling procedure (chain length, burn-in, acceptance rate, duration). For both effects and intercepts, we also get the standard deviation and HPD interval endpoints of the elements of the generated Markov chain. The width of the credible interval can be set by e.g. `summary_extended(hdi_prob=0.9)`. The effects summary also includes the spike-and-slab inclusion probability for each effect, i.e. the share of MCMC samples for which this effect was not set to 0 by the spike-and-slab prior. We can also use the summary tables from `summary_extended()` as pandas DataFrames to tweak them further; for example, we can show only the cell types with significant effects:
###Code
# Intercept dataframe: sim_result.intercept_df
# Effect dataframe: sim_result.effect_df
sig_effects = sim_results.effect_df.loc[sim_results.effect_df["Final Parameter"] != 0]
print(sig_effects)
###Output
Final Parameter HDI 3% HDI 97% SD \
Covariate Cell Type
Condition[T.Salm] Enterocyte 1.443536 0.991 1.951 0.268
Inclusion probability Expected Sample \
Covariate Cell Type
Condition[T.Salm] Enterocyte 1.0 339.798016
log2-fold change
Covariate Cell Type
Condition[T.Salm] Enterocyte 1.545435
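In the same way, we can filter on the inclusion probability column (a small sketch; 0.692 is the spike-and-slab threshold reported by `summary_extended()` for this particular run):

```python
# Keep only effects whose inclusion probability exceeds the spike-and-slab threshold
probable = sim_results.effect_df.loc[sim_results.effect_df["Inclusion probability"] > 0.692]
print(probable[["Final Parameter", "Inclusion probability", "log2-fold change"]])
```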
###Markdown
Also, the results object supports all plotting and diagnostic functions of [arviz](https://github.com/arviz-devs/arviz).
###Code
# Example: Plot Markov chain density for all effects
az.plot_density(sim_results, var_names="beta")
plt.show()
###Output
_____no_output_____
###Markdown
Saving the results: Result objects can simply be saved to disk via pickle:
###Code
# saving
path = "test"
sim_results.save(path)
# loading
with open(path, "rb") as f:
sim_results_2 = pkl.load(f)
sim_results_2.summary()
###Output
_____no_output_____
###Markdown
Tweaking the model: categorical covariates, Baseline. The compositional analysis models from *SCDCdm* are also able to automatically deal with categorical covariates via the [patsy](https://patsy.readthedocs.io/en/latest/) framework for formula specification. By default, categorical variables are encoded via full-rank treatment coding, where the value of the first sample in the dataset is used as the reference (control) category. We can change this by tweaking the model formula to be `"C(<covariate>, Treatment('<reference level>'))"`:
###Code
# Set salmonella infection as reference category
model_salm_switch_ref = mod.CompositionalAnalysis(data_salm, formula="C(Condition, Treatment('Salm'))", baseline_index=None)
switch_results = model_salm_switch_ref.sample_hmc()
switch_results.summary()
###Output
MCMC sampling finished. (153.330 sec)
Acceptance rate: 49.6%
Compositional Analysis summary:
Data: 6 samples, 8 cell types
Baseline index: None
Formula: C(Condition, Treatment('Salm'))
Intercepts:
Final Parameter Expected Sample
Cell Type
Endocrine 1.336 28.982938
Enterocyte 3.757 326.264146
Enterocyte.Progenitor 2.603 102.894821
Goblet 1.923 52.128265
Stem 2.677 110.797844
TA 2.090 61.602799
TA.Early 2.908 139.589687
Tuft 0.514 12.739499
Effects:
Final Parameter \
Covariate Cell Type
C(Condition, Treatment('Salm'))[T.Control] Endocrine 0.000000
Enterocyte -1.427285
Enterocyte.Progenitor 0.000000
Goblet 0.000000
Stem 0.000000
TA 0.000000
TA.Early 0.000000
Tuft 0.000000
Expected Sample \
Covariate Cell Type
C(Condition, Treatment('Salm'))[T.Control] Endocrine 41.226028
Enterocyte 111.361894
Enterocyte.Progenitor 146.360067
Goblet 74.148497
Stem 157.601516
TA 87.625303
TA.Early 198.555726
Tuft 18.120969
log2-fold change
Covariate Cell Type
C(Condition, Treatment('Salm'))[T.Control] Endocrine 0.508352
Enterocyte -1.550785
Enterocyte.Progenitor 0.508352
Goblet 0.508352
Stem 0.508352
TA 0.508352
TA.Early 0.508352
Tuft 0.508352
###Markdown
We can also handle multiple levels of one categorical covariate:
###Code
# Get dataset with all three diseases
data_all = dat.from_pandas(cell_counts, covariate_columns=["Mouse"])
data_all.obs["Condition"] = data_all.obs["Mouse"].str.replace(r"_[0-9]", "")
print(data_all.X)
print(data_all.obs)
# model all three diseases at once
model_all = mod.CompositionalAnalysis(data_all, formula="Condition", baseline_index=None)
switch_results = model_all.sample_hmc()
switch_results.summary()
###Output
MCMC sampling finished. (165.183 sec)
Acceptance rate: 48.7%
Compositional Analysis summary:
Data: 10 samples, 8 cell types
Baseline index: None
Formula: Condition
Intercepts:
Final Parameter Expected Sample
Cell Type
Endocrine 0.932 44.080004
Enterocyte 1.966 123.965868
Enterocyte.Progenitor 2.340 180.188994
Goblet 1.457 74.515434
Stem 2.424 195.978756
TA 1.879 113.636671
TA.Early 2.547 221.629322
Tuft 0.554 30.204964
Effects:
Final Parameter \
Covariate Cell Type
Condition[T.H.poly.Day10] Endocrine 0.000000
Enterocyte -0.706797
Enterocyte.Progenitor 0.000000
Goblet 0.000000
Stem 0.000000
TA 0.000000
TA.Early 0.000000
Tuft 0.865361
Condition[T.H.poly.Day3] Endocrine 0.000000
Enterocyte 0.000000
Enterocyte.Progenitor 0.000000
Goblet 0.000000
Stem 0.000000
TA 0.000000
TA.Early 0.000000
Tuft 0.000000
Condition[T.Salm] Endocrine 0.000000
Enterocyte 1.479182
Enterocyte.Progenitor 0.000000
Goblet 0.000000
Stem 0.000000
TA 0.000000
TA.Early 0.000000
Tuft 0.000000
Expected Sample \
Covariate Cell Type
Condition[T.H.poly.Day10] Endocrine 45.053462
Enterocyte 62.492881
Enterocyte.Progenitor 184.168265
Goblet 76.161023
Stem 200.306727
TA 116.146210
TA.Early 226.523757
Tuft 73.347688
Condition[T.H.poly.Day3] Endocrine 44.080004
Enterocyte 123.965868
Enterocyte.Progenitor 180.188994
Goblet 74.515434
Stem 195.978756
TA 113.636671
TA.Early 221.629322
Tuft 30.204964
Condition[T.Salm] Endocrine 30.891950
Enterocyte 381.334588
Enterocyte.Progenitor 126.279240
Goblet 52.221571
Stem 137.344950
TA 79.638341
TA.Early 155.321265
Tuft 21.168107
log2-fold change
Covariate Cell Type
Condition[T.H.poly.Day10] Endocrine 0.031514
Enterocyte -0.988179
Enterocyte.Progenitor 0.031514
Goblet 0.031514
Stem 0.031514
TA 0.031514
TA.Early 0.031514
Tuft 1.279966
Condition[T.H.poly.Day3] Endocrine 0.000000
Enterocyte 0.000000
Enterocyte.Progenitor 0.000000
Goblet 0.000000
Stem 0.000000
TA 0.000000
TA.Early 0.000000
Tuft 0.000000
Condition[T.Salm] Endocrine -0.512893
Enterocyte 1.621114
Enterocyte.Progenitor -0.512893
Goblet -0.512893
Stem -0.512893
TA -0.512893
TA.Early -0.512893
Tuft -0.512893
###Markdown
*SCDCdm* also allows us to set a baseline cell type whose effect is always 0. If such a cell type exists, it is recommended to set it as the baseline to avoid interpretation issues. The baseline can easily be specified in the model setup:
###Code
# model salmonella infection with baseline set to Endocrine cells
model_baseline = mod.CompositionalAnalysis(data_salm, formula="Condition", baseline_index="Endocrine")
baseline_results = model_baseline.sample_hmc()
baseline_results.summary()
###Output
MCMC sampling finished. (210.718 sec)
Acceptance rate: 55.3%
Compositional Analysis summary:
Data: 6 samples, 8 cell types
Baseline index: 0
Formula: Condition
Intercepts:
Final Parameter Expected Sample
Cell Type
Endocrine 1.161 37.160323
Enterocyte 2.300 116.075590
Enterocyte.Progenitor 2.495 141.067940
Goblet 1.671 61.882759
Stem 2.680 169.735547
TA 2.086 93.713440
TA.Early 2.835 198.193062
Tuft 0.389 17.171338
Effects:
Final Parameter Expected Sample \
Covariate Cell Type
Condition[T.Salm] Endocrine 0.000000 25.918512
Enterocyte 1.415883 333.565973
Enterocyte.Progenitor 0.000000 98.391798
Goblet 0.000000 43.161869
Stem 0.000000 118.386825
TA 0.000000 65.363072
TA.Early 0.000000 138.235319
Tuft 0.000000 11.976632
log2-fold change
Covariate Cell Type
Condition[T.Salm] Endocrine -0.519780
Enterocyte 1.522908
Enterocyte.Progenitor -0.519780
Goblet -0.519780
Stem -0.519780
TA -0.519780
TA.Early -0.519780
Tuft -0.519780
###Markdown
Example of network generation and dynamic simulation
###Code
%matplotlib inline
%load_ext autoreload
%autoreload 2
import time
# load the modules specific to this project
from context import network as nw
from context import physics
from context import timemarching as tm
from context import plotter
from context import logger
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
1. Define the broadcasting channels of the network. This is done by creating a list of the channel names. The names are arbitrary and can be set by the user, such as 'positive', 'negative' or explicit wavelengths like '870 nm', '700 nm'. Here I chose the colors 'red' and 'blue'.
###Code
channel_list = ['red', 'blue']
# Automatically generate the object that handles them
channels = {channel_list[v] : v for v in range(len(channel_list))}
# It looks like this
print(channels)
###Output
{'red': 0, 'blue': 1}
###Markdown
2. Define the layers. Here the layers of nodes are defined in terms of how they are connected to the channels. Layers and weights are organized in dictionaries. The input and output layers do not need to be changed, but for the hidden layer we need to specify the number of nodes N and assign the correct channels to the input/output of the node.
###Code
# Create layers ordered from 0 to P organized in a dictionary
layers = {}
# An input layer automatically creates on node for each channel that we define
layers[0] = nw.InputLayer(input_channels=channels)
layers[1] = nw.HiddenLayer(N=1, output_channel='red',excitation_channel='blue',inhibition_channel='red')
layers[2] = nw.OutputLayer(output_channels=channels) # for now it can be the same as input
###Output
_____no_output_____
###Markdown
3. Define existing connections between layers. The weights are set in two steps. First, the connections between layers are defined. This should be done using the keys defined for each layer above, i.e. 0, 1, 2 ... for input, hidden and output layers, respectively. The `connect_layers` function returns a weight matrix object that we store under a chosen key, for example `'inp->hid'`. Second, the specific connections on the node-to-node level are specified using the node index in each layer and the `connect_nodes` function of the weights object.
###Code
# Define the overall connectivity
weights = {}
# The syntax is connect_layers(from_layer, to_layer, layers, channels)
weights['inp->hid'] = nw.connect_layers(0, 1, layers, channels)
weights['hid->out'] = nw.connect_layers(1, 2, layers, channels)
# Recurrent connections possible
weights['hid->hid'] = nw.connect_layers(1, 1, layers, channels)
# Define the specific node-to-node connections in the weight matrices
# The syntax is connect_nodes(from_node, to_node, weight=value in weight matrix (default=1.0))
weights['inp->hid'].connect_nodes(channels['blue'] ,0, channel='blue') # channels['blue']=1
weights['inp->hid'].connect_nodes(channels['red'] ,0, channel='red') # channels['red']=0
weights['hid->out'].connect_nodes(0, channels['red'], 'red')
# The explicit weight matrix can be set by
# weights['inp->hid'].W = np.array([])
# where the array should have the shape of weights['inp->hid'].W.shape()
###Output
_____no_output_____
###Markdown
4. Visualize the network. The `plotter` module supplies functions to visualize the network structure. The nodes are named by the layer type (Input, Hidden or Output) and the index.
###Code
plotter.visualize_network(layers, weights)
###Output
_____no_output_____
###Markdown
5. Specify the physics of the nodes. Before running any simulations, we need to specify the input currents and the physics of the hidden layer nodes. Parameters can either be specified directly or coupled from the `physics` module.
###Code
# Define a devices for the hidden layer
device = physics.Device('../parameters/device_parameters.txt')
# Assign it to the hidden layer
layers[1].assign_device(device)
# Get a quick report on what kind of currents that we need
unity_coeff, Imax = device.inverse_gain_coefficient(device.eta_ABC, layers[1].Vthres)
print(f'Unity coupling coefficient calculated as unity_coeff={unity_coeff:.4f}')
print(f'Imax is found to be {Imax} nA')
# Specify an exciting current square pulse and an inhibition square pulse
t_blue = [(2,3),(5.5,6),(7.5,8)] #
# and inhibition from the other one
t_red = [(4,5)] #
# Use the square pulse function and specify which node in the input layer gets which pulse
layers[0].set_input_func(channel='blue',func_handle=physics.square_pulse, func_args=(t_blue,Imax))
layers[0].set_input_func(channel='red', func_handle=physics.square_pulse, func_args=(t_red, Imax))
###Output
_____no_output_____
###Markdown
6. Example dynamic simulation
###Code
# Start time t, end time T
t = 0.0
T = 10.0 # ns
# These parameters are used to determine an appropriate time step each update
dtmax = 0.01 # ns
dVmax = 0.001 # V
nw.reset(layers)
# Create an instance of Logger to store the data
time_log = logger.Logger(layers,channels)
start = time.time()
while t < T:
# evolve by calculating derivatives, provides dt
dt = tm.evolve(t, layers, dVmax, dtmax )
# update with explicit Euler using dt
tm.update(dt, t, layers, weights)
t += dt
# Log the update
time_log.add_tstep(t, layers)
end = time.time()
print('Time used:',end-start)
res = time_log.get_timelog()
###Output
Time used: 0.5085978507995605
###Markdown
Visualize results using the `plotter` module
###Code
# Plot the results for a node
plotter.plot_nodes(res,['H0'])
# Plot the linked currents from I1/I0 to O0
G = plotter.retrieve_G(layers, weights)
plotter.plot_chainlist(res,G,'I1','O0')
###Output
_____no_output_____
###Markdown
Using the `py-mie` library. Below is a very quick, hacked-together Jupyter notebook that details how to use the `py-mie` Python library to make basic Mie calculations for homogeneous and heterogeneous particles. There are just three functions available in the library: * `bhmie_scatter` * `core_shell_scatter` * `integrate_mode`
###Code
import mie
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import seaborn as sns
rc = {
"figure.figsize": (12,6),
"xtick.major.size": 12.0,
"xtick.minor.size": 8.0,
"ytick.major.size": 12.0,
"ytick.minor.size": 8.0,
"axes.linewidth": 1.75,
"xtick.color": '0',
"ytick.color": '0',
"axes.labelcolor": '0'
}
sns.set("notebook", style="ticks", palette='deep', rc=rc, font_scale=1.75)
%matplotlib inline
###Output
_____no_output_____
###Markdown
`mie.bhmie_scatter(particle_radius, radiation_lambda, n_particle)`. The `bhmie_scatter` function computes the scattering/absorption efficiency and asymmetry parameter for a homogeneous particle. The function has three parameters: * `particle_radius`: total particle radius (core and shell) in microns * `radiation_lambda`: wavelength of the incident light in microns * `n_particle`: complex refractive index of the particle material. The function returns three floats: * $Q_{sca}$: Scattering efficiency * $Q_{abs}$: Absorption efficiency * $asym$: asymmetry parameter for the specified particle. Example: Scattering and Absorption by Homogeneous Black Carbon
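Before the full diameter sweep below, a minimal single call looks like this (the particle size here is chosen only for illustration; `qsca`, `qabs` and `g` are just local names for the three return values):

```python
# 0.5 um diameter black carbon particle at 658 nm (radius = 0.25 um)
qsca, qabs, g = mie.bhmie_scatter(
    particle_radius=0.25,
    radiation_lambda=0.658,
    n_particle=1.95 + 0.79*1j)
print(qsca, qabs, g)
```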
###Code
# Define the complex refractive index for Black Carbon (Data from Sienfeld and Pandis)
refr_BC = 1.95 + 0.79*1j
# Define the wavelength of incident light (658 nm)
wl = 0.658
# Let's plot over a range of particle diameters
diams = np.logspace(-2, 1, 500)
res = {
"Qabs": np.zeros(len(diams)),
"Qsca": np.zeros(len(diams)),
}
for i, dp in enumerate(diams):
qsca, qabs, _ = mie.bhmie_scatter(
particle_radius=dp/2.,
radiation_lambda=wl,
n_particle=refr_BC)
res["Qabs"][i] = qabs
res["Qsca"][i] = qsca
# Plot
fig, ax = plt.subplots(1, figsize=(14,7))
ax.plot(diams, res["Qabs"], label="$Q_{abs}$", lw=6)
ax.plot(diams, res["Qsca"], label="$Q_{sca}$", lw=6)
ax.set_title("Scattering and Absorption by BC at $\lambda={:.0f}\;nm$".format(wl*1000))
ax.set_ylabel("$\eta$")
ax.set_xlabel("$D_P\;[\mu m]$")
ax.legend(loc='best')
ax.semilogx()
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.3g"))
sns.despine(offset=10)
###Output
_____no_output_____
###Markdown
Example: Scattering by Homogeneous Particles of Various Composition. Let's try plotting the scattering by various particles...
###Code
# Let's plot over a range of particle diameters
diams = np.logspace(-1, 1, 500)
particles = []
particles.append(("Amm. Sulfate", 1.521 + 0*1j)) # PubChem (589nm)
particles.append(("BC", 1.95 + 0.79*1j))
particles.append(("Sulfuric Acid", 1.4183 + 0*1j)) # CRC (589nm)
particles.append(("Water", 1.331 + 1.64e-8*1j)) # S+P T15.1 (650nm)
fig, ax = plt.subplots(1, figsize=(14,7))
for each in particles:
res = []
for i, dp in enumerate(diams):
qsca, _, _ = mie.bhmie_scatter(dp/2., wl, each[1])
res.append(qsca)
ax.plot(diams, res, label=each[0], lw=6)
ax.set_title("Scattering by Homogeneous Particles at $\lambda={:.0f}\;nm$".format(wl*1000))
ax.set_ylabel("$Q_{sca}$")
ax.set_xlabel("$D_P\;[\mu m]$")
ax.legend(loc='best')
ax.semilogx()
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.3g"))
sns.despine(offset=10)
###Output
_____no_output_____
###Markdown
`mie.core_shell_scatter(particle_radius, core_fraction, radiation_lambda, n_shell, n_particle)`. The `core_shell_scatter` function computes the scattering/absorption efficiency and asymmetry parameter for a heterogeneous, core-shell mixed particle. The function has five parameters: * `particle_radius`: total particle radius (core and shell) in microns * `core_fraction`: the fraction of the particle comprised by its core (0.0-1.0) * `radiation_lambda`: wavelength of the incident light in microns * `n_core`: complex refractive index of the particle core material * `n_shell`: complex refractive index of the particle shell material. The function returns three floats: * $Q_{sca}$: Scattering efficiency * $Q_{abs}$: Absorption efficiency * $asym$: asymmetry parameter for the specified particle. Example: Scattering and Absorption by BC Coated with SO4
###Code
core_frac = np.linspace(0.0, 1.0, 5)
refr_SO4 = [
1.53+imag*1j for imag in \
[0.158, 0.057, 0.003, 0.001, 0.001, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.551]
]
refr_BC = 1.95 + 0.79*1j
res = []
fig, ax = plt.subplots(1, figsize=(14,7))
for frac in core_frac:
res = []
for dp in diams:
qsca, qabs, _ = mie.core_shell_scatter(
particle_radius=dp/2.,
core_fraction=frac,
radiation_lambda=wl,
n_core=refr_BC,
n_shell=refr_SO4
)
res.append(qsca)
ax.plot(diams, res, label="Core Frac={:.1f}".format(frac), lw=6)
ax.set_title("Scattering by Heterogeneous BC/SO4 at $\lambda={:.0f}\;nm$".format(wl*1000))
ax.set_ylabel("$Q_{sca}$")
ax.set_xlabel("$D_P\;[\mu m]$")
ax.legend(loc='best')
ax.semilogx()
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.3g"))
sns.despine(offset=10)
###Output
_____no_output_____
###Markdown
`mie.integrate_mode(core_fraction, n_shell, n_core, radiation_lambda, mode_radius, mode_sigma, r_min=1e-3, r_max=100., nr=200)`. The `integrate_mode` function integrates the Mie theory calculation over a lognormal aerosol mode with homogeneous particle properties, weighting by its size distribution. The function has six required parameters: * `core_fraction`: the fraction of the particle comprised by its core (0.0-1.0) * `n_core`: complex refractive index of the particle core material * `n_shell`: complex refractive index of the particle shell material * `radiation_lambda`: wavelength of the incident light in microns * `mode_radius`: the geometric mean or mode radius of the aerosol size distribution in microns * `mode_sigma`: the geometric standard deviation of the aerosol size distribution. The function also has three optional parameters: * `r_min`: the minimum radius to integrate over * `r_max`: the maximum radius to integrate over * `nr`: the number of particle radii to use in the integration. The function returns three floats: * $Q_{sca}$: Scattering efficiency * $Q_{abs}$: Absorption efficiency * $asym$: asymmetry parameter for the specified particle. Example: Scattering of Ammonium Sulfate at 658 nm for Various Size Distributions
###Code
refr_ammsulf = 1.521 + 0*1j
# Range of Geo. Means
gm_range = np.linspace(0.05, 0.3, 50)
# Range of Geo. Standard Deviations
gsd_range = np.linspace(1.25, 2.5, 50)
val_matrix = np.zeros((len(gm_range), len(gsd_range))) * np.nan
for i, gm in enumerate(gm_range):
for j, gsd in enumerate(gsd_range):
qsca, qabs, _ = mie.integrate_mode(
core_fraction=1,
n_shell=refr_BC,
n_core=refr_ammsulf,
radiation_lambda=wl,
mode_radius=gm,
mode_sigma=gsd
)
val_matrix[i][j] = qsca
# Plot the results
fig, ax = plt.subplots(1, figsize=(12,8))
im = plt.pcolormesh(gm_range, gsd_range, val_matrix.T, cmap="seismic")
ax.set_xlabel("$GM\;[\mu m]$")
ax.set_ylabel("$GSD$")
ax.set_title("Integrated Scattering of Amm. Sulfate at 658 nm", y=1.05)
ax.set_ylim([gsd_range.min(), gsd_range.max()])
plt.colorbar(im, label="$Q_{sca}$")
plt.show()
###Output
_____no_output_____
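###Markdown
The optional integration-grid arguments (`r_min`, `r_max`, `nr`) listed in the description above can also be passed explicitly. A minimal sketch with illustrative values (not re-run here), again using the ammonium sulfate index defined above:
###Code
# Sketch: single-mode integration with an explicit radius grid
qsca, qabs, asym = mie.integrate_mode(
    core_fraction=1,
    n_shell=refr_ammsulf,
    n_core=refr_ammsulf,
    radiation_lambda=wl,
    mode_radius=0.1,
    mode_sigma=1.8,
    r_min=0.01,
    r_max=10.,
    nr=500
)
print(qsca, qabs, asym)
###Output
_____no_output_____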
###Markdown
Get all your favourite imports
###Code
import math
import numpy as np
import pandas as pd
from scipy import stats
from IPython.display import display
from collections import Counter
from tqdm.notebook import tqdm
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from PyImpetus import inter_IAMB
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import KFold, StratifiedKFold
import time
import warnings
warnings.filterwarnings("ignore")
###Output
_____no_output_____
###Markdown
The JanataHack Cross-Sell AnalyticsVidhya Hackathon dataset requires some specific preprocessing
###Code
df_train = pd.read_csv("train.csv")
df_test = pd.read_csv("test.csv")
# Categorical preprocessing for catboost
df_train['Driving_License'] = "D_" + df_train['Driving_License'].astype(str)
df_test['Driving_License'] = "D_" + df_test['Driving_License'].astype(str)
# Categorical preprocessing for catboost
df_train['Region_Code'] = "D_" + df_train['Region_Code'].astype(str)
df_test['Region_Code'] = "D_" + df_test['Region_Code'].astype(str)
# Categorical preprocessing for catboost
df_train['Policy_Sales_Channel'] = "D_" + df_train['Policy_Sales_Channel'].astype(str)
df_test['Policy_Sales_Channel'] = "D_" + df_test['Policy_Sales_Channel'].astype(str)
# Categorical preprocessing for catboost
vehicle_damage_map = {"Yes": 1, "No": 0}
df_train['Vehicle_Damage'] = df_train['Vehicle_Damage'].map(vehicle_damage_map)
df_test['Vehicle_Damage'] = df_test['Vehicle_Damage'].map(vehicle_damage_map)
# Categorical preprocessing for catboost
vehicle_age_map = {"> 2 Years": 2, "1-2 Year": 1, "< 1 Year": 0}
df_train['Vehicle_Age'] = df_train['Vehicle_Age'].map(vehicle_age_map)
df_test['Vehicle_Age'] = df_test['Vehicle_Age'].map(vehicle_age_map)
# Since catboost requires string type for its categorical features while other models need label encoding
# We create a new dataframe, just for feature-selection.
# This problem will not arise for other downstream classifiers such as lightgbm, XGBoost, etc.
df_train_ = pd.read_csv("train.csv")
# Create a map to convert categorical features to numerical
gender_map = {"Male": 0, "Female": 1}
vehicle_age_map = {'< 1 Year': 0, '1-2 Year': 1, '> 2 Years': 2}
vehicle_damage = {'Yes': 0, 'No': 1}
df_train_["Gender"] = df_train_["Gender"].map(gender_map)
df_train_['Vehicle_Age'] = df_train_['Vehicle_Age'].map(vehicle_age_map)
df_train_['Vehicle_Damage'] = df_train_['Vehicle_Damage'].map(vehicle_damage)
# This feature is not allowed in the competition
df_train.drop(["id"], axis=1, inplace=True)
df_train_.drop(["id"], axis=1, inplace=True)
display(df_train.head())
print()
display(df_train_.head())
###Output
_____no_output_____
###Markdown
Perform feature selection and then perform CV to check results. PyImpetus uses CV internally to select the best features, so there is no need to run your own CV for feature selection.
###Code
# Initialize the PyImpetus feature-selection object
fs = inter_IAMB(num_simul=10)
# The fit function returns a list of the features selected
feats = fs.fit(df_train_, "Response")
# The transform function prunes your pandas dataset to the set of final features
X_train = fs.transform(df_train).values
# Prune the test dataset as well
X_test = fs.transform(df_test).values
Y = df_train["Response"].values
# Get indices for categorical features. Will require for catboost
cat_feat = [df_train[feats].columns.get_loc(i) for i in ['Gender', 'Driving_License', 'Region_Code','Vehicle_Damage', 'Policy_Sales_Channel'] if i in df_train[feats].columns]
print("\nX_train_shape: ", X_train.shape, "\nX_test_shape: ", X_test.shape, "\nY_shape: ", Y.shape)
print("\n\n")
# # Uncomment this if you dont want to use feature selection
# X_train = df_train.drop(["Response"], axis=1).values
# X_test = df_test.drop(["id"], axis=1).values
# Y = df_train["Response"].values
# cat_feat = [df_train.columns.get_loc(i) for i in ['Gender', 'Driving_License', 'Region_Code','Vehicle_Damage', 'Policy_Sales_Channel']]
# Now let's perform Kfold and see what results we get
kfold, scores = KFold(n_splits=5, random_state=27, shuffle=True), list()
for train, test in kfold.split(X_train):
x_train, x_test = X_train[train], X_train[test]
y_train, y_test = Y[train], Y[test]
model = CatBoostClassifier(random_state=27, verbose=250)
model.fit(x_train, y_train, cat_features=cat_feat)
preds_proba = model.predict_proba(x_test)[:,1]
score = roc_auc_score(y_test, preds_proba)
scores.append(score)
print("Score: ", score)
print("Final Average: ", sum(scores)/len(scores))
###Output
CV Number: 1
#############################
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age']
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age', 'Region_Code']
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age', 'Region_Code', 'Gender']
Final features selected in this fold: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age', 'Region_Code', 'Gender']
CV Number: 2
#############################
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age']
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age', 'Region_Code']
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age', 'Region_Code', 'Gender']
Final features selected in this fold: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age', 'Region_Code', 'Gender']
CV Number: 3
#############################
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age']
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age', 'Region_Code']
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age', 'Region_Code', 'Gender']
Final features selected in this fold: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age', 'Region_Code', 'Gender']
CV Number: 4
#############################
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Vehicle_Age', 'Policy_Sales_Channel', 'Age']
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Vehicle_Age', 'Policy_Sales_Channel', 'Age', 'Region_Code']
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Vehicle_Age', 'Policy_Sales_Channel', 'Age', 'Region_Code', 'Gender']
Final features selected in this fold: ['Previously_Insured', 'Vehicle_Damage', 'Vehicle_Age', 'Policy_Sales_Channel', 'Age', 'Region_Code', 'Gender']
CV Number: 5
#############################
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age']
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age', 'Region_Code']
Candidate features: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age', 'Region_Code', 'Gender']
Final features selected in this fold: ['Previously_Insured', 'Vehicle_Damage', 'Policy_Sales_Channel', 'Age', 'Vehicle_Age', 'Region_Code', 'Gender']
FINAL SELECTED FEATURES
##################################
Feature: Previously_Insured Probability Score: 1.0
Feature: Vehicle_Damage Probability Score: 1.0
Feature: Policy_Sales_Channel Probability Score: 1.0
Feature: Age Probability Score: 1.0
Feature: Vehicle_Age Probability Score: 1.0
Feature: Region_Code Probability Score: 1.0
Feature: Gender Probability Score: 1.0
X_train_shape: (381109, 7)
X_test_shape: (127037, 7)
Y_shape: (381109,)
Learning rate set to 0.118484
0: learn: 0.5067103 total: 75.2ms remaining: 1m 15s
250: learn: 0.2621586 total: 16.8s remaining: 50.1s
500: learn: 0.2605203 total: 34.3s remaining: 34.2s
750: learn: 0.2591693 total: 52.7s remaining: 17.5s
999: learn: 0.2578612 total: 1m 10s remaining: 0us
Score: 0.8590058318296601
Learning rate set to 0.118484
0: learn: 0.5070373 total: 75.3ms remaining: 1m 15s
250: learn: 0.2630020 total: 17.4s remaining: 51.8s
500: learn: 0.2613595 total: 35.3s remaining: 35.1s
750: learn: 0.2599791 total: 53.9s remaining: 17.9s
999: learn: 0.2586046 total: 1m 12s remaining: 0us
Score: 0.8585210880781927
Learning rate set to 0.118484
0: learn: 0.5044411 total: 75.3ms remaining: 1m 15s
250: learn: 0.2619414 total: 17.3s remaining: 51.6s
500: learn: 0.2602182 total: 35.6s remaining: 35.5s
750: learn: 0.2588083 total: 53.7s remaining: 17.8s
999: learn: 0.2575082 total: 1m 12s remaining: 0us
Score: 0.8571659430213202
Learning rate set to 0.118484
0: learn: 0.5039459 total: 92.3ms remaining: 1m 32s
250: learn: 0.2618897 total: 17.7s remaining: 52.9s
500: learn: 0.2600846 total: 35.8s remaining: 35.6s
750: learn: 0.2585783 total: 54.1s remaining: 17.9s
999: learn: 0.2573148 total: 1m 12s remaining: 0us
Score: 0.8560407183431029
Learning rate set to 0.118485
0: learn: 0.5022396 total: 98.5ms remaining: 1m 38s
250: learn: 0.2619347 total: 17.5s remaining: 52.3s
500: learn: 0.2601808 total: 35.7s remaining: 35.5s
750: learn: 0.2587157 total: 53.8s remaining: 17.8s
999: learn: 0.2573977 total: 1m 11s remaining: 0us
Score: 0.8587037341219691
Final Average: 0.8578874630788491
###Markdown
Final training. Then generate submission file to upload on AV
###Code
# Final Average (using PyImpetus): 0.8579 [on LB: 0.8576] Rank=166/600
# Final Average (w/o using PyImpetus): 0.8576 [on LB: 0.8568] Rank=223/600
# Finally train the model on the whole dataset
model = CatBoostClassifier(random_state=27, verbose=250)
model.fit(X_train, Y, cat_features=cat_feat)
preds_proba = model.predict_proba(X_test)[:,1]
# And make a submission
fp = open("submit.csv", "w")
fp.write("id,Response\n")
for id_, pred in zip(df_test["id"].values, preds_proba):
fp.write(str(id_)+","+str(pred)+"\n")
fp.close()
###Output
Learning rate set to 0.130329
0: learn: 0.4893033 total: 119ms remaining: 1m 59s
250: learn: 0.2610622 total: 23.6s remaining: 1m 10s
500: learn: 0.2585179 total: 48.5s remaining: 48.3s
750: learn: 0.2563509 total: 1m 15s remaining: 24.9s
999: learn: 0.2544470 total: 1m 41s remaining: 0us
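###Markdown
The manual file writing above can also be expressed as a single pandas call; an equivalent sketch producing the same two-column file:
###Code
# Sketch: equivalent submission file via pandas
submission = pd.DataFrame({"id": df_test["id"].values, "Response": preds_proba})
submission.to_csv("submit.csv", index=False)
###Output
_____no_output_____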
###Markdown
CompositionDE - Compositional analysis of single-cell data This notebook serves as a tutorial for using the *SCDCdm* package to analyze changes in cell composition data. The package is intended to be used with data coming from single-cell RNA-seq experiments; however, there are no restrictions that prevent the use of data from other sources. The data we use in the following example comes from [*Haber et al. [2017]*](https://www.nature.com/articles/nature24489). It contains samples from the small intestinal epithelium of mice with different conditions.
###Code
# Setup
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import arviz as az
from scdcdm.util import comp_ana as mod
from scdcdm.util import cell_composition_data as dat
###Output
_____no_output_____
###Markdown
Data preparation
###Code
# Load data
cell_counts = pd.read_csv("../data/haber_counts.csv")
print(cell_counts)
###Output
Mouse Endocrine Enterocyte Enterocyte.Progenitor Goblet Stem \
0 Control_1 36 59 136 36 239
1 Control_2 5 46 23 20 50
2 Control_3 45 98 188 124 250
3 Control_4 26 221 198 36 131
4 H.poly.Day10_1 42 71 203 147 271
5 H.poly.Day10_2 40 57 383 170 321
6 H.poly.Day3_1 52 75 347 66 323
7 H.poly.Day3_2 65 126 115 33 65
8 Salm_1 37 332 113 59 90
9 Salm_2 32 373 116 67 117
TA TA.Early Tuft
0 125 191 18
1 11 40 5
2 155 365 33
3 130 196 4
4 109 180 146
5 244 256 71
6 263 313 51
7 39 129 59
8 47 132 10
9 65 168 12
###Markdown
Looking at the data, we see that we have 4 control samples and 3 conditions with 2 samples each. To use the models in *SCDCdm*, we first have to convert the data into an [anndata](https://github.com/theislab/anndata) object. This object separates our data components: cell counts are stored in `data.X`, covariates in `data.obs`. For our first example, we want to look at how the Salmonella infection influences the cell composition.
###Code
# Convert data to anndata object
# Filter out control and salmonella data
salm_indices = [0, 1, 2, 3, 8, 9]
salm_df = cell_counts.iloc[salm_indices, :]
# Convert to a CompositionalData object
data_salm = dat.from_pandas(salm_df, covariate_columns=["Mouse"])
# Extract condition from mouse name and add it as an extra column to the covariates
data_salm.obs["Condition"] = data_salm.obs["Mouse"].str.replace(r"_[0-9]", "")
print(data_salm.X)
print(data_salm.obs)
###Output
Transforming to str index.
###Markdown
Plotting the data, we can see that there is a large increase of Enterocytes in the infected samples, while most other cell types slightly decrease. Since scRNA-seq experiments are limited in the number of cells per sample, the count data is compositional, which leads to negative correlations between the cell types. Thus, the slight decreases in many cell types might be fully caused by the increase in Enterocytes.
###Code
fig, ax = plt.subplots(figsize=(12,5))
df = pd.melt(salm_df, id_vars=['Mouse'], value_vars=salm_df.columns[1:])
sns.set_context('notebook')
sns.set_style('ticks')
d = sns.barplot(x='variable', y = 'value', hue=df["Mouse"].str.replace(r"_[0-9]", ""), data=df)
d.set_ylabel('Cell Count')
loc, labels = plt.xticks()
d.set_xticklabels(labels, rotation=90)
d.set_xlabel('Cell type')
plt.show()
###Output
_____no_output_____
###Markdown
*Note that the use of* anndata *in* SCDCdm *is different from the use in scRNA-seq pipelines, e.g.* scanpy. *To convert* scanpy *objects to a SCDCdm dataset, have a look at `dat.from_scanpy_list`.* Model setup and inference: We can now create the model and run inference on it. The `mod.CompositionalAnalysis` class takes our data object and performs parameter inference on it. The `formula` parameter specifies how the covariates are used in the model. It can process R-style formulas via the [patsy](https://patsy.readthedocs.io/en/latest/) package, e.g. `formula="Cov1 + Cov2 + Cov3"`. The `baseline_index` parameter specifies a cell type that is left unchanged by the covariates. This feature is optional; a baseline index of `None` specifies the model without a baseline. For now, we will use no baseline index.
###Code
model_salm = mod.CompositionalAnalysis(data_salm, formula="Condition", baseline_index=None)
###Output
_____no_output_____
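###Markdown
To see where labels such as `Condition[T.Salm]` in the summaries below come from, the patsy formula can be expanded by hand into its treatment-coded design matrix. This is a small side check, not part of the original workflow:
###Code
import patsy
# Show the design matrix that the formula produces from the covariate table
print(patsy.dmatrix("Condition", data_salm.obs))
###Output
_____no_output_____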
###Markdown
HMC sampling can be performed by calling `sample_hmc()` on the model, which produces a `scdcdm.util.result_classes.CAResult` object.
###Code
# Run MCMC
sim_results = model_salm.sample_hmc()
###Output
MCMC sampling finished. (121.185 sec)
Acceptance rate: 54.2%
###Markdown
Result interpretation: Calling `summary()` on the results object, we can see all relevant information for further analysis:
###Code
sim_results.summary()
###Output
Compositional Analysis summary:
Data: 6 samples, 8 cell types
Baseline index: None
Formula: Condition
Intercepts:
Final Parameter Expected Sample
Cell Type
Endocrine 1.093 34.283562
Enterocyte 2.312 116.008793
Enterocyte.Progenitor 2.515 142.119178
Goblet 1.673 61.231759
Stem 2.691 169.468316
TA 2.101 93.940912
TA.Early 2.861 200.871618
Tuft 0.396 17.075864
Effects:
Final Parameter Expected Sample \
Covariate Cell Type
Condition[T.Salm] Endocrine 0.000000 23.718630
Enterocyte 1.436528 337.575408
Enterocyte.Progenitor 0.000000 98.323280
Goblet 0.000000 42.362385
Stem 0.000000 117.244422
TA 0.000000 64.991782
TA.Early 0.000000 138.970382
Tuft 0.000000 11.813711
log2-fold change
Covariate Cell Type
Condition[T.Salm] Endocrine -0.531496
Enterocyte 1.540976
Enterocyte.Progenitor -0.531496
Goblet -0.531496
Stem -0.531496
TA -0.531496
TA.Early -0.531496
Tuft -0.531496
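###Markdown
As a quick cross-check of the table above, the reported log2-fold change for Enterocytes can be reproduced by hand from the two expected-sample values (numbers copied from the summary; not part of the model output):
###Code
import numpy as np
# Expected sample counts from the summary: Condition[T.Salm] effect vs. intercept
print(np.log2(337.575408 / 116.008793))  # ~1.54, matching the log2-fold change column
###Output
_____no_output_____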
###Markdown
**Model properties** First, the summary shows an overview of the model properties: * Number of samples/cell types * Index of the baseline cell type, starting at 0 * The formula used. The model has two types of parameters that are relevant for analysis - intercepts and effects. These can be interpreted like in a standard regression model: intercepts show how the cell types are distributed without any active covariates, effects show how the covariates influence the cell types. **Intercepts** The first column of the intercept summary shows the parameters determined by the MCMC inference. The "Expected sample" column gives some context to the numerical values. If we take the mean number of cells over all samples from our dataset, then the model expects a new sample with exactly that many cells to look like this. **Effects** For the effect summary, the first column again shows the inferred parameters for all combinations of covariates and cell types. A value of zero means that no significant effect was detected; for a value other than zero, a significant change was detected. A positive sign indicates an increase, a negative sign a decrease. The "Expected sample" and "log2-fold change" columns give us an idea of the magnitude of this change. The expected sample is calculated for each covariate separately (covariate value = 1), with the same method as for the intercepts. The log-fold change is then calculated between this column and the expected intercept sample. Since the data is compositional, cell types for which no significant change was detected are expected to change as well. **Interpretation** In the Salmonella case, we see only a significant increase of Enterocytes, while all other cell types are unaffected by the disease. The log-fold change of Enterocytes between control and infected samples with the same total cell count lies at about 1.54. Diagnostics and plotting: For further analysis, `summary_extended()` gives us more information about the model:
###Code
sim_results.summary_extended()
###Output
Compositional Analysis summary (extended):
Data: 6 samples, 8 cell types
Baseline index: None
Formula: Condition
Spike-and-slab threshold: 0.692
MCMC Sampling: Sampled 20000 chain states (5000 burnin samples) in 121.185 sec. Acceptance rate: 54.2%
Intercepts:
Final Parameter HPD 3% HPD 97% SD \
Cell Type
Endocrine 1.093 0.405 1.819 0.383
Enterocyte 2.312 1.713 2.898 0.314
Enterocyte.Progenitor 2.515 1.924 3.118 0.322
Goblet 1.673 0.983 2.292 0.352
Stem 2.691 2.141 3.302 0.311
TA 2.101 1.456 2.682 0.330
TA.Early 2.861 2.279 3.428 0.312
Tuft 0.396 -0.375 1.185 0.423
Expected Sample
Cell Type
Endocrine 34.283562
Enterocyte 116.008793
Enterocyte.Progenitor 142.119178
Goblet 61.231759
Stem 169.468316
TA 93.940912
TA.Early 200.871618
Tuft 17.075864
Effects:
Final Parameter HPD 3% HPD 97% \
Covariate Cell Type
Condition[T.Salm] Endocrine 0.000000 -0.062 1.160
Enterocyte 1.436528 0.977 1.921
Enterocyte.Progenitor 0.000000 -0.065 0.629
Goblet 0.000000 -0.012 1.053
Stem 0.000000 -0.392 0.116
TA 0.000000 -0.378 0.252
TA.Early 0.000000 -0.092 0.394
Tuft 0.000000 -0.403 1.113
SD Inclusion probability \
Covariate Cell Type
Condition[T.Salm] Endocrine 0.394 0.411267
Enterocyte 0.255 1.000000
Enterocyte.Progenitor 0.194 0.293000
Goblet 0.355 0.472667
Stem 0.134 0.201267
TA 0.151 0.219600
TA.Early 0.139 0.205600
Tuft 0.369 0.342533
Expected Sample log2-fold change
Covariate Cell Type
Condition[T.Salm] Endocrine 23.718630 -0.531496
Enterocyte 337.575408 1.540976
Enterocyte.Progenitor 98.323280 -0.531496
Goblet 42.362385 -0.531496
Stem 117.244422 -0.531496
TA 64.991782 -0.531496
TA.Early 138.970382 -0.531496
Tuft 11.813711 -0.531496
###Markdown
The spike-and-slab threshold value depends on the number of cell types and determines the inclusion probability cutoff for significant effects. Further, the extended summary includes some information on the MCMC sampling procedure (chain length, burn-in, acceptance rate, duration). For both effects and intercepts, we also get the standard deviation and HPD interval endpoints of the elements of the generated Markov chain. The width of the HPD interval can be set by e.g. `summary_extended(credible_interval=0.9)`. The effects summary also includes the spike-and-slab inclusion probability for each effect, i.e. the share of MCMC samples for which this effect was not set to 0 by the spike-and-slab prior. We can also use the summary tables from `summary_extended()` as pandas DataFrames to tweak them further; for example, we can show only the cell types with significant effects:
###Code
# Intercept dataframe: sim_result.intercept_df
# Effect dataframe: sim_result.effect_df
sig_effects = sim_results.effect_df.loc[sim_results.effect_df["Final Parameter"] != 0]
print(sig_effects)
###Output
Final Parameter HPD 3% HPD 97% SD \
Covariate Cell Type
Condition[T.Salm] Enterocyte 1.436528 0.977 1.921 0.255
Inclusion probability Expected Sample \
Covariate Cell Type
Condition[T.Salm] Enterocyte 1.0 337.575408
log2-fold change
Covariate Cell Type
Condition[T.Salm] Enterocyte 1.540976
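###Markdown
As mentioned above, the width of the reported HPD interval can be adjusted when requesting the extended summary. A sketch using the keyword from the text (not re-run here):
###Code
# 90% credible intervals instead of the default
sim_results.summary_extended(credible_interval=0.9)
###Output
_____no_output_____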
###Markdown
Also, the results object supports all plotting and diagnosis functions of [arviz](https://github.com/arviz-devs/arviz).
###Code
# Example: Plot Markov chain density for all effects
az.plot_density(sim_results, var_names="beta")
plt.show()
###Output
_____no_output_____
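###Markdown
Any other arviz diagnostic can be used in the same way; for example, a trace plot of the effect parameters (a sketch, assuming the same arviz interface as in the cell above):
###Code
# Example: trace plot for all effects
az.plot_trace(sim_results, var_names="beta")
plt.show()
###Output
_____no_output_____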
###Markdown
Tweaking the model: categorical covariates, baseline. The compositional analysis models from *SCDCdm* are also able to automatically deal with categorical covariates via the [patsy](https://patsy.readthedocs.io/en/latest/) framework for formula specification. By default, categorical variables are encoded via full-rank treatment coding, where the value of the first sample in the dataset is used as the reference (control) category. We can change this by tweaking the model formula to `"C(<covariate>, Treatment('<reference level>'))"`:
###Code
# Set salmonella infection as reference category
model_salm_switch_ref = mod.CompositionalAnalysis(data_salm, formula="C(Condition, Treatment('Salm'))", baseline_index=None)
switch_results = model_salm_switch_ref.sample_hmc()
switch_results.summary()
###Output
MCMC sampling finished. (135.503 sec)
Acceptance rate: 49.1%
Compositional Analysis summary:
Data: 6 samples, 8 cell types
Baseline index: None
Formula: C(Condition, Treatment('Salm'))
Intercepts:
Final Parameter Expected Sample
Cell Type
Endocrine 1.305 28.853631
Enterocyte 3.731 326.436632
Enterocyte.Progenitor 2.574 102.640834
Goblet 1.886 51.585254
Stem 2.660 111.858631
TA 2.059 61.327959
TA.Early 2.883 139.803218
Tuft 0.468 12.493842
Effects:
Final Parameter \
Covariate Cell Type
C(Condition, Treatment('Salm'))[T.Control] Endocrine 0.000000
Enterocyte -1.416635
Enterocyte.Progenitor 0.000000
Goblet 0.000000
Stem 0.000000
TA 0.000000
TA.Early 0.000000
Tuft 0.000000
Expected Sample \
Covariate Cell Type
C(Condition, Treatment('Salm'))[T.Control] Endocrine 40.992691
Enterocyte 112.478083
Enterocyte.Progenitor 145.823032
Goblet 73.287772
Stem 158.918863
TA 87.129348
TA.Early 198.620065
Tuft 17.750147
log2-fold change
Covariate Cell Type
C(Condition, Treatment('Salm'))[T.Control] Endocrine 0.506614
Enterocyte -1.537159
Enterocyte.Progenitor 0.506614
Goblet 0.506614
Stem 0.506614
TA 0.506614
TA.Early 0.506614
Tuft 0.506614
###Markdown
We can also handle multiple levels of one categorical covariate:
###Code
# Get dataset with all three diseases
data_all = dat.from_pandas(cell_counts, covariate_columns=["Mouse"])
data_all.obs["Condition"] = data_all.obs["Mouse"].str.replace(r"_[0-9]", "")
print(data_all.X)
print(data_all.obs)
# model all three diseases at once
model_all = mod.CompositionalAnalysis(data_all, formula="Condition", baseline_index=None)
switch_results = model_all.sample_hmc()
switch_results.summary()
###Output
MCMC sampling finished. (116.588 sec)
Acceptance rate: 51.3%
Compositional Analysis summary:
Data: 10 samples, 8 cell types
Baseline index: None
Formula: Condition
Intercepts:
Final Parameter Expected Sample
Cell Type
Endocrine 0.940 44.394709
Enterocyte 1.970 124.352502
Enterocyte.Progenitor 2.339 179.849482
Goblet 1.427 72.249130
Stem 2.431 197.180646
TA 1.881 113.763336
TA.Early 2.555 223.211620
Tuft 0.521 29.198586
Effects:
Final Parameter \
Covariate Cell Type
Condition[T.H.poly.Day10] Endocrine 0.000000
Enterocyte -0.705784
Enterocyte.Progenitor 0.000000
Goblet 0.000000
Stem 0.000000
TA 0.000000
TA.Early 0.000000
Tuft 0.902785
Condition[T.H.poly.Day3] Endocrine 0.000000
Enterocyte 0.000000
Enterocyte.Progenitor 0.000000
Goblet 0.000000
Stem 0.000000
TA 0.000000
TA.Early 0.000000
Tuft 0.000000
Condition[T.Salm] Endocrine 0.000000
Enterocyte 1.484487
Enterocyte.Progenitor 0.000000
Goblet 0.000000
Stem 0.000000
TA 0.000000
TA.Early 0.000000
Tuft 0.000000
Expected Sample \
Covariate Cell Type
Condition[T.H.poly.Day10] Endocrine 45.322072
Enterocyte 62.677999
Enterocyte.Progenitor 183.606368
Goblet 73.758346
Stem 201.299562
TA 116.139744
TA.Early 227.874300
Tuft 73.521621
Condition[T.H.poly.Day3] Endocrine 44.394709
Enterocyte 124.352502
Enterocyte.Progenitor 179.849482
Goblet 72.249130
Stem 197.180646
TA 113.763336
TA.Early 223.211620
Tuft 29.198586
Condition[T.Salm] Endocrine 31.019422
Enterocyte 383.408299
Enterocyte.Progenitor 125.664234
Goblet 50.481834
Stem 137.773845
TA 79.488594
TA.Early 155.962180
Tuft 20.401604
log2-fold change
Covariate Cell Type
Condition[T.H.poly.Day10] Endocrine 0.029826
Enterocyte -0.988405
Enterocyte.Progenitor 0.029826
Goblet 0.029826
Stem 0.029826
TA 0.029826
TA.Early 0.029826
Tuft 1.332270
Condition[T.H.poly.Day3] Endocrine 0.000000
Enterocyte 0.000000
Enterocyte.Progenitor 0.000000
Goblet 0.000000
Stem 0.000000
TA 0.000000
TA.Early 0.000000
Tuft 0.000000
Condition[T.Salm] Endocrine -0.517216
Enterocyte 1.624446
Enterocyte.Progenitor -0.517216
Goblet -0.517216
Stem -0.517216
TA -0.517216
TA.Early -0.517216
Tuft -0.517216
###Markdown
*SCDCdm* also allows us to set a baseline cell type whose effect is always 0. If such a cell type exists, it is recommended to set it as the baseline to avoid interpretation issues. The baseline can easily be specified in the model setup:
###Code
# model salmonella infection with baseline set to Endocrine cells
model_baseline = mod.CompositionalAnalysis(data_salm, formula="Condition", baseline_index="Endocrine")
baseline_results = model_baseline.sample_hmc()
baseline_results.summary()
###Output
MCMC sampling finished. (151.526 sec)
Acceptance rate: 48.1%
Compositional Analysis summary:
Data: 6 samples, 8 cell types
Baseline index: 0
Formula: Condition
Intercepts:
Final Parameter Expected Sample
Cell Type
Endocrine 1.197 37.161808
Enterocyte 2.331 115.501276
Enterocyte.Progenitor 2.536 141.780711
Goblet 1.682 60.357280
Stem 2.715 169.572671
TA 2.120 93.529938
TA.Early 2.880 199.992842
Tuft 0.421 17.103474
Effects:
Final Parameter Expected Sample \
Covariate Cell Type
Condition[T.Salm] Endocrine 0.000000 25.875335
Enterocyte 1.423915 334.021412
Enterocyte.Progenitor 0.000000 98.720259
Goblet 0.000000 42.026072
Stem 0.000000 118.071478
TA 0.000000 65.123807
TA.Early 0.000000 139.252688
Tuft 0.000000 11.908950
log2-fold change
Covariate Cell Type
Condition[T.Salm] Endocrine -0.522243
Enterocyte 1.532032
Enterocyte.Progenitor -0.522243
Goblet -0.522243
Stem -0.522243
TA -0.522243
TA.Early -0.522243
Tuft -0.522243
###Markdown
We are importing `dagger` as `dg` to clearly label all parts of the code that belong to the library and differentiate them from other user-defined functionalities.
###Code
import dagger as dg
from types import MethodType
from functools import partial
import os
import random
import numpy as np
import torch
from torchvision import datasets, transforms
from torch import nn
import torch.nn.functional as F
###Output
_____no_output_____
###Markdown
Define a seed-setting function to be used later for reproducibility purposes
###Code
def set_reproducible_seed(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
import logging
logging.basicConfig(level=logging.DEBUG)
###Output
_____no_output_____
###Markdown
Since `dagger` only provides the experiment orchestration skeleton, the definition of an experiment, the actions that transition from one state to another, and all custom functionalities that depend on the nature of the experiment remain up to the user to define, without `dagger` imposing a particular way to express those steps in a machine learning pipeline. This enables code reuse and maximum flexibility for the user. In most applications, initializing an experiment might mean initializing a model, an optimizer, and a dataloader, for example.
###Code
# Any canonical utility function to fetch data, initialize models and optimizers
# can be incorporated into the state initialization. For example, here's a function that could be
# found in common ML experiment code to get MNIST dataloaders.
def get_dataloaders(dataset, root, train_batch_size, test_batch_size, seed, num_workers=0):
# this example only supports MNIST but can be easily extended to other datasets
if dataset.lower() == "mnist":
transf = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
]
)
train_dataset = datasets.MNIST(
root=root,
train=True,
download=True,
transform=transf,
)
test_dataset = datasets.MNIST(
root=root,
train=False,
download=True,
transform=transf,
)
def _worker_init_fn(worker_id, seed):
# for reproducibility
np.random.seed(int(seed))
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=train_batch_size,
shuffle=True,
num_workers=num_workers,
worker_init_fn=partial(_worker_init_fn, seed=seed),
)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=test_batch_size,
shuffle=False,
num_workers=num_workers,
worker_init_fn=partial(_worker_init_fn, seed=seed),
)
return train_loader, test_loader
else:
raise ValueError('Only value currently supported for `dataset` is "mnist"')
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 6, 3)
self.conv2 = nn.Conv2d(6, 16, 3)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
x = x.view(-1, int(x.nelement() / x.shape[0]))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_model(model_name):
# this example only supports LeNet but can be easily extended to other models
if model_name.lower() == "lenet":
return LeNet()
else:
raise ValueError('Only value currently supported for `model_name` is "lenet"')
def get_optimizer(optimizer_name, parameters, **kwargs):
# this example only supports adam and sgd but can be easily extended to other optimizers
if optimizer_name.lower() == "adam":
optimizer = torch.optim.Adam(parameters, **kwargs)
elif optimizer_name.lower() == "sgd":
optimizer = torch.optim.SGD(parameters, **kwargs)
else:
raise ValueError("`optimizer_name` must be one of {'adam', 'sgd'}")
return optimizer
###Output
_____no_output_____
###Markdown
With all the generic dataset, model, and optimizer fetching functions above, which are often found in many ML pipelines, we can now enter the `dagger` world and create a custom experiment state type called `MyExperimentState`, which will subclass `dagger`'s `ExperimentState` to specify what it means to be a new state in our own experiments. Specifically, we will have to specify the properties that define and distinguish one state from another, as well as an `initialize_state` method with the instructions on how to generate a state from scratch.
###Code
class MyExperimentState(dg.ExperimentState):
# properties that will be serialized with the state
PROPERTIES = [
"dataset_name",
"model_name",
"optimizer_name",
"dataloader_hparams",
"optimizer_hparams",
"seed",
]
# properties that will not be serialized with the state
NONHASHED_ATTRIBUTES = [
"train_dataloader",
"test_dataloader",
"model",
"optimizer",
]
def __init__(self, parent_sha, experiment_object):
super().__init__(parent_sha, experiment_object)
def initialize_state(self, init_schema=None, parallel=False, **kwargs):
"""Initializing a state, in this example, means instantiating the right model, dataloaders, and optimizer.
"""
self.seed = kwargs.get("seed", 0)
set_reproducible_seed(self.seed) # function defined at the top of the notebook
# Instantiate dataloaders
self.train_dataloader, self.test_dataloader = get_dataloaders(**self.dataloader_hparams)
# Instantiate model
self.model = get_model(self.model_name)
# Instantiate optimizer
self.optimizer = get_optimizer(
self.optimizer_name,
self.model.parameters(),
**self.optimizer_hparams,
)
###Output
_____no_output_____
###Markdown
Now that we have defined a new type of experiment state that is unique to the experiments we want to run, let's initialize an experiment by specifying the type of experiment states it will hold and the directory where they will be stored.
###Code
exp = dg.Experiment(directory='./example/', state_class=MyExperimentState)
exp.directory
###Output
_____no_output_____
###Markdown
The experiment is currently an empty container, as no instructions have been provided to initialize the experiment
###Code
exp.leaves
exp.root is None
exp.graph is None
###Output
_____no_output_____
###Markdown
The experiment container has been created, but it still doesn't contain any experiment state. Let's start by creating a root, from which all other experiment states will originate. This means creating an instance of `MyExperimentState`. Instead of doing it manually in an ephemeral way, we can use the `.spawn_new_tree` method of a `dagger` `Experiment`.
###Code
seed = 123
dataloader_hparams = {
"dataset": "mnist",
"root": "~/data/",
"train_batch_size": 64,
"test_batch_size": 64,
"num_workers": 0,
"seed": seed,
}
optimizer_hparams = {"lr": 0.01}
# Let the Experiment `exp` know what model, dataset, optimizer (and all their hyperparams) to use to generate
# the root state.
x = exp.spawn_new_tree(
dataset_name=dataloader_hparams["dataset"],
model_name="lenet",
optimizer_name="sgd",
dataloader_hparams=dataloader_hparams,
optimizer_hparams=optimizer_hparams,
seed=seed,
)
exp.root
# ^^this should also be added to the nodes and leaves??
exp.experiment_arguments
exp.root.model
exp.root.train_dataloader
exp.root.sha()
exp.root.seed
exp.root.path
###Output
_____no_output_____
###Markdown
Now that we have initialized a shared root for all experiments, we can start deriving child states from it depending on the scientific questions we are interested in asking about the model. To generate a child state, we need to act on the root with a `Recipe` which will specify the actions that turn the root into a new state (i.e. a new instance of `MyExperimentState` attached to the same `Experiment`). The `exp` object will track provenance of all experiment states. Say, for example, that the experiment we are interested in running here is the comparison between training the full model versus training a randomly pruned version of the same model. We will need two `Recipe`s: one that trains the model, one that prunes and then trains the model.
###Code
# For reproducibility purposes, let's go through the intermediate step of creating a SeededRecipe for seed handling
class SeededRecipe(dg.Recipe):
def __call__(self, experiment_state):
self.experiment_object = experiment_state.experiment_object
if not hasattr(self, "seed") or self.seed is None:
self.seed = self.experiment_object.root.seed
return super().__call__(experiment_state)
def set_seed(self):
set_reproducible_seed(self.seed) # defined at the top of the notebook
class TrainRecipe(SeededRecipe):
PROPERTIES = ["nb_epochs", "seed"]
def __init__(self, nb_epochs, seed=None):
self.nb_epochs = nb_epochs
self.seed = seed
def run(self, new_state):
self.set_seed()
new_state.model.train()
loss_func = torch.nn.CrossEntropyLoss()
# Training loop
for epoch_n in range(self.nb_epochs):
print('Epoch: {}'.format(epoch_n))
for batch_n, (X, y) in enumerate(new_state.train_dataloader):
# train
new_state.optimizer.zero_grad()
yhat = new_state.model(X)
loss = loss_func(yhat, y)
loss.backward()
new_state.optimizer.step()
return new_state
class PruneRecipe(SeededRecipe):
PROPERTIES = ["schema", "seed"]
def __init__(self, schema, seed=None):
self.schema = schema
self.seed = seed
def run(self, new_state):
self.set_seed()
def _prune_model(model, pruning_schema):
"""Use the instructions in the pruning schema to apply the correct pruning
function to the correct tensor within the correct module.
This uses `named_modules` to get the module object.
"""
for (module_name, tensor_name), pruning_fn in pruning_schema.items():
# use the module name to extract the module from the model
if module_name not in dict(model.named_modules()):
raise KeyError(
"Module {} not found. Available modules: {}".format(
module_name, dict(model.named_modules()).keys()
)
)
module = dict(model.named_modules())[module_name]
# now that we have both module and tensor_name, we prune them
pruning_fn(module, tensor_name)
_prune_model(
model=new_state.model,
pruning_schema=self.schema,
)
return new_state
###Output
_____no_output_____
###Markdown
As defined above, a `PruneRecipe` will need a pruning schema that provides instructions on how to prune each parameter in the model. Since we only want to prune weights (and not biases) at random by removing 50% of their connections, the schema below will do.
###Code
import torch.nn.utils.prune as prune
pruning_schema = {
("conv1", "weight"): partial(prune.random_unstructured, amount=0.5),
("conv2", "weight"): partial(prune.random_unstructured, amount=0.5),
("fc1", "weight"): partial(prune.random_unstructured, amount=0.5),
("fc2", "weight"): partial(prune.random_unstructured, amount=0.5),
("fc3", "weight"): partial(prune.random_unstructured, amount=0.5),
}
a = TrainRecipe(nb_epochs=1)(x)
b = TrainRecipe(nb_epochs=1)(
PruneRecipe(schema=pruning_schema)(x)
)
exp.leaves
exp.run()
###Output
INFO:dagger:No cached state at: /private/home/michela/dagger/tutorials/example/48d6fa140909a216a8bb4cf557ac922c
DEBUG:dagger:Saving to: /private/home/michela/dagger/tutorials/example/48d6fa140909a216a8bb4cf557ac922c
INFO:dagger:No cached state at: /private/home/michela/dagger/tutorials/example/22f54778d497611f21fa6f998f270572
###Markdown
Analysis Now that the experiment has been run, one could even safely close and restart this notebook to simulate the experience of analyzing the results at a later point in time. The key point of `dagger` is that it powers quick analysis of complex experiment results through an intuitive API that allows users to visualize and filter experiment states by tag, level, etc.
###Code
import dagger as dg
###Output
_____no_output_____
###Markdown
Restore an experiment from disk by specifying its location and the type of experiment states it contains
###Code
exp = dg.Experiment.restore(directory='./example/', state_class=MyExperimentState)
###Output
_____no_output_____
###Markdown
Draw the experiment tree for visual inspection
###Code
exp.graph.draw(filename='./example/graph', format='png', view=True)
###Output
DEBUG:graphviz.files:write 949 bytes to './example/graph'
DEBUG:graphviz.backend:run ['dot', '-Tpng', '-O', 'graph']
DEBUG:graphviz.files:delete './example/graph'
###Markdown
This tree clearly shows all experiment states in the experiment with their associated SHAs. The root is easy to identify (level=0). All other experiment states originate from the root and are connected in a DAG-like fashion. On one side we have the state that corresponds to simply training the model that was initialized in the root; on the other side, we have a state that corresponds to first pruning the model that was initialized in the root, and then training that pruned model. It is true, however, that, unless one already knows the exact logic that went into the generation of these experiment states, it is very hard from this visualization to know exactly what each state represents. Thankfully, `dagger` easily solves the problem by letting you add `tags` to each state to easily identify and filter them. This can be achieved by simply encapsulating the recipes with `with` statements:
###Code
with exp.tag("train1epoch"):
c = TrainRecipe(nb_epochs=1, seed=123)(exp.root.to_promise())
# This new node will be attached to the root, in this example, but can be attached to any other node
# Note: remember to transform the node to a dagger.ExperimentStatePromise so that dask can run the new graph
exp.run()
###Output
INFO:dagger:No cached state at: /private/home/michela/dagger/tutorials/example/048559c33aa862acc226c32a553bab4b
###Markdown
Now that the new node has been attached, let's restore and replot the experiment to see how this new node (and its `tags`) appears
###Code
exp = dg.Experiment.restore(directory='./example/', state_class=MyExperimentState)
exp.graph.draw(filename='./example/graph', format='png', view=True)
###Output
DEBUG:graphviz.files:write 1243 bytes to './example/graph'
DEBUG:graphviz.backend:run ['dot', '-Tpng', '-O', 'graph']
DEBUG:graphviz.files:delete './example/graph'
###Markdown
Now that we have a full experiment tree, as an example analysis, let's evaluate some simple metrics (such as accuracy) for the models at all leaf states. First, define the evaluation function:
###Code
def eval_model(model, loader, eval_fn=None, device=torch.device("cpu")):
"""Main evaluation loop for a model. Allows for configurable metric."""
# Default eval metric: accuracy
if not eval_fn:
def eval_fn(y_true, y_pred):
return (y_pred.argmax(-1) == y_true).sum().item()
training = model.training
# Eval loop
model.eval()
total_examples = len(loader.dataset)
total_metric = 0
model = model.to(device=device)
with torch.no_grad():
for X, y in loader:
X = X.to(device=device)
y = y.to(device=device)
y_pred = model(X)
batch_metric = eval_fn(y_true=y, y_pred=y_pred)
if hasattr(batch_metric, "item"):
batch_metric = batch_metric.item()
total_metric += batch_metric
# Reset the model to training mode if that's the mode it was initially found in
if training:
model.train()
return float(total_metric) / total_examples
###Output
_____no_output_____
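###Markdown
Because the metric is configurable through `eval_fn`, other quantities can be computed with the same loop. For example, a summed cross-entropy could be plugged in as follows (a sketch; `xent_sum` is a hypothetical helper, not part of the original code):
###Code
def xent_sum(y_true, y_pred):
    # Summed (not averaged) cross-entropy, so that dividing by the dataset size
    # inside eval_model yields the mean test loss per example
    return F.cross_entropy(y_pred, y_true, reduction="sum")

# Usage sketch: eval_model(state.model, state.test_dataloader, eval_fn=xent_sum)
###Output
_____no_output_____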
###Markdown
Second, identify all leaf states by looping through the experiment's `edge_map` and selecting the nodes without children (the leaves, by definition).
###Code
leaves = [node for node, children in exp.graph.edge_map.items() if not children]
# Reload all leaves into memory one by one and evaluate the accuracy of their models
for node, state in exp.graph.node_map.items():
if node in leaves:
with state.lazy_load():
print('Node {}, Accuracy {}'.format(
node,
eval_model(state.model, state.test_dataloader, device=torch.device("cuda"))
))
###Output
INFO:dagger:State already exists at /private/home/michela/dagger/tutorials/example/22f54778d497611f21fa6f998f270572! Safely loading existing state.
INFO:dagger:State already exists at /private/home/michela/dagger/tutorials/example/d296b6e17e99ef92fd6c728507198d69! Safely loading existing state.
###Markdown
As a sanity check, the states with the tag and without the tag that are otherwise identical (1 epoch of training from the same initialization) return the same accuracy. The code in the previous cell can also be expressed by making use of a `dagger.Function`:
###Code
evaluate_acc = dg.Function(
lambda s: print('Node {}, Accuracy {}'.format(
s.sha(),
eval_model(s.model, s.test_dataloader, device=torch.device("cuda"))
))
)
for node, state in exp.graph.node_map.items():
if node in leaves:
evaluate_acc(state.to_promise())
exp.run()
###Output
INFO:dagger:State already exists at /private/home/michela/dagger/tutorials/example/048559c33aa862acc226c32a553bab4b! Safely loading existing state.
INFO:dagger:State already exists at /private/home/michela/dagger/tutorials/example/22f54778d497611f21fa6f998f270572! Safely loading existing state.
###Markdown
Let's dig a bit deeper into all of the functionalities provided by `dagger` at the analysis stage.
###Code
type(exp.graph)
###Output
_____no_output_____
###Markdown
The experiment graph is an instance of the `dagger` `StaticExperimentTree` class, which enables easy inspection of all experimental dependencies. Let's see how. The list of all experiment states in the tree is wrapped in the `dagger` `NodeSet` inner class which allows for compositional filtering of states in the tree by `tag`.
###Code
exp.graph.nodes
type(exp.graph.nodes)
exp.graph.root # easily access the root
exp.graph.node('62bee8c1ddf044584494779cf50e344a-root') # select state by sha
exp.graph.nodes.filter('train*epoch') # filter states by tag
exp.graph.nodes_at_distance(1) # filter states by distance from the root
exp.graph.to_graphviz() # plot experiment tree without saving
###Output
DEBUG:graphviz.backend:run ['dot', '-Tsvg']
###Markdown
It is worth noting that, at this point, **none of the experiments (with related models, masks, dataloaders, etc) is fully loaded into memory**. If that were the case, no large experiments with multiple states could easily be analyzed due to memory constraints. Instead, only their minimal ("slim") version is currently loaded. This allows us to access only the basic info needed to reconstruct the tree dependencies (children, parents, shas, tags). Let's pick any state and call it `s`.
###Code
s = exp.graph.nodes[0]
s.slim_loaded
s.__dict__
###Output
_____no_output_____
###Markdown
To fully load an experiment state (node) in memory for full analysis, we need to restore it:
###Code
s.restore()
s.slim_loaded # it is now fully loaded
s = exp.graph.nodes[0]
s.__dict__
###Output
_____no_output_____
###Markdown
To remove the state from memory once you're done using it, make sure to deflate it:
###Code
s.deflate()
s.slim_loaded
###Output
_____no_output_____
###Markdown
We suggest using this with context managers (`with` statements, or the `dagger` provided `iterator`):
###Code
for state in exp.graph.nodes.iterator:
print(state.slim_loaded)
###Output
INFO:dagger:State already exists at /private/home/michela/dagger/tutorials/example/62bee8c1ddf044584494779cf50e344a-root! Safely loading existing state.
INFO:dagger:State already exists at /private/home/michela/dagger/tutorials/example/48d6fa140909a216a8bb4cf557ac922c! Safely loading existing state.
INFO:dagger:State already exists at /private/home/michela/dagger/tutorials/example/22f54778d497611f21fa6f998f270572! Safely loading existing state.
INFO:dagger:State already exists at /private/home/michela/dagger/tutorials/example/d296b6e17e99ef92fd6c728507198d69! Safely loading existing state.
###Markdown
In fact, a safer way to restore the state, instead of using `restore` and then having to remember to `deflate`, is to `lazy_load` the state (which is what `iterator` above is making use of):
###Code
s.slim_loaded
with s.lazy_load():
print(s.slim_loaded)
s.slim_loaded
###Output
_____no_output_____ |
examples/example_submission_systems.ipynb | ###Markdown
Submission Systems: Submission systems play an important role if you want to develop your PyGromos code. Many times they are hidden in the Simulation_runner blocks, but maybe you want to develop something where you need direct access to the submission system. This notebook gives some examples of how you can use the submission systems. Note that all submission systems are written in the same way, so you can exchange them quickly.
###Code
from pygromos.simulations.hpc_queuing.submission_systems import local # this executes your code in your local session.
from pygromos.simulations.hpc_queuing.submission_systems import lsf # this module can be used to submit to the lsf-queue (e.g. on euler)
from pygromos.simulations.hpc_queuing.submission_systems import dummy # this is a dummy system, that only prints the commands
###Output
_____no_output_____
###Markdown
Local Submission: This system executes the commands directly in your current session. This allows you to locally test or execute your code. If your process needs much more time, you may later want to switch to a submission system with job queueing.
###Code
sub_local = local.LOCAL()
sub_local.verbose = True
bash_command = "sleep 2; echo \"WUHA\"; sleep 2"
job_id = sub_local.submit_to_queue(bash_command)
job_id
#This is a dummy function, to not break the code!
sub_local.get_jobs_from_queue("FUN")
###Output
Searching ID: FUN
###Markdown
LSF Submission: The LSF submission system allows you to submit jobs to the IBM LSF queueing system.**Careful! This part requires a running LSF queueing system on your machine.** You can submit and kill jobs and job arrays, as well as get information from the queue.
###Code
#Construct system:
sub_lsf = lsf.LSF(nmpi=1, job_duration = "24:00", max_storage=100)
sub_lsf.verbose = True
sub_lsf._refresh_job_queue_list_all_s = 0 #you must wait at least 1s to update job_queue list
###Output
_____no_output_____
###Markdown
Queue Checking:
###Code
sub_lsf.get_queued_jobs()
sub_lsf.job_queue_list
###Output
Skipping refresh of job list, as the last update is 0:00:00.005036s ago
###Markdown
Submission: here you can submit jobs to the queue as bash commands
###Code
bash_command = "sleep 5; echo \"WUHA\"; sleep 2"
job_name = "Test1"
job_id = sub_lsf.submit_to_queue(command=bash_command, jobName=job_name)
#search for the just submitted job in the queue
sub_lsf.search_queue_for_jobid(job_id)
sub_lsf.search_queue_for_jobname("Test1")
###Output
_____no_output_____
###Markdown
Submitting multiple jobs
###Code
bash_command = "sleep 2; echo \"WUHA\"; sleep 2"
job_ids = []
for test in range(3):
job_name = "Test"+str(test)
job_id = sub_lsf.submit_to_queue(command=bash_command, jobName=job_name)
job_ids.append(job_id)
sub_lsf.search_queue_for_jobname("Te", regex=True)
###Output
_____no_output_____
###Markdown
Killing jobs: remove a job from the job queue
###Code
sub_lsf.kill_jobs(job_ids=[job_id])
sub_lsf.search_queue_for_jobname("Te", regex=True)
###Output
_____no_output_____
###Markdown
Submission Systems: Submission systems play an important role if you want to develop your PyGromos code. Many times they are hidden in the Simulation_runner blocks, but maybe you want to develop something where you need direct access to the submission system. This notebook gives some examples of how you can use the submission systems. Note that all submission systems are written in the same way, so you can exchange them quickly.
###Code
from pygromos.hpc_queuing.submission_systems import local # this executes your code in your local session.
from pygromos.hpc_queuing.submission_systems import lsf # this module can be used to submit to the lsf-queue (e.g. on euler)
from pygromos.hpc_queuing.submission_systems import dummy # this is a dummy system, that only prints the commands
###Output
_____no_output_____
###Markdown
Local Submission: This system executes the commands directly in your current session. This allows you to locally test or execute your code. If your process needs much more time, you may later want to switch to a submission system with job queueing.
###Code
sub_local = local.LOCAL()
sub_local.verbose = True
bash_command = "sleep 2; echo \"WUHA\"; sleep 2"
job_id = sub_local.submit_to_queue(bash_command)
job_id
#This is a dummy function, to not break the code!
sub_local.get_jobs_from_queue("FUN")
###Output
Searching ID: FUN
###Markdown
LSF Submission: The LSF submission system allows you to submit jobs to the IBM LSF queueing system.**Careful! This part requires a running LSF queueing system on your machine.** You can submit and kill jobs and job arrays, as well as get information from the queue.
###Code
#Construct system:
sub_lsf = lsf.LSF(nmpi=1, job_duration = "24:00", max_storage=100)
sub_lsf.verbose = True
sub_lsf._refresh_job_queue_list_all_s = 0 #you must wait at least 1s to update job_queue list
###Output
_____no_output_____
###Markdown
Queue Checking:
###Code
sub_lsf.get_queued_jobs()
sub_lsf.job_queue_list
###Output
Skipping refresh of job list, as the last update is 0:00:00.005036s ago
###Markdown
Submission: here you can submit jobs to the queue as bash commands
###Code
bash_command = "sleep 5; echo \"WUHA\"; sleep 2"
job_name = "Test1"
job_id = sub_lsf.submit_to_queue(command=bash_command, jobName=job_name)
#search for the just submitted job in the queue
sub_lsf.search_queue_for_jobid(job_id)
sub_lsf.search_queue_for_jobname("Test1")
###Output
_____no_output_____
###Markdown
Submitting multiple jobs
###Code
bash_command = "sleep 2; echo \"WUHA\"; sleep 2"
job_ids = []
for test in range(3):
job_name = "Test"+str(test)
job_id = sub_lsf.submit_to_queue(command=bash_command, jobName=job_name)
job_ids.append(job_id)
sub_lsf.search_queue_for_jobname("Te", regex=True)
###Output
_____no_output_____
###Markdown
Killing jobs: remove a job from the job queue
###Code
sub_lsf.kill_jobs(job_ids=[job_id])
sub_lsf.search_queue_for_jobname("Te", regex=True)
###Output
_____no_output_____ |
Capsule Network basic - SampleDataset.ipynb | ###Markdown
Load data
###Code
import pickle
train_filename = "C:/Users/behl/Desktop/lung disease/train_data_sample_gray.p"
(train_labels, train_data, train_tensors) = pickle.load(open(train_filename, mode='rb'))
valid_filename = "C:/Users/behl/Desktop/lung disease/valid_data_sample_gray.p"
(valid_labels, valid_data, valid_tensors) = pickle.load(open(valid_filename, mode='rb'))
test_filename = "C:/Users/behl/Desktop/lung disease/test_data_sample_gray.p"
(test_labels, test_data, test_tensors) = pickle.load(open(test_filename, mode='rb'))
def onhotLabels(label):
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder()
enc.fit(label)
return enc.transform(label).toarray()
train_labels = onhotLabels(train_labels)
valid_labels = onhotLabels(valid_labels)
test_labels = onhotLabels(test_labels)
###Output
c:\users\behl\appdata\local\programs\python\python37\lib\site-packages\sklearn\preprocessing\_encoders.py:415: FutureWarning: The handling of integer data will change in version 0.22. Currently, the categories are determined based on the range [0, max(values)], while in the future they will be determined based on the unique values.
If you want the future behaviour and silence this warning, you can specify "categories='auto'".
In case you used a LabelEncoder before this OneHotEncoder to convert the categories to integers, then you can now use the OneHotEncoder directly.
warnings.warn(msg, FutureWarning)
c:\users\behl\appdata\local\programs\python\python37\lib\site-packages\sklearn\preprocessing\_encoders.py:415: FutureWarning: The handling of integer data will change in version 0.22. Currently, the categories are determined based on the range [0, max(values)], while in the future they will be determined based on the unique values.
If you want the future behaviour and silence this warning, you can specify "categories='auto'".
In case you used a LabelEncoder before this OneHotEncoder to convert the categories to integers, then you can now use the OneHotEncoder directly.
warnings.warn(msg, FutureWarning)
c:\users\behl\appdata\local\programs\python\python37\lib\site-packages\sklearn\preprocessing\_encoders.py:415: FutureWarning: The handling of integer data will change in version 0.22. Currently, the categories are determined based on the range [0, max(values)], while in the future they will be determined based on the unique values.
If you want the future behaviour and silence this warning, you can specify "categories='auto'".
In case you used a LabelEncoder before this OneHotEncoder to convert the categories to integers, then you can now use the OneHotEncoder directly.
warnings.warn(msg, FutureWarning)
###Markdown
CapsNet model
###Code
import os
import argparse
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import callbacks
import numpy as np
from keras import layers, models, optimizers
from keras import backend as K
import matplotlib.pyplot as plt
from PIL import Image
from capsulelayers import CapsuleLayer, PrimaryCap, Length, Mask
def CapsNet(input_shape, n_class, routings):
"""
A Capsule Network on MNIST.
:param input_shape: data shape, 3d, [width, height, channels]
:param n_class: number of classes
:param routings: number of routing iterations
:return: Two Keras Models, the first one used for training, and the second one for evaluation.
`eval_model` can also be used for training.
"""
x = layers.Input(shape=input_shape)
# Layer 1: Just a conventional Conv2D layer
conv1 = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(x)
# Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]
primarycaps = PrimaryCap(conv1, dim_capsule=8, n_channels=32, kernel_size=9, strides=2, padding='valid')
# Layer 3: Capsule layer. Routing algorithm works here.
digitcaps = CapsuleLayer(num_capsule=n_class, dim_capsule=16, routings=routings,
name='digitcaps')(primarycaps)
# Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
# If using tensorflow, this will not be necessary. :)
out_caps = Length(name='capsnet')(digitcaps)
# Decoder network.
y = layers.Input(shape=(n_class,))
masked_by_y = Mask()([digitcaps, y]) # The true label is used to mask the output of capsule layer. For training
masked = Mask()(digitcaps) # Mask using the capsule with maximal length. For prediction
# Shared Decoder model in training and prediction
decoder = models.Sequential(name='decoder')
decoder.add(layers.Dense(512, activation='relu', input_dim=16*n_class))
decoder.add(layers.Dense(1024, activation='relu'))
decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))
# Models for training and evaluation (prediction)
train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])
eval_model = models.Model(x, [out_caps, decoder(masked)])
# manipulate model
noise = layers.Input(shape=(n_class, 16))
noised_digitcaps = layers.Add()([digitcaps, noise])
masked_noised_y = Mask()([noised_digitcaps, y])
manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))
return train_model, eval_model, manipulate_model
model, eval_model, manipulate_model = CapsNet(input_shape=train_tensors.shape[1:],
n_class=len(np.unique(train_labels)),
routings=4)
# The decoder is built inside CapsNet(); retrieve it from the composed model by its layer name.
model.get_layer('decoder').summary()
model.summary()
from keras import backend as K
def binary_accuracy(y_true, y_pred):
return K.mean(K.equal(y_true, K.round(y_pred)))
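# The three factories below return Keras metric closures that binarize predictions at a fixed
# decision threshold before computing precision, recall, and the F-beta score, respectively.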
def precision_threshold(threshold = 0.5):
def precision(y_true, y_pred):
threshold_value = threshold
y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold_value), K.floatx())
true_positives = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(y_pred)
precision_ratio = true_positives / (predicted_positives + K.epsilon())
return precision_ratio
return precision
def recall_threshold(threshold = 0.5):
def recall(y_true, y_pred):
threshold_value = threshold
y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold_value), K.floatx())
true_positives = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.clip(y_true, 0, 1))
recall_ratio = true_positives / (possible_positives + K.epsilon())
return recall_ratio
return recall
def fbeta_score_threshold(beta = 1, threshold = 0.5):
def fbeta_score(y_true, y_pred):
threshold_value = threshold
beta_value = beta
p = precision_threshold(threshold_value)(y_true, y_pred)
r = recall_threshold(threshold_value)(y_true, y_pred)
bb = beta_value ** 2
fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
return fbeta_score
return fbeta_score
def margin_loss(y_true, y_pred):
"""
    Margin loss for Eq.(4). When y_true[i, :] contains not just one `1`, this loss should work too, though this has not been tested.
:param y_true: [None, n_classes]
:param y_pred: [None, num_capsule]
:return: a scalar loss value.
"""
L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))
return K.mean(K.sum(L, 1))
def train(model, data, lr, lr_decay, lam_recon, batch_size, shift_fraction, epochs):
"""
Training a CapsuleNet
:param model: the CapsuleNet model
:param data: a tuple containing training and testing data, like `((x_train, y_train), (x_test, y_test))`
:param args: arguments
:return: The trained model
"""
# unpacking the data
(x_train, y_train), (x_test, y_test) = data
# callbacks
log = callbacks.CSVLogger('saved_models/CapsNet_log.csv')
tb = callbacks.TensorBoard(log_dir='saved_models/tensorboard-logs',
batch_size=batch_size, histogram_freq=0)
checkpoint = callbacks.ModelCheckpoint(filepath='saved_models/CapsNet.best.from_scratch.hdf5',
verbose=1, save_best_only=True)
cb_lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: lr * (lr_decay ** epoch))
# compile the model
model.compile(optimizer='sgd', loss='binary_crossentropy',
metrics=[precision_threshold(threshold = 0.5),
recall_threshold(threshold = 0.5),
fbeta_score_threshold(beta=0.5, threshold = 0.5),
'accuracy'])
# Training without data augmentation:
# model.fit([x_train, y_train], [y_train, x_train], batch_size=batch_size, epochs=epochs,
# validation_data=[[x_test, y_test], [y_test, x_test]], callbacks=[log, tb, checkpoint, cb_lr_decay])
# Begin: Training with data augmentation ---------------------------------------------------------------------#
def train_generator(x, y, batch_size, shift_fraction=0.):
train_datagen = ImageDataGenerator(width_shift_range=shift_fraction,
height_shift_range=shift_fraction) # shift up to 2 pixel for MNIST
generator = train_datagen.flow(x, y, batch_size=batch_size)
while 1:
x_batch, y_batch = generator.next()
yield ([x_batch, y_batch], [y_batch, x_batch])
# Training with data augmentation. If shift_fraction=0., also no augmentation.
model.fit_generator(generator=train_generator(x_train, y_train, batch_size, shift_fraction),
steps_per_epoch=int(y_train.shape[0] / batch_size),
epochs=epochs,
validation_data=[[x_test, y_test], [y_test, x_test]],
callbacks=[log, tb, checkpoint, cb_lr_decay])
# End: Training with data augmentation -----------------------------------------------------------------------#
from utils import plot_log
plot_log('saved_models/CapsNet_log.csv', show=True)
return model
train(model=model, data=((train_tensors, train_labels), (valid_tensors, valid_labels)),
lr=0.001, lr_decay=0.9, lam_recon=0.392, batch_size=32, shift_fraction=0.1, epochs=20)
###Output
Epoch 1/20
105/106 [============================>.] - ETA: 0s - loss: 1.3930 - capsnet_loss: 0.6998 - decoder_loss: 0.6932 - capsnet_precision: 0.5239 - capsnet_recall: 0.5429 - capsnet_fbeta_score: 0.5255 - capsnet_acc: 0.5246 - decoder_precision: 0.4958 - decoder_recall: 0.5000 - decoder_fbeta_score: 0.4965 - decoder_acc: 0.0140Epoch 00001: val_loss improved from inf to 1.38791, saving model to saved_models/CapsNet.best.from_scratch.hdf5
106/106 [==============================] - 75s 706ms/step - loss: 1.3927 - capsnet_loss: 0.6995 - decoder_loss: 0.6932 - capsnet_precision: 0.5240 - capsnet_recall: 0.5425 - capsnet_fbeta_score: 0.5255 - capsnet_acc: 0.5246 - decoder_precision: 0.4957 - decoder_recall: 0.5000 - decoder_fbeta_score: 0.4964 - decoder_acc: 0.0141 - val_loss: 1.3879 - val_capsnet_loss: 0.6947 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5193 - val_capsnet_recall: 0.5609 - val_capsnet_fbeta_score: 0.5261 - val_capsnet_acc: 0.5205 - val_decoder_precision: 0.4936 - val_decoder_recall: 0.4995 - val_decoder_fbeta_score: 0.4947 - val_decoder_acc: 0.0134
Epoch 2/20
105/106 [============================>.] - ETA: 0s - loss: 1.3884 - capsnet_loss: 0.6952 - decoder_loss: 0.6932 - capsnet_precision: 0.5248 - capsnet_recall: 0.5390 - capsnet_fbeta_score: 0.5255 - capsnet_acc: 0.5240 - decoder_precision: 0.4951 - decoder_recall: 0.5003 - decoder_fbeta_score: 0.4960 - decoder_acc: 0.0144Epoch 00002: val_loss improved from 1.38791 to 1.38579, saving model to saved_models/CapsNet.best.from_scratch.hdf5
106/106 [==============================] - 75s 704ms/step - loss: 1.3887 - capsnet_loss: 0.6955 - decoder_loss: 0.6932 - capsnet_precision: 0.5251 - capsnet_recall: 0.5389 - capsnet_fbeta_score: 0.5257 - capsnet_acc: 0.5242 - decoder_precision: 0.4952 - decoder_recall: 0.5003 - decoder_fbeta_score: 0.4961 - decoder_acc: 0.0144 - val_loss: 1.3858 - val_capsnet_loss: 0.6926 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5335 - val_capsnet_recall: 0.5736 - val_capsnet_fbeta_score: 0.5403 - val_capsnet_acc: 0.5350 - val_decoder_precision: 0.4935 - val_decoder_recall: 0.4998 - val_decoder_fbeta_score: 0.4947 - val_decoder_acc: 0.0134
Epoch 3/20
105/106 [============================>.] - ETA: 0s - loss: 1.3855 - capsnet_loss: 0.6923 - decoder_loss: 0.6932 - capsnet_precision: 0.5366 - capsnet_recall: 0.5429 - capsnet_fbeta_score: 0.5357 - capsnet_acc: 0.5366 - decoder_precision: 0.4947 - decoder_recall: 0.5006 - decoder_fbeta_score: 0.4958 - decoder_acc: 0.0144Epoch 00003: val_loss improved from 1.38579 to 1.38230, saving model to saved_models/CapsNet.best.from_scratch.hdf5
106/106 [==============================] - 75s 705ms/step - loss: 1.3858 - capsnet_loss: 0.6926 - decoder_loss: 0.6932 - capsnet_precision: 0.5361 - capsnet_recall: 0.5422 - capsnet_fbeta_score: 0.5352 - capsnet_acc: 0.5361 - decoder_precision: 0.4948 - decoder_recall: 0.5006 - decoder_fbeta_score: 0.4959 - decoder_acc: 0.0143 - val_loss: 1.3823 - val_capsnet_loss: 0.6891 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5379 - val_capsnet_recall: 0.5927 - val_capsnet_fbeta_score: 0.5471 - val_capsnet_acc: 0.5414 - val_decoder_precision: 0.4935 - val_decoder_recall: 0.5000 - val_decoder_fbeta_score: 0.4947 - val_decoder_acc: 0.0134
Epoch 4/20
105/106 [============================>.] - ETA: 0s - loss: 1.3836 - capsnet_loss: 0.6904 - decoder_loss: 0.6932 - capsnet_precision: 0.5364 - capsnet_recall: 0.5399 - capsnet_fbeta_score: 0.5356 - capsnet_acc: 0.5354 - decoder_precision: 0.4955 - decoder_recall: 0.5006 - decoder_fbeta_score: 0.4965 - decoder_acc: 0.0139Epoch 00004: val_loss improved from 1.38230 to 1.37998, saving model to saved_models/CapsNet.best.from_scratch.hdf5
106/106 [==============================] - 75s 706ms/step - loss: 1.3836 - capsnet_loss: 0.6904 - decoder_loss: 0.6932 - capsnet_precision: 0.5365 - capsnet_recall: 0.5416 - capsnet_fbeta_score: 0.5360 - capsnet_acc: 0.5357 - decoder_precision: 0.4956 - decoder_recall: 0.5006 - decoder_fbeta_score: 0.4965 - decoder_acc: 0.0140 - val_loss: 1.3800 - val_capsnet_loss: 0.6868 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5505 - val_capsnet_recall: 0.5827 - val_capsnet_fbeta_score: 0.5558 - val_capsnet_acc: 0.5514 - val_decoder_precision: 0.4935 - val_decoder_recall: 0.4998 - val_decoder_fbeta_score: 0.4947 - val_decoder_acc: 0.0133
Epoch 5/20
105/106 [============================>.] - ETA: 0s - loss: 1.3824 - capsnet_loss: 0.6892 - decoder_loss: 0.6932 - capsnet_precision: 0.5426 - capsnet_recall: 0.5467 - capsnet_fbeta_score: 0.5415 - capsnet_acc: 0.5408 - decoder_precision: 0.4951 - decoder_recall: 0.5004 - decoder_fbeta_score: 0.4961 - decoder_acc: 0.0138Epoch 00005: val_loss did not improve
106/106 [==============================] - 74s 702ms/step - loss: 1.3823 - capsnet_loss: 0.6891 - decoder_loss: 0.6932 - capsnet_precision: 0.5431 - capsnet_recall: 0.5463 - capsnet_fbeta_score: 0.5418 - capsnet_acc: 0.5411 - decoder_precision: 0.4951 - decoder_recall: 0.5004 - decoder_fbeta_score: 0.4961 - decoder_acc: 0.0139 - val_loss: 1.3801 - val_capsnet_loss: 0.6869 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5561 - val_capsnet_recall: 0.5482 - val_capsnet_fbeta_score: 0.5539 - val_capsnet_acc: 0.5545 - val_decoder_precision: 0.4935 - val_decoder_recall: 0.4997 - val_decoder_fbeta_score: 0.4946 - val_decoder_acc: 0.0133
Epoch 6/20
105/106 [============================>.] - ETA: 0s - loss: 1.3810 - capsnet_loss: 0.6878 - decoder_loss: 0.6932 - capsnet_precision: 0.5544 - capsnet_recall: 0.5488 - capsnet_fbeta_score: 0.5516 - capsnet_acc: 0.5513 - decoder_precision: 0.4948 - decoder_recall: 0.5006 - decoder_fbeta_score: 0.4958 - decoder_acc: 0.0144Epoch 00006: val_loss improved from 1.37998 to 1.37739, saving model to saved_models/CapsNet.best.from_scratch.hdf5
106/106 [==============================] - 75s 706ms/step - loss: 1.3809 - capsnet_loss: 0.6877 - decoder_loss: 0.6932 - capsnet_precision: 0.5544 - capsnet_recall: 0.5495 - capsnet_fbeta_score: 0.5517 - capsnet_acc: 0.5515 - decoder_precision: 0.4949 - decoder_recall: 0.5006 - decoder_fbeta_score: 0.4960 - decoder_acc: 0.0144 - val_loss: 1.3774 - val_capsnet_loss: 0.6842 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5613 - val_capsnet_recall: 0.5745 - val_capsnet_fbeta_score: 0.5635 - val_capsnet_acc: 0.5627 - val_decoder_precision: 0.4934 - val_decoder_recall: 0.4998 - val_decoder_fbeta_score: 0.4946 - val_decoder_acc: 0.0133
Epoch 7/20
105/106 [============================>.] - ETA: 0s - loss: 1.3769 - capsnet_loss: 0.6837 - decoder_loss: 0.6932 - capsnet_precision: 0.5619 - capsnet_recall: 0.5449 - capsnet_fbeta_score: 0.5571 - capsnet_acc: 0.5603 - decoder_precision: 0.4959 - decoder_recall: 0.5007 - decoder_fbeta_score: 0.4968 - decoder_acc: 0.0142Epoch 00007: val_loss improved from 1.37739 to 1.37663, saving model to saved_models/CapsNet.best.from_scratch.hdf5
106/106 [==============================] - 75s 707ms/step - loss: 1.3769 - capsnet_loss: 0.6837 - decoder_loss: 0.6932 - capsnet_precision: 0.5619 - capsnet_recall: 0.5460 - capsnet_fbeta_score: 0.5574 - capsnet_acc: 0.5604 - decoder_precision: 0.4960 - decoder_recall: 0.5007 - decoder_fbeta_score: 0.4969 - decoder_acc: 0.0143 - val_loss: 1.3766 - val_capsnet_loss: 0.6834 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5654 - val_capsnet_recall: 0.5855 - val_capsnet_fbeta_score: 0.5689 - val_capsnet_acc: 0.5673 - val_decoder_precision: 0.4934 - val_decoder_recall: 0.4999 - val_decoder_fbeta_score: 0.4947 - val_decoder_acc: 0.0133
Epoch 8/20
105/106 [============================>.] - ETA: 0s - loss: 1.3781 - capsnet_loss: 0.6849 - decoder_loss: 0.6932 - capsnet_precision: 0.5571 - capsnet_recall: 0.5503 - capsnet_fbeta_score: 0.5542 - capsnet_acc: 0.5563 - decoder_precision: 0.4962 - decoder_recall: 0.5008 - decoder_fbeta_score: 0.4970 - decoder_acc: 0.0142Epoch 00008: val_loss improved from 1.37663 to 1.37453, saving model to saved_models/CapsNet.best.from_scratch.hdf5
106/106 [==============================] - 75s 706ms/step - loss: 1.3780 - capsnet_loss: 0.6848 - decoder_loss: 0.6932 - capsnet_precision: 0.5576 - capsnet_recall: 0.5507 - capsnet_fbeta_score: 0.5547 - capsnet_acc: 0.5567 - decoder_precision: 0.4961 - decoder_recall: 0.5008 - decoder_fbeta_score: 0.4970 - decoder_acc: 0.0142 - val_loss: 1.3745 - val_capsnet_loss: 0.6813 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5692 - val_capsnet_recall: 0.5855 - val_capsnet_fbeta_score: 0.5720 - val_capsnet_acc: 0.5709 - val_decoder_precision: 0.4934 - val_decoder_recall: 0.5000 - val_decoder_fbeta_score: 0.4947 - val_decoder_acc: 0.0133
###Markdown
Testing
###Code
model.load_weights('saved_models/CapsNet.best.from_scratch.hdf5')
prediction = eval_model.predict(test_tensors)
threshold = 0.5
beta = 0.5
pre = K.eval(precision_threshold(threshold = threshold)(K.variable(value=test_labels),
K.variable(value=prediction[0])))
rec = K.eval(recall_threshold(threshold = threshold)(K.variable(value=test_labels),
K.variable(value=prediction[0])))
fsc = K.eval(fbeta_score_threshold(beta = beta, threshold = threshold)(K.variable(value=test_labels),
K.variable(value=prediction[0])))
print ("Precision: %f %%\nRecall: %f %%\nFscore: %f %%"% (pre, rec, fsc))
K.eval(binary_accuracy(K.variable(value=test_labels),
K.variable(value=prediction[0])))
prediction[:30]
###Output
_____no_output_____
###Markdown
Load data
###Code
import pickle
train_filename = "data_preprocessed/train_data_sample_gray.p"
(train_labels, train_data, train_tensors) = pickle.load(open(train_filename, mode='rb'))
valid_filename = "data_preprocessed/valid_data_sample_gray.p"
(valid_labels, valid_data, valid_tensors) = pickle.load(open(valid_filename, mode='rb'))
test_filename = "data_preprocessed/test_data_sample_gray.p"
(test_labels, test_data, test_tensors) = pickle.load(open(test_filename, mode='rb'))
def onhotLabels(label):
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder()
enc.fit(label)
return enc.transform(label).toarray()
train_labels = onhotLabels(train_labels)
valid_labels = onhotLabels(valid_labels)
test_labels = onhotLabels(test_labels)
###Output
_____no_output_____
###Markdown
CapsNet model
###Code
import os
import argparse
from keras.preprocessing.image import ImageDataGenerator
from keras import callbacks
import numpy as np
from keras import layers, models, optimizers
from keras import backend as K
import matplotlib.pyplot as plt
from PIL import Image
from capsulelayers import CapsuleLayer, PrimaryCap, Length, Mask
def CapsNet(input_shape, n_class, routings):
"""
A Capsule Network on MNIST.
:param input_shape: data shape, 3d, [width, height, channels]
:param n_class: number of classes
:param routings: number of routing iterations
:return: Two Keras Models, the first one used for training, and the second one for evaluation.
`eval_model` can also be used for training.
"""
x = layers.Input(shape=input_shape)
# Layer 1: Just a conventional Conv2D layer
conv1 = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu', name='conv1')(x)
# Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]
primarycaps = PrimaryCap(conv1, dim_capsule=8, n_channels=32, kernel_size=9, strides=2, padding='valid')
# Layer 3: Capsule layer. Routing algorithm works here.
digitcaps = CapsuleLayer(num_capsule=n_class, dim_capsule=16, routings=routings,
name='digitcaps')(primarycaps)
# Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
# If using tensorflow, this will not be necessary. :)
out_caps = Length(name='capsnet')(digitcaps)
# Decoder network.
y = layers.Input(shape=(n_class,))
masked_by_y = Mask()([digitcaps, y]) # The true label is used to mask the output of capsule layer. For training
masked = Mask()(digitcaps) # Mask using the capsule with maximal length. For prediction
# Shared Decoder model in training and prediction
decoder = models.Sequential(name='decoder')
decoder.add(layers.Dense(512, activation='relu', input_dim=16*n_class))
decoder.add(layers.Dense(1024, activation='relu'))
decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))
# Models for training and evaluation (prediction)
train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])
eval_model = models.Model(x, [out_caps, decoder(masked)])
# manipulate model
noise = layers.Input(shape=(n_class, 16))
noised_digitcaps = layers.Add()([digitcaps, noise])
masked_noised_y = Mask()([noised_digitcaps, y])
manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))
return train_model, eval_model, manipulate_model
model, eval_model, manipulate_model = CapsNet(input_shape=train_tensors.shape[1:],
n_class=len(np.unique(train_labels)),
routings=4)
# The decoder is built inside CapsNet(); retrieve it from the composed model by its layer name.
model.get_layer('decoder').summary()
model.summary()
from keras import backend as K
def binary_accuracy(y_true, y_pred):
return K.mean(K.equal(y_true, K.round(y_pred)))
def precision_threshold(threshold = 0.5):
def precision(y_true, y_pred):
threshold_value = threshold
y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold_value), K.floatx())
true_positives = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(y_pred)
precision_ratio = true_positives / (predicted_positives + K.epsilon())
return precision_ratio
return precision
def recall_threshold(threshold = 0.5):
def recall(y_true, y_pred):
threshold_value = threshold
y_pred = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold_value), K.floatx())
true_positives = K.round(K.sum(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.clip(y_true, 0, 1))
recall_ratio = true_positives / (possible_positives + K.epsilon())
return recall_ratio
return recall
def fbeta_score_threshold(beta = 1, threshold = 0.5):
def fbeta_score(y_true, y_pred):
threshold_value = threshold
beta_value = beta
p = precision_threshold(threshold_value)(y_true, y_pred)
r = recall_threshold(threshold_value)(y_true, y_pred)
bb = beta_value ** 2
fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
return fbeta_score
return fbeta_score
def margin_loss(y_true, y_pred):
"""
    Margin loss for Eq.(4). When y_true[i, :] contains not just one `1`, this loss should work too, though this has not been tested.
:param y_true: [None, n_classes]
:param y_pred: [None, num_capsule]
:return: a scalar loss value.
"""
L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))
return K.mean(K.sum(L, 1))
def train(model, data, lr, lr_decay, lam_recon, batch_size, shift_fraction, epochs):
"""
Training a CapsuleNet
:param model: the CapsuleNet model
:param data: a tuple containing training and testing data, like `((x_train, y_train), (x_test, y_test))`
:param args: arguments
:return: The trained model
"""
# unpacking the data
(x_train, y_train), (x_test, y_test) = data
# callbacks
log = callbacks.CSVLogger('saved_models/CapsNet_log.csv')
tb = callbacks.TensorBoard(log_dir='saved_models/tensorboard-logs',
batch_size=batch_size, histogram_freq=0)
checkpoint = callbacks.ModelCheckpoint(filepath='saved_models/CapsNet.best.from_scratch.hdf5',
verbose=1, save_best_only=True)
cb_lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: lr * (lr_decay ** epoch))
# compile the model
model.compile(optimizer='sgd', loss='binary_crossentropy',
metrics=[precision_threshold(threshold = 0.5),
recall_threshold(threshold = 0.5),
fbeta_score_threshold(beta=0.5, threshold = 0.5),
'accuracy'])
# Training without data augmentation:
# model.fit([x_train, y_train], [y_train, x_train], batch_size=batch_size, epochs=epochs,
# validation_data=[[x_test, y_test], [y_test, x_test]], callbacks=[log, tb, checkpoint, cb_lr_decay])
# Begin: Training with data augmentation ---------------------------------------------------------------------#
def train_generator(x, y, batch_size, shift_fraction=0.):
train_datagen = ImageDataGenerator(width_shift_range=shift_fraction,
height_shift_range=shift_fraction) # shift up to 2 pixel for MNIST
generator = train_datagen.flow(x, y, batch_size=batch_size)
while 1:
x_batch, y_batch = generator.next()
yield ([x_batch, y_batch], [y_batch, x_batch])
# Training with data augmentation. If shift_fraction=0., also no augmentation.
model.fit_generator(generator=train_generator(x_train, y_train, batch_size, shift_fraction),
steps_per_epoch=int(y_train.shape[0] / batch_size),
epochs=epochs,
validation_data=[[x_test, y_test], [y_test, x_test]],
callbacks=[log, tb, checkpoint, cb_lr_decay])
# End: Training with data augmentation -----------------------------------------------------------------------#
from utils import plot_log
plot_log('saved_models/CapsNet_log.csv', show=True)
return model
train(model=model, data=((train_tensors, train_labels), (valid_tensors, valid_labels)),
lr=0.001, lr_decay=0.9, lam_recon=0.392, batch_size=32, shift_fraction=0.1, epochs=20)
###Output
Epoch 1/20
105/106 [============================>.] - ETA: 0s - loss: 1.3930 - capsnet_loss: 0.6998 - decoder_loss: 0.6932 - capsnet_precision: 0.5239 - capsnet_recall: 0.5429 - capsnet_fbeta_score: 0.5255 - capsnet_acc: 0.5246 - decoder_precision: 0.4958 - decoder_recall: 0.5000 - decoder_fbeta_score: 0.4965 - decoder_acc: 0.0140Epoch 00001: val_loss improved from inf to 1.38791, saving model to saved_models/CapsNet.best.from_scratch.hdf5
106/106 [==============================] - 75s 706ms/step - loss: 1.3927 - capsnet_loss: 0.6995 - decoder_loss: 0.6932 - capsnet_precision: 0.5240 - capsnet_recall: 0.5425 - capsnet_fbeta_score: 0.5255 - capsnet_acc: 0.5246 - decoder_precision: 0.4957 - decoder_recall: 0.5000 - decoder_fbeta_score: 0.4964 - decoder_acc: 0.0141 - val_loss: 1.3879 - val_capsnet_loss: 0.6947 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5193 - val_capsnet_recall: 0.5609 - val_capsnet_fbeta_score: 0.5261 - val_capsnet_acc: 0.5205 - val_decoder_precision: 0.4936 - val_decoder_recall: 0.4995 - val_decoder_fbeta_score: 0.4947 - val_decoder_acc: 0.0134
Epoch 2/20
105/106 [============================>.] - ETA: 0s - loss: 1.3884 - capsnet_loss: 0.6952 - decoder_loss: 0.6932 - capsnet_precision: 0.5248 - capsnet_recall: 0.5390 - capsnet_fbeta_score: 0.5255 - capsnet_acc: 0.5240 - decoder_precision: 0.4951 - decoder_recall: 0.5003 - decoder_fbeta_score: 0.4960 - decoder_acc: 0.0144Epoch 00002: val_loss improved from 1.38791 to 1.38579, saving model to saved_models/CapsNet.best.from_scratch.hdf5
106/106 [==============================] - 75s 704ms/step - loss: 1.3887 - capsnet_loss: 0.6955 - decoder_loss: 0.6932 - capsnet_precision: 0.5251 - capsnet_recall: 0.5389 - capsnet_fbeta_score: 0.5257 - capsnet_acc: 0.5242 - decoder_precision: 0.4952 - decoder_recall: 0.5003 - decoder_fbeta_score: 0.4961 - decoder_acc: 0.0144 - val_loss: 1.3858 - val_capsnet_loss: 0.6926 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5335 - val_capsnet_recall: 0.5736 - val_capsnet_fbeta_score: 0.5403 - val_capsnet_acc: 0.5350 - val_decoder_precision: 0.4935 - val_decoder_recall: 0.4998 - val_decoder_fbeta_score: 0.4947 - val_decoder_acc: 0.0134
Epoch 3/20
105/106 [============================>.] - ETA: 0s - loss: 1.3855 - capsnet_loss: 0.6923 - decoder_loss: 0.6932 - capsnet_precision: 0.5366 - capsnet_recall: 0.5429 - capsnet_fbeta_score: 0.5357 - capsnet_acc: 0.5366 - decoder_precision: 0.4947 - decoder_recall: 0.5006 - decoder_fbeta_score: 0.4958 - decoder_acc: 0.0144Epoch 00003: val_loss improved from 1.38579 to 1.38230, saving model to saved_models/CapsNet.best.from_scratch.hdf5
106/106 [==============================] - 75s 705ms/step - loss: 1.3858 - capsnet_loss: 0.6926 - decoder_loss: 0.6932 - capsnet_precision: 0.5361 - capsnet_recall: 0.5422 - capsnet_fbeta_score: 0.5352 - capsnet_acc: 0.5361 - decoder_precision: 0.4948 - decoder_recall: 0.5006 - decoder_fbeta_score: 0.4959 - decoder_acc: 0.0143 - val_loss: 1.3823 - val_capsnet_loss: 0.6891 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5379 - val_capsnet_recall: 0.5927 - val_capsnet_fbeta_score: 0.5471 - val_capsnet_acc: 0.5414 - val_decoder_precision: 0.4935 - val_decoder_recall: 0.5000 - val_decoder_fbeta_score: 0.4947 - val_decoder_acc: 0.0134
Epoch 4/20
105/106 [============================>.] - ETA: 0s - loss: 1.3836 - capsnet_loss: 0.6904 - decoder_loss: 0.6932 - capsnet_precision: 0.5364 - capsnet_recall: 0.5399 - capsnet_fbeta_score: 0.5356 - capsnet_acc: 0.5354 - decoder_precision: 0.4955 - decoder_recall: 0.5006 - decoder_fbeta_score: 0.4965 - decoder_acc: 0.0139Epoch 00004: val_loss improved from 1.38230 to 1.37998, saving model to saved_models/CapsNet.best.from_scratch.hdf5
106/106 [==============================] - 75s 706ms/step - loss: 1.3836 - capsnet_loss: 0.6904 - decoder_loss: 0.6932 - capsnet_precision: 0.5365 - capsnet_recall: 0.5416 - capsnet_fbeta_score: 0.5360 - capsnet_acc: 0.5357 - decoder_precision: 0.4956 - decoder_recall: 0.5006 - decoder_fbeta_score: 0.4965 - decoder_acc: 0.0140 - val_loss: 1.3800 - val_capsnet_loss: 0.6868 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5505 - val_capsnet_recall: 0.5827 - val_capsnet_fbeta_score: 0.5558 - val_capsnet_acc: 0.5514 - val_decoder_precision: 0.4935 - val_decoder_recall: 0.4998 - val_decoder_fbeta_score: 0.4947 - val_decoder_acc: 0.0133
Epoch 5/20
105/106 [============================>.] - ETA: 0s - loss: 1.3824 - capsnet_loss: 0.6892 - decoder_loss: 0.6932 - capsnet_precision: 0.5426 - capsnet_recall: 0.5467 - capsnet_fbeta_score: 0.5415 - capsnet_acc: 0.5408 - decoder_precision: 0.4951 - decoder_recall: 0.5004 - decoder_fbeta_score: 0.4961 - decoder_acc: 0.0138Epoch 00005: val_loss did not improve
106/106 [==============================] - 74s 702ms/step - loss: 1.3823 - capsnet_loss: 0.6891 - decoder_loss: 0.6932 - capsnet_precision: 0.5431 - capsnet_recall: 0.5463 - capsnet_fbeta_score: 0.5418 - capsnet_acc: 0.5411 - decoder_precision: 0.4951 - decoder_recall: 0.5004 - decoder_fbeta_score: 0.4961 - decoder_acc: 0.0139 - val_loss: 1.3801 - val_capsnet_loss: 0.6869 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5561 - val_capsnet_recall: 0.5482 - val_capsnet_fbeta_score: 0.5539 - val_capsnet_acc: 0.5545 - val_decoder_precision: 0.4935 - val_decoder_recall: 0.4997 - val_decoder_fbeta_score: 0.4946 - val_decoder_acc: 0.0133
Epoch 6/20
105/106 [============================>.] - ETA: 0s - loss: 1.3810 - capsnet_loss: 0.6878 - decoder_loss: 0.6932 - capsnet_precision: 0.5544 - capsnet_recall: 0.5488 - capsnet_fbeta_score: 0.5516 - capsnet_acc: 0.5513 - decoder_precision: 0.4948 - decoder_recall: 0.5006 - decoder_fbeta_score: 0.4958 - decoder_acc: 0.0144Epoch 00006: val_loss improved from 1.37998 to 1.37739, saving model to saved_models/CapsNet.best.from_scratch.hdf5
106/106 [==============================] - 75s 706ms/step - loss: 1.3809 - capsnet_loss: 0.6877 - decoder_loss: 0.6932 - capsnet_precision: 0.5544 - capsnet_recall: 0.5495 - capsnet_fbeta_score: 0.5517 - capsnet_acc: 0.5515 - decoder_precision: 0.4949 - decoder_recall: 0.5006 - decoder_fbeta_score: 0.4960 - decoder_acc: 0.0144 - val_loss: 1.3774 - val_capsnet_loss: 0.6842 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5613 - val_capsnet_recall: 0.5745 - val_capsnet_fbeta_score: 0.5635 - val_capsnet_acc: 0.5627 - val_decoder_precision: 0.4934 - val_decoder_recall: 0.4998 - val_decoder_fbeta_score: 0.4946 - val_decoder_acc: 0.0133
Epoch 7/20
105/106 [============================>.] - ETA: 0s - loss: 1.3769 - capsnet_loss: 0.6837 - decoder_loss: 0.6932 - capsnet_precision: 0.5619 - capsnet_recall: 0.5449 - capsnet_fbeta_score: 0.5571 - capsnet_acc: 0.5603 - decoder_precision: 0.4959 - decoder_recall: 0.5007 - decoder_fbeta_score: 0.4968 - decoder_acc: 0.0142Epoch 00007: val_loss improved from 1.37739 to 1.37663, saving model to saved_models/CapsNet.best.from_scratch.hdf5
106/106 [==============================] - 75s 707ms/step - loss: 1.3769 - capsnet_loss: 0.6837 - decoder_loss: 0.6932 - capsnet_precision: 0.5619 - capsnet_recall: 0.5460 - capsnet_fbeta_score: 0.5574 - capsnet_acc: 0.5604 - decoder_precision: 0.4960 - decoder_recall: 0.5007 - decoder_fbeta_score: 0.4969 - decoder_acc: 0.0143 - val_loss: 1.3766 - val_capsnet_loss: 0.6834 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5654 - val_capsnet_recall: 0.5855 - val_capsnet_fbeta_score: 0.5689 - val_capsnet_acc: 0.5673 - val_decoder_precision: 0.4934 - val_decoder_recall: 0.4999 - val_decoder_fbeta_score: 0.4947 - val_decoder_acc: 0.0133
Epoch 8/20
105/106 [============================>.] - ETA: 0s - loss: 1.3781 - capsnet_loss: 0.6849 - decoder_loss: 0.6932 - capsnet_precision: 0.5571 - capsnet_recall: 0.5503 - capsnet_fbeta_score: 0.5542 - capsnet_acc: 0.5563 - decoder_precision: 0.4962 - decoder_recall: 0.5008 - decoder_fbeta_score: 0.4970 - decoder_acc: 0.0142Epoch 00008: val_loss improved from 1.37663 to 1.37453, saving model to saved_models/CapsNet.best.from_scratch.hdf5
106/106 [==============================] - 75s 706ms/step - loss: 1.3780 - capsnet_loss: 0.6848 - decoder_loss: 0.6932 - capsnet_precision: 0.5576 - capsnet_recall: 0.5507 - capsnet_fbeta_score: 0.5547 - capsnet_acc: 0.5567 - decoder_precision: 0.4961 - decoder_recall: 0.5008 - decoder_fbeta_score: 0.4970 - decoder_acc: 0.0142 - val_loss: 1.3745 - val_capsnet_loss: 0.6813 - val_decoder_loss: 0.6932 - val_capsnet_precision: 0.5692 - val_capsnet_recall: 0.5855 - val_capsnet_fbeta_score: 0.5720 - val_capsnet_acc: 0.5709 - val_decoder_precision: 0.4934 - val_decoder_recall: 0.5000 - val_decoder_fbeta_score: 0.4947 - val_decoder_acc: 0.0133
###Markdown
Testing
###Code
model.load_weights('saved_models/CapsNet.best.from_scratch.hdf5')
prediction = eval_model.predict(test_tensors)
threshold = 0.5
beta = 0.5
pre = K.eval(precision_threshold(threshold = threshold)(K.variable(value=test_labels),
K.variable(value=prediction[0])))
rec = K.eval(recall_threshold(threshold = threshold)(K.variable(value=test_labels),
K.variable(value=prediction[0])))
fsc = K.eval(fbeta_score_threshold(beta = beta, threshold = threshold)(K.variable(value=test_labels),
K.variable(value=prediction[0])))
print ("Precision: %f %%\nRecall: %f %%\nFscore: %f %%"% (pre, rec, fsc))
K.eval(binary_accuracy(K.variable(value=test_labels),
K.variable(value=prediction[0])))
prediction[:30]
###Output
_____no_output_____ |
Natural Language Processing in TensorFlow/Week 1 Sentiment in text/Course_3_Week_1_Lesson_1.ipynb | ###Markdown
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow.keras.preprocessing.text import Tokenizer
sentences = [
'i love my dog',
'I, love my cat',
'You love my dog!'
]
tokenizer = Tokenizer(num_words = 100)
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index
print(word_index)
###Output
{'love': 1, 'my': 2, 'i': 3, 'dog': 4, 'cat': 5, 'you': 6}
|
archive/Alg_Comparison/Domain_CA_multidimensional_sinewave_plot_explore.ipynb | ###Markdown
Imports and Set-Up
###Code
!pip3 install higher
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
import torch
from torch.autograd import Variable
import random
from higher import innerloop_ctx
import warnings
#The code produces extensive warnings when run, so we use this to suppress them
warnings.filterwarnings("ignore")
#Set random seeds for reproducibility of results
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
# set GPU or CPU depending on available hardware
# help from: https://stackoverflow.com/questions/46704352/porting-pytorch-code-from-cpu-to-gpu
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Available device: {device}")
if device == "cuda:0":
# set default so all tensors are on GPU, if available
# help from: https://stackoverflow.com/questions/46704352/porting-pytorch-code-from-cpu-to-gpu
torch.set_default_tensor_type('torch.cuda.FloatTensor')
domain_type = "multidim_sine"
###Output
Requirement already satisfied: higher in /Users/kcollins/opt/anaconda3/lib/python3.8/site-packages (0.2.1)
Requirement already satisfied: torch in /Users/kcollins/opt/anaconda3/lib/python3.8/site-packages (from higher) (1.10.2)
Requirement already satisfied: typing-extensions in /Users/kcollins/opt/anaconda3/lib/python3.8/site-packages (from torch->higher) (3.7.4.3)
Available device: cpu
###Markdown
Data Loading and Generation This Sine function generator is based on the repository: https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
###Code
class SineWaveTask_multi:
def __init__(self,dimensions=20):
self.dimensions = dimensions
self.a = []
self.b = []
for dim in range(self.dimensions):
self.a.append(np.random.uniform(0.1, 5.0))
self.b.append(np.random.uniform(0, 2*np.pi))
self.train_x = None
def f(self, x,a,b):
return a * np.sin(x + b)
def training_set(self, size=10, force_new=False):
if self.train_x is None and not force_new:
self.train_x = np.random.uniform(-5, 5, size)
x = self.train_x
elif not force_new:
x = self.train_x
else:
x = np.random.uniform(-5, 5, size)
y = self.f(x,self.a[0],self.b[0])[:,None]
for dim in range(self.dimensions-1):
y = np.concatenate((y,self.f(x,self.a[dim+1],self.b[dim+1])[:,None]),axis=-1)
return torch.Tensor(x[:,None]), torch.Tensor(y)
def test_set(self, size=50):
x = np.linspace(-5, 5, size)
y = self.f(x,self.a[0],self.b[0])[:,None]
for dim in range(self.dimensions-1):
y = np.concatenate((y,self.f(x,self.a[dim+1],self.b[dim+1])[:,None]),axis=-1)
return torch.Tensor(x[:,None]), torch.Tensor(y)
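# Note on shapes: training_set()/test_set() return x of shape (size, 1) and y of shape (size, 20),
# i.e. each scalar input is mapped to 20 sine outputs, one per (amplitude a[d], phase b[d]) pair.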
TRAIN_SIZE = 20000
TEST_SIZE = 1000
SINE_TRAIN = [SineWaveTask_multi() for _ in range(TRAIN_SIZE)]
SINE_TEST = [SineWaveTask_multi() for _ in range(TEST_SIZE)]
x, y_true = SINE_TRAIN[0].training_set()
y_true.shape
###Output
_____no_output_____
###Markdown
Neural Network Model
###Code
# Define network
class Neural_Network_multi(nn.Module):
def __init__(self, input_size=1, hidden_size=40, output_size=20):
super(Neural_Network_multi, self).__init__()
# network layers
self.hidden1 = nn.Linear(input_size,hidden_size)
self.hidden2 = nn.Linear(hidden_size,hidden_size)
self.output_layer = nn.Linear(hidden_size,output_size)
#Activation functions
self.relu = nn.ReLU()
def forward(self, x):
x = self.hidden1(x)
x = self.relu(x)
x = self.hidden2(x)
x = self.relu(x)
x = self.output_layer(x)
y = x
return y
###Output
_____no_output_____
###Markdown
Helper functions
###Code
# The Minimum Square Error is used to evaluate the difference between prediction and ground truth
criterion = nn.MSELoss()
def copy_existing_model(model):
# Function to copy an existing model
# We initialize a new model
new_model = Neural_Network_multi()
# Copy the previous model's parameters into the new model
new_model.load_state_dict(model.state_dict())
return new_model
def get_samples_in_good_format(wave, num_samples=10, force_new=False):
#This function is used to sample data from a wave
x, y_true = wave.training_set(size=num_samples, force_new=force_new)
    # We add [:,None] to get the right dimensions to pass to the model: we want K x 1 (we have scalar inputs, hence the x 1)
# Note that we convert everything torch tensors
x = torch.tensor(x)
y_true = torch.tensor(y_true)
return x.to(device),y_true.to(device)
def initialization_to_store_meta_losses():
# This function creates lists to store the meta losses
global store_train_loss_meta; store_train_loss_meta = []
global store_test_loss_meta; store_test_loss_meta = []
def test_set_validation(model,new_model,wave,lr_inner,k,store_test_loss_meta):
    # This function does not actually affect the main algorithm; it is just used to evaluate the new model
new_model = training(model, wave, lr_inner, k)
# Obtain the loss
loss = evaluation(new_model, wave)
# Store loss
store_test_loss_meta.append(loss)
def train_set_evaluation(new_model,wave,store_train_loss_meta):
loss = evaluation(new_model, wave)
store_train_loss_meta.append(loss)
def print_losses(epoch,store_train_loss_meta,store_test_loss_meta,printing_step=1000):
if epoch % printing_step == 0:
        print(f'Epoch : {epoch}, Average Train Meta Loss : {np.mean(store_train_loss_meta)}, Average Test Meta Loss : {np.mean(store_test_loss_meta)}')
#This follows the paper's update rule: we calculate the difference between parameters, and the optimizer then uses it, rather than doing the update by hand
def reptile_parameter_update(model,new_model):
# Zip models for the loop
zip_models = zip(model.parameters(), new_model.parameters())
for parameter, new_parameter in zip_models:
if parameter.grad is None:
parameter.grad = torch.tensor(torch.zeros_like(parameter))
# Here we are adding the gradient that will later be used by the optimizer
parameter.grad.data.add_(parameter.data - new_parameter.data)
# Define commands in order needed for the metaupdate
# Note that if we change the order it doesn't behave the same
def metaoptimizer_update(metaoptimizer):
# Take step
metaoptimizer.step()
# Reset gradients
metaoptimizer.zero_grad()
def metaupdate(model,new_model,metaoptimizer):
# Combine the two previous functions into a single metaupdate function
# First we calculate the gradients
reptile_parameter_update(model,new_model)
# Use those gradients in the optimizer
metaoptimizer_update(metaoptimizer)
def evaluation(new_model, wave, num_samples=10, force_new=False, item = False):
# Get data
x, label = get_samples_in_good_format(wave,num_samples=num_samples, force_new=force_new)
# Make model prediction
prediction = new_model(x)
# Get loss
if item == True: #Depending on whether we need to return the loss value for storing or for backprop
loss = criterion(prediction,label).item()
else:
loss = criterion(prediction,label)
return loss
def training(model, wave, lr_k, k):
# Create new model which we will train on
new_model = copy_existing_model(model)
# Define new optimizer
koptimizer = torch.optim.SGD(new_model.parameters(), lr=lr_k)
# Update the model multiple times, note that k>1 (do not confuse k with K)
for i in range(k):
# Reset optimizer
koptimizer.zero_grad()
# Evaluate the model
loss = evaluation(new_model, wave, item = False)
# Backpropagate
loss.backward()
koptimizer.step()
return new_model
# for MAML -- see MAML cell for additional citations around structure inspiration
def task_specific_train_and_eval(model, T_i, inner_loop_optimizer, N=1):
#Description of the loop formulation from https://higher.readthedocs.io/en/latest/toplevel.html
with innerloop_ctx(model, inner_loop_optimizer, copy_initial_weights = False) as (fmodel,diffopt):
#get our input data and our label
x, label = get_samples_in_good_format(T_i,num_samples=num_samples, force_new= True)
per_step_loss = []
for _ in range(N):
#Get the task specific loss for our model
task_specifc_loss = criterion(fmodel(x), label)
#Step through the inner gradient
diffopt.step(task_specifc_loss)
per_step_loss.append(task_specifc_loss.item())
held_out_task_specific_loss = evaluation(fmodel, T_i, num_samples=num_samples, force_new=True)
return held_out_task_specific_loss, per_step_loss, fmodel
###Output
_____no_output_____
###Markdown
Reptile
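For orientation: after $k$ inner SGD steps on a sampled task turn the initialization $\theta$ into task-adapted weights $\phi$, the standard Reptile rule nudges the meta-parameters toward them, $\theta \leftarrow \theta + \epsilon\,(\phi - \theta)$; in the code below the difference $\theta - \phi$ is stored as the parameter gradient so that the Adam meta-optimizer takes the step.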
###Code
#Define important variables
epochs = int(1e5) # number of epochs
lr_meta=0.001 # Learning rate for meta model (outer loop)
printing_step=1000 # how many epochs should we wait to print the loss
lr_k=0.01 # Internal learning rate
k=5 # Number of internal updates for each task
# Initializations
initialization_to_store_meta_losses()
model = Neural_Network_multi()
metaoptimizer = torch.optim.Adam(model.parameters(), lr=lr_meta)
# Training loop
for epoch in range(epochs):
# Sample a sine wave (Task from training data)
wave = random.sample(SINE_TRAIN, 1)
# Update model predefined number of times based on k
new_model = training(model, wave[0], lr_k, k)
    # Evaluate the loss for the training data
train_set_evaluation(new_model,wave[0],store_train_loss_meta)
#Meta-update --> Get gradient for meta loop and update
metaupdate(model,new_model,metaoptimizer)
    # Evaluate the loss for the test data
# Note that we need to sample the wave from the test data
wave = random.sample(SINE_TEST, 1)
test_set_validation(model,new_model,wave[0],lr_k,k,store_test_loss_meta)
# Print losses every 'printing_step' epochs
print_losses(epoch,store_train_loss_meta,store_test_loss_meta,printing_step)
###Output
_____no_output_____
###Markdown
Few Shot learning with new meta-model The model performs well at few-shot learning
###Code
wave = SineWaveTask_multi();
k_shot_updates = 4
initialization_to_store_meta_losses()
for shots in range(k_shot_updates):
new_model = training(model, wave, lr_k, shots)
train_set_evaluation(new_model,wave,store_train_loss_meta)
plt.plot(store_train_loss_meta,label = 'Loss')
plt.legend()
plt.xlabel('k shots')
###Output
_____no_output_____
###Markdown
Second-Order MAML
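For orientation: the outer loop below minimizes the post-adaptation loss $\sum_{T_i} \mathcal{L}_{T_i}\big(\theta - \alpha \nabla_\theta \mathcal{L}_{T_i}(\theta)\big)$ with respect to the initialization $\theta$. The method is second-order because backpropagation flows through the inner-loop update itself, which is what the differentiable inner loop from `higher` (`innerloop_ctx`) provides.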
###Code
'''
Handling computation graphs and second-order backprop help and partial inspiration from:
- https://discuss.pytorch.org/t/how-to-save-computation-graph-of-a-gradient/128286/2
- https://discuss.pytorch.org/t/when-do-i-use-create-graph-in-autograd-grad/32853/3
- https://lucainiaoge.github.io/download/PyTorch-create_graph-is-true_Tutorial_and_Example.pdf
- https://www.youtube.com/watch?v=IkDw22a8BDE
- https://discuss.pytorch.org/t/how-to-manually-update-network-parameters-while-keeping-track-of-its-computational-graph/131642/2
- https://discuss.pytorch.org/t/how-to-calculate-2nd-derivative-of-a-likelihood-function/15085/3
- https://pytorch.org/tutorials/recipes/recipes/zeroing_out_gradients.html
- https://higher.readthedocs.io/en/latest/toplevel.html
Neural network configuration and helper class functions copied directly from
-https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
Note, different ways to refer to the task-specific vs. meta/aggregate updates to the parameters
Sometimes called "inner" and "outer" loop, respectively
Here, refered to as "task_specific" and "agg"/meta" (the latter, for consistency w/ ocariz code)
'''
#Instantiate the model network
model = Neural_Network_multi()
# move to the current device (GPU or CPU)
# help from: https://stackoverflow.com/questions/46704352/porting-pytorch-code-from-cpu-to-gpu
model.to(device)
T = 25 # num tasks
N = 1 # number of inner loop steps (notation from: https://www.bayeswatch.com/2018/11/30/HTYM/)
num_samples = 10 # number of samples to draw from the task
lr_task_specific = 0.01 # task specific learning rate
lr_meta = 0.001 # meta-update learning rate
num_epochs = 10000#70001 #Number of iterations for outer loop
printing_step = 5000 # show log of loss every x epochs
#Used to store the validation losses
metaLosses = []
metaValLosses = []
#Meta-optimizer for the outer loop
meta_optimizer = torch.optim.Adam(model.parameters(), lr = lr_meta)
cosScheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=meta_optimizer, T_max=num_epochs,
eta_min=0, verbose = False)
#Inner optimizer, we were doing this by hand previously
inner_loop_optimizer = torch.optim.SGD(model.parameters(), lr = lr_task_specific)
for epoch in range(num_epochs):
cosScheduler.step(epoch=epoch)
# store loss over all tasks to then do a large meta-level update of initial params
# idea/help from video: https://www.youtube.com/watch?v=IkDw22a8BDE
meta_loss = None
#Sample a new wave each time
waves = [SineWaveTask_multi() for _ in range(T)]
#Loop through all of the tasks
for i, T_i in enumerate(waves):
held_out_task_specific_loss, _, _ = task_specific_train_and_eval(model, T_i, inner_loop_optimizer, N)
if meta_loss is None:
meta_loss = held_out_task_specific_loss
else:
meta_loss += held_out_task_specific_loss
meta_optimizer.zero_grad()
meta_loss /= T
meta_loss.backward()
meta_optimizer.step()
metaLosses.append(meta_loss.item())
# validation
val_wave = SineWaveTask_multi() # our own addition -- can vary
val_loss, _, _ = task_specific_train_and_eval(model, val_wave, inner_loop_optimizer, N)
metaValLosses.append(val_loss.item())
if epoch % printing_step == 0:
print("Iter = ", epoch, " Current Loss", np.mean(metaLosses), " Val Loss: ", np.mean(metaValLosses))
# saving model help from:
# https://pytorch.org/tutorials/beginner/saving_loading_models.html
torch.save(model.state_dict(), f"{domain_type}_maml_model.pt")
###Output
Iter = 0 Current Loss 4.402935028076172 Val Loss: 4.536915302276611
Iter = 5000 Current Loss 3.526278177992484 Val Loss: 3.5197749872299178
###Markdown
Few Shot learning with new meta-model (MAML)
###Code
# run k-shot to check how rapidly we are able to adapt to unseen tasks
# starting w/ a single unseen task
test_wave = SineWaveTask_multi()
num_k_shots = 10
# use model returned from earlier optimization
inner_loop_optimizer = torch.optim.SGD(model.parameters(), lr = lr_task_specific)
held_out_task_specific_loss, metaTrainLosses, _ = task_specific_train_and_eval(model, test_wave, inner_loop_optimizer, num_k_shots)
plt.plot(metaTrainLosses)
plt.xlim([0,num_k_shots])
all_losses = []
num_eval = 100
num_k_shots = 10
for test_eval in range(num_eval):
test_wave = SineWaveTask_multi()
# use model returned from earlier optimization
inner_loop_optimizer = torch.optim.SGD(model.parameters(), lr = lr_task_specific)
held_out_task_specific_loss, metaTrainLosses, _ = task_specific_train_and_eval(model, test_wave, inner_loop_optimizer, num_k_shots)
all_losses.append(np.array(metaTrainLosses))
all_losses = np.array(all_losses)
np.save(f"maml_ca_multi_sine_{num_k_shots}.npy", all_losses)
fig, ax = plt.subplots(figsize=(8,4))
mean_loss = np.mean(all_losses, axis=0)
# confidence interval plotting help from: https://stackoverflow.com/questions/59747313/how-to-plot-confidence-interval-in-python
y = mean_loss
x = list(range(num_k_shots))
ci = 1.96 * np.std(all_losses, axis=0)**2/np.sqrt(len(y))
ax_size=16
title_size=18
ax.plot(x, y, linewidth=3, label=f"Mean Loss")
ax.fill_between(x, (y-ci), (y+ci), alpha=.5,label=f"95% CI")
ax.set_xlabel("Gradient Steps",fontsize=ax_size)
ax.set_ylabel("Mean Squared Error (MSE)",fontsize=ax_size)
ax.set_title("Sine Wave Regression: k-Shot Evaluation",fontsize=title_size)
ax.legend()#loc="upper right")
plt.savefig("sine_ca_wave_multidim_reg_kshot.png")
analysis_steps = [0, 1, num_k_shots-1]
for analysis_step in analysis_steps:
print(f"Step: {analysis_step}, Error: {mean_loss[analysis_step]}, Var: {ci[analysis_step]}")
## Second-Order MAML
'''
Handling computation graphs and second-order backprop help and partial inspiration from:
- https://discuss.pytorch.org/t/how-to-save-computation-graph-of-a-gradient/128286/2
- https://discuss.pytorch.org/t/when-do-i-use-create-graph-in-autograd-grad/32853/3
- https://lucainiaoge.github.io/download/PyTorch-create_graph-is-true_Tutorial_and_Example.pdf
- https://www.youtube.com/watch?v=IkDw22a8BDE
- https://discuss.pytorch.org/t/how-to-manually-update-network-parameters-while-keeping-track-of-its-computational-graph/131642/2
- https://discuss.pytorch.org/t/how-to-calculate-2nd-derivative-of-a-likelihood-function/15085/3
- https://pytorch.org/tutorials/recipes/recipes/zeroing_out_gradients.html
- https://higher.readthedocs.io/en/latest/toplevel.html
Neural network configuration and helper class functions copied directly from
-https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
Note, different ways to refer to the task-specific vs. meta/aggregate updates to the parameters
Sometimes called "inner" and "outer" loop, respectively
Here, refered to as "task_specific" and "agg"/meta" (the latter, for consistency w/ ocariz code)
'''
#Instantiate the model network
model = Neural_Network_multi()
# move to the current device (GPU or CPU)
# help from: https://stackoverflow.com/questions/46704352/porting-pytorch-code-from-cpu-to-gpu
model.to(device)
T = 25 # num tasks
N = 1 # number of inner loop steps (notation from: https://www.bayeswatch.com/2018/11/30/HTYM/)
num_samples = 10 # number of samples to draw from the task
lr_task_specific = 0.01 # task specific learning rate
lr_meta = 0.001 # meta-update learning rate
num_epochs = 10000#70001 #Number of iterations for outer loop
printing_step = 5000 # show log of loss every x epochs
#Used to store the validation losses
metaLosses = []
metaValLosses = []
#Meta-optimizer for the outer loop
meta_optimizer = torch.optim.Adam(model.parameters(), lr = lr_meta)
#Inner optimizer, we were doing this by hand previously
inner_loop_optimizer = torch.optim.SGD(model.parameters(), lr = lr_task_specific)
for epoch in range(num_epochs):
# store loss over all tasks to then do a large meta-level update of initial params
# idea/help from video: https://www.youtube.com/watch?v=IkDw22a8BDE
meta_loss = None
#Sample a new wave each time
waves = [SineWaveTask_multi() for _ in range(T)]
#Loop through all of the tasks
for i, T_i in enumerate(waves):
held_out_task_specific_loss, _, _ = task_specific_train_and_eval(model, T_i, inner_loop_optimizer, N)
if meta_loss is None:
meta_loss = held_out_task_specific_loss
else:
meta_loss += held_out_task_specific_loss
meta_optimizer.zero_grad()
meta_loss /= T
meta_loss.backward()
meta_optimizer.step()
metaLosses.append(meta_loss.item())
# validation
val_wave = SineWaveTask_multi() # our own addition -- can vary
val_loss, _, _ = task_specific_train_and_eval(model, val_wave, inner_loop_optimizer, N)
metaValLosses.append(val_loss.item())
if epoch % printing_step == 0:
print("Iter = ", epoch, " Current Loss", np.mean(metaLosses), " Val Loss: ", np.mean(metaValLosses))
# saving model help from:
# https://pytorch.org/tutorials/beginner/saving_loading_models.html
torch.save(model.state_dict(), f"{domain_type}_maml_model.pt")
###Output
_____no_output_____ |
old/Chapter02/activity_5/Activity_5_Assembling_a_Deep_Learning_System.ipynb | ###Markdown
Activity 5: Assembling a Deep Learning System In this activity, we will train the first version of our LSTM model using Bitcoin daily closing prices. These prices will be organized using the weeks of both 2016 and 2017. We do that because we are interested in predicting the prices of a week's worth of trading. Let's go ahead and import our data.
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from tensorflow.keras.models import load_model
plt.style.use('seaborn-white')
###Output
_____no_output_____
###Markdown
 Shaping Data

Neural networks typically work with vectors and tensors, both mathematical objects that organize data in a number of dimensions.
###Code
train = pd.read_csv('data/train_dataset.csv')
train.head()
###Output
_____no_output_____
###Markdown
 LSTM networks require vectors with three dimensions. These dimensions are:

* **Period length**: how many observations there are in a period.
* **Number of periods**: how many periods are available in the dataset.
* **Number of features**: how many features are available in the dataset.

We will create weekly groups, then rearrange the resulting array to match those dimensions, as sketched below.
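As a quick illustration of that three-dimensional layout (a sketch with dummy numbers, not the Bitcoin data), a flat series of 21 observations grouped into weeks of 7 becomes one sample of 3 periods of length 7:

```python
import numpy as np

series = np.arange(21)               # 21 dummy daily observations
weeks = series.reshape(-1, 7)        # number of periods x period length -> (3, 7)
lstm_input = weeks[np.newaxis, ...]  # add the leading sample axis -> (1, 3, 7)
print(lstm_input.shape)              # (1, 3, 7)
```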
###Code
def create_groups(data, group_size=7):
"""
Creates distinct groups from a given continuous series.
Parameters
----------
data: np.array
        Series of continuous observations.
group_size: int, default 7
Determines how large the groups are. That is,
how many observations each group contains.
Returns
-------
A Numpy array object.
"""
samples = list()
for i in range(0, len(data), group_size):
sample = list(data[i:i + group_size])
if len(sample) == group_size:
samples.append(np.array(sample).reshape(1, group_size).tolist())
return np.array(samples)
data = create_groups(train['close_point_relative_normalization'].values, 7)
len(data)
X_train = data[:-1,:].reshape(1, 186, 7)
Y_validation = data[-1].reshape(1, 7)
###Output
_____no_output_____
###Markdown
 Load Our Model

Let's start by loading our previously trained model.
###Code
model = load_model('bitcoin_lstm_v0.h5')
###Output
_____no_output_____
###Markdown
Make Predictions
###Code
%%time
history = model.fit(
x=X_train, y=Y_validation,
epochs=100)
model.save('bitcoin_lstm_v0_trained.h5')
pd.Series(history.history['loss']).plot(linewidth=2, figsize=(14, 4), color='#d35400')
def denormalize(series, last_value):
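    # invert the relative normalisation used for training: price = last observed value * (normalised series + 1)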
result = last_value * (series + 1)
return result
predictions = model.predict(x=X_train)[0]
last_weeks_value = train[train['date'] == train['date'].max()]['close'].values[0]
denormalized_prediction = denormalize(predictions, last_weeks_value)
pd.DataFrame(denormalized_prediction).plot(linewidth=2, figsize=(6, 4), color='#d35400', grid=True)
full_series = list(train['close'].values) + list(denormalized_prediction)
pd.DataFrame(full_series[-7*8:]).plot(linewidth=2, figsize=(14, 4), color='#d35400', grid=True)
plt.axvline(len(full_series[-7*8:]) - 7)
###Output
_____no_output_____ |
notebooks/6-04.ipynb | ###Markdown
 6.4: Visualizing Relationships in Data
###Code
# Listing 6.4.1: load the tips dataset
import seaborn as sns
tips = sns.load_dataset("tips")
tips.head()
# Listing 6.4.2: scatter plot drawn with the relplot() function
sns.relplot(data=tips, x="total_bill", y="tip", kind="scatter")
# Listing 6.4.3: scatter plot color-coded by the time column
sns.relplot(data=tips, x="total_bill", y="tip", hue="time")
# Listing 6.4.4: scatter plot with markers split by the day column
sns.relplot(data=tips, x="total_bill", y="tip", hue="time", style="day")
# Listing 6.4.5: scatter plot expressing element values with color
sns.relplot(data=tips, x="total_bill", y="tip", hue="size")
# Listing 6.4.6: scatter plot expressing element values with size
sns.relplot(data=tips, x="total_bill", y="tip", size="size")
# Listing 6.4.7: load the fmri dataset
fmri = sns.load_dataset("fmri")
fmri.head()
# Listing 6.4.8: draw a line plot
import matplotlib.pyplot as plt
sorted_fmri = fmri.sort_values("timepoint")
fig, ax = plt.subplots()
ax.plot(sorted_fmri["timepoint"], sorted_fmri["signal"])
# Listing 6.4.9: draw a line plot using the relplot function
sns.relplot(data=fmri, x="timepoint", y="signal", kind="line")
# Listing 6.4.10: draw the shaded band using the standard deviation
sns.relplot(data=fmri, x="timepoint", y="signal", kind="line", ci="sd")
# Listing 6.4.11: line plot without aggregating the data
sns.relplot(data=fmri, x="timepoint", y="signal", kind="line", estimator=None)
# Listing 6.4.12: kinds of data in the event column
fmri["event"].unique()
# Listing 6.4.13: kinds of data in the region column
fmri["region"].unique()
# Listing 6.4.14: multiple line plots split by color and line style
sns.relplot(
    data=fmri, x="timepoint", y="signal", kind="line", hue="event", style="region"
)
# Listing 6.4.15: draw scatter plots using facets
sns.relplot(data=tips, x="total_bill", y="tip", col="time", row="smoker")
# Listing 6.4.16: draw line plots using facets
sns.relplot(
    data=fmri, x="timepoint", y="signal", kind="line", row="event", col="region"
)
###Output
_____no_output_____ |
Taller ETVL CarlosCeballos.ipynb | ###Markdown
 Graded workshop on data extraction, transformation and visualization using IPython

**Juan David Velásquez Henao** [email protected] Universidad Nacional de Colombia, Sede Medellín, Facultad de Minas, Medellín, Colombia

Instructions

The 'Taller' folder of the 'ETVL-IPython' repository contains the files 'Precio_Bolsa_Nacional_($kwh)_'*'.xls' in Microsoft Excel format, which hold the historical hourly electricity prices for the Colombian electricity market between 1995 and 2017 in COL-PESOS/kWh. Using the information provided, solve the following items with the Python programming language.

Questions

**1.--** Read the files and build a single table by concatenating the information for each of the years. Print the head of the table using `head()`.
###Code
import os
import pandas as pd
x=[]
for n in range(1995,2018):
if n<2000:
skip=3
else:
skip=2
filename='Precio_Bolsa_Nacional_($kwh)_'+str(n)
if n>= 2016:
filename+='.xls'
else:
filename+='.xlsx'
y=pd.read_excel(filename,skiprows=skip,parse_cols=24)
x.append(y)
z=pd.concat(x)
len(z)
index=list(range(0,len(z)))
z.index = index
###Output
_____no_output_____
###Markdown
 **2.--** Compute and print the number of records with missing data.
###Code
r=len(z)-len(z.dropna())
r
###Output
_____no_output_____
###Markdown
 **3.--** Compute and print the number of duplicated records.
###Code
m=z[z.duplicated()]
len(m)
###Output
_____no_output_____
###Markdown
 **4.--** Remove the records with duplicated or missing data, and print the number of remaining (complete) records.
###Code
#print(len(z))
m2=z.dropna()
#print(len(m2))
m3=m2.drop_duplicates()
print(len(m3))
###Output
7875
###Markdown
 **5.--** Compute and plot the average daily price.
###Code
# Average daily price
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
m4=m3
m4['mean']=m3.mean(axis=1)
st=pd.to_datetime(m3['Fecha'],infer_datetime_format=True)
m4['Fecha']=st
m4['dia']=st.dt.dayofweek
m4['mes']=st.dt.month
m4['año']=st.dt.year
plt.plot(m4['Fecha'],m4['mean'],)
plt.ylabel('$')
plt.xlabel('año')
plt.title('Precio Promedio Diario del kWh 1995-2017')
plt.show()
###Output
C:\Users\Sarita\Anaconda3\lib\site-packages\ipykernel\__main__.py:9: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
C:\Users\Sarita\Anaconda3\lib\site-packages\ipykernel\__main__.py:12: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
C:\Users\Sarita\Anaconda3\lib\site-packages\ipykernel\__main__.py:13: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
C:\Users\Sarita\Anaconda3\lib\site-packages\ipykernel\__main__.py:14: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
C:\Users\Sarita\Anaconda3\lib\site-packages\ipykernel\__main__.py:15: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
###Markdown
 **6.--** Compute and plot the maximum price per month.
###Code
w = []
m5=m4
for n in range(len(m3['Fecha'])):
w.append(str(m3.iloc[n,0])[0:7])
m5['key']=w
# Maximum price per month
%matplotlib inline
y=list(m4.axes[1])
m5['max']=m4[y[1:25]].apply(max,axis=1)
b=m5.groupby('key').max()['max']
b.plot()
plt.ylabel('$')
plt.xlabel('Mes')
plt.title('Precio Máximo Mes de kWh')
###Output
C:\Users\Sarita\Anaconda3\lib\site-packages\ipykernel\__main__.py:5: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
###Markdown
 **7.--** Compute and plot the minimum monthly price.
###Code
# Minimum monthly price
%matplotlib inline
m6=m5
y=list(m4.axes[1])
m6['min']=m4[y[1:25]].apply(min,axis=1)
b3=m6.groupby('key').min()['min']
b3.plot()
plt.ylabel('$')
plt.xlabel('Mes')
plt.title('Precio Mínimo Mes de kWh')
###Output
C:\Users\Sarita\Anaconda3\lib\site-packages\ipykernel\__main__.py:6: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
###Markdown
 **8.--** Make a plot comparing the maximum price of the month (for each month) and the average monthly price.
###Code
# Maximum monthly price and average monthly price
b4=m6.groupby('key').mean()['mean']
plt.figure()
b4.plot(legend='mean')
b.plot(legend='max')
plt.ylabel('$')
plt.xlabel('Mes')
plt.title('Comparativo Precio Máximo y promedio Mes de kWh')
###Output
_____no_output_____
###Markdown
 **9.--** Make a histogram showing at which hours the maximum daily price occurs on working days.
###Code
# Histogram of the hours with the maximum price on working days
%matplotlib inline
import numpy as np
from datetime import datetime, date, time, timedelta
import calendar
fecha=[]
fecha=m6['Fecha']
m6['Fecha']=pd.to_datetime(m6['Fecha'], format="%Y-%m-%d")
m6['Dia']=m6['Fecha'].dt.weekday_name
Lab = m6['Dia'].isin(['Monday','Tuesday','Wednesday','Thursday','Friday'])
Lab = m6[Lab]
indicador = ['{}'.format(n) for n in range(len(Lab))]
Lab.index = indicador
t=[]
for n in range(len(Lab)):
x = pd.Series(Lab.loc[str(n)]).values[1:25]
t.append ([i for i, e in enumerate(x) if e == max(x)])
a=[]
for n in range(len(t)):
for i in range (len(t[n])):
a.append(t[n][i])
rep=[]
for n in range (24):
rep.append(a.count(n))
plt.xlabel("Horas")
plt.ylabel("$/kWh")
plt.bar(range(24),rep,color='r',width = 0.9)
plt.show()
###Output
C:\Users\Sarita\Anaconda3\lib\site-packages\ipykernel\__main__.py:13: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
C:\Users\Sarita\Anaconda3\lib\site-packages\ipykernel\__main__.py:14: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
###Markdown
 **10.--** Make a histogram showing at which hours the maximum daily price occurs on Saturdays.
###Code
# Histogram of the hours with the maximum price on Saturdays
Sab = m6['Dia'].isin(['Saturday'])
Sab = m6[Sab]
indicador = ['{}'.format(n) for n in range(len(Sab))]
Sab.index = indicador
s=[]
for n in range(len(Sab)):
x = pd.Series(Sab.loc[str(n)]).values[1:25]
s.append ([i for i, e in enumerate(x) if e == max(x)])
a=[]
for n in range(len(s)):
for i in range (len(s[n])):
a.append(s[n][i])
rep=[]
for n in range (24):
rep.append(a.count(n))
plt.xlabel("Hora")
plt.ylabel("Frecuencia")
plt.title('Sabado')
plt.bar(range(24),rep,color='blue',width = 0.9)
plt.show()
###Output
_____no_output_____
###Markdown
 **11.--** Make a histogram showing at which hours the maximum daily price occurs on Sundays.
###Code
# Histogram of the hours with the maximum price on Sundays
Sun = m6['Dia'].isin(['Sunday'])
Sun = m6[Sun]
indicador = ['{}'.format(n) for n in range(len(Sun))]
Sun.index = indicador
s=[]
for n in range(len(Sun)):
x = pd.Series(Sun.loc[str(n)]).values[1:25]
s.append ([i for i, e in enumerate(x) if e == max(x)])
a=[] # This fragment flattens the nested lists of hours at which the maximum repeated into a single list.
for n in range(len(s)):
for i in range (len(s[n])):
a.append(s[n][i])
rep=[]
for n in range (24):
rep.append(a.count(n))
plt.bar(range(24),rep,color='g',width = 0.9)
plt.show()
###Output
_____no_output_____
###Markdown
 **12.--** Print a table with the date and the lowest spot-market price value for each year.
###Code
# Matrix with annual minimum values - zero-valued records were removed since that price is not plausible
matrizSinCero = m6[m6>0].dropna()
Agrupac=matrizSinCero.groupby('año')['min'].idxmin()
ValorMinAnio=matrizSinCero.loc[Agrupac]
ValorMinAnio.filter(['año','Fecha','min'], axis=1)
###Output
_____no_output_____
###Markdown
 **13.--** Make a plot showing the average daily price and the average monthly price.
###Code
# Average monthly price
b4=m6.groupby('key').mean()['mean']
plt.figure()
b4.plot(legend='promMes')
plt.show
plt.ylabel('$')
plt.xlabel('Mes')
plt.title('Promedio Mes')
plt.show()
# average daily price
plt.plot(m4['Fecha'],m4['mean'],)
plt.ylabel('$')
plt.xlabel('año')
plt.title('Precio Promedio Diario del kWh 1995-2017')
plt.show()
###Output
_____no_output_____ |
redmart/Redmart API.ipynb | ###Markdown
Redmart: API Category: Redmart Label
###Code
import requests  # assumed import; REDMART_SEARCH_API is expected to be defined in an earlier cell

params = {
'q': 'redmart',
'category': 'redmart-label',
'pageSize': 1,
'sort': 1024
}
response = requests.get(REDMART_SEARCH_API, params=params)
response_json = response.json()
print(response_json['products'])
###Output
[{'category_tags': ['food-cupboard', 'cereal', 'healthier-choices', 'oats-6939', 'redmart-label', 'food-cupboard-6928', 'beverages', 'hot-cereals', 'instant-8296', 'chinese-new-year', 'redmart-label-3450', 'food-cupboard-10407', 'food-cupboard', 'breakfast-on-the-go', 'breakfast-drinks', 'chocolate-nutritional-drinks', 'instant-cereal', 'under8', 'payday-sale', 'btbasics', 'kitchen-hacks', 'breakfast-deals', 'oats', 'gourmet-breakfast-club', 'hungry-ghost-festival', 'clearance', 'clearance2', 'private-label', 'aus-day', 'valentines-day-0', 'cafe-treats-made-easy', 'smoothies-20409', 'march-mania', '38-and-under', 'food-impulse-21290'], 'id': 52905, 'title': 'RedMart Instant Oatmeal', 'desc': 'Our Australian instant oatmeal is 100-Percent wholegrain and high in fiber. Cooks in one and half minutes makes for a quick and easy breakfast.', 'sku': '8881304288255', 'categories': [1, 14, 6928, 6937, 2, 8296, 3450, 10407, 1, 216, 11611, 12824, 14164, 14391, 11084, 14910, 14966, 281, 16275, 17807, 20221, 20403, 20409, 21286, 21290], 'types': [6939, 8298, 10410, 6940, 1535, 14571, 20413, 21317], 'details': {'prod_type': 0, 'uri': 'redmart--instant-oatmeal-52905', 'status': 1, 'is_new': 0.0, 'storage_class': 'AmbientFB', 'country_of_origin': 'Australia'}, 'filters': {'mfr_name': 'UniGrain Pty Ltd', 'trending_frequency': 6, 'is_organic': 0, 'country_of_origin': 'Australia', 'vendor_name': 'Raw', 'brand_name': 'RedMart', 'brand_uri': 'redmart', 'frequency': 703, 'brand': 145664}, 'img': {'h': 0, 'w': 0, 'name': '/i/m/88813042882550027_1478140929051.jpg', 'position': 0}, 'images': [{'h': 0, 'w': 0, 'name': '/i/m/88813042882550027_1478140929051.jpg', 'position': 0}, {'h': 0, 'w': 0, 'name': '/i/m/88813042882550028_1478140931287.jpg', 'position': 1}], 'measure': {'wt_or_vol': '1 kg', 'size': 0.0}, 'pricing': {'on_sale': 0, 'price': 5.0, 'promo_price': 0.0, 'savings': 0.0}, 'warehouse': {'measure': {'vol_metric': '', 'wt': 1.0, 'wt_metric': 'kg', 'l': 0.0, 'w': 0.0, 'h': 0.0}}, 'attributes': {'dag': []}, 'description_fields': {'secondary': [{'name': 'Ingredients', 'content': 'Oats 100% GMO Free.'}, {'name': 'Material Composition', 'content': ''}, {'name': 'Storage Guidelines', 'content': 'Store in a cool, dry place. Once opened, transfer into an airtight container.'}, {'name': 'Cooking & Preparation', 'content': ''}, {'name': 'Return Policy', 'content': ''}, {'name': 'Certifications & Endorsements', 'content': ''}, {'name': 'Washing/Cleaning Instructions', 'content': ''}, {'name': 'About the Brand', 'content': ''}], 'primary': [{'name': 'Dimensions', 'content': ''}, {'name': 'Dietary Information', 'content': ''}, {'name': 'How to Use', 'content': 'Place 50g of oats into a large non-metallic bowl, add 300ml of cold water or mlk. Cook on full power for 1 minutes (800W) or 50 seconds (900W). Stir and then cook on a full power for a further 30 seconds (800W/900W). Leave to stand for 1 minute. Add sugar or salt to taste. 
Do not reheat.'}, {'name': 'MAHP/MAHS', 'content': ''}, {'name': 'Country of Origin', 'content': 'Australia'}]}, 'product_life': {'time': 45, 'metric': 'D', 'is_minimum': True, 'time_including_delivery': 46}, 'serviceability': ['STANDARD'], 'inventory': {'atp_status': 0, 'max_sale_qty': 48, 'qty_in_carts': 0, 'qty_in_stock': 48, 'stock_status': 1, 'stock_type': 5.0, 'next_available_date': '2019-04-08T15:51:20Z', 'limited_stock_status': 0, 'atp_lots': [{'from_date': '2019-04-08T15:51:20Z', 'to_date': '2020-02-01T15:59:59Z', 'stock_status': 1, 'qty_in_stock': 48, 'qty_in_carts': 0}, {'from_date': '2019-04-08T15:51:20Z', 'to_date': '2020-02-28T15:59:59Z', 'stock_status': 1, 'qty_in_stock': 48, 'qty_in_carts': 0}], 'fulfillment_source': 'SG.WESTFC', 'delivery_option': 'STANDARD'}, 'inventories': [{'atp_status': 0, 'max_sale_qty': 48, 'qty_in_carts': 0, 'qty_in_stock': 48, 'stock_status': 1, 'stock_type': 5.0, 'next_available_date': '2019-04-08T15:51:20Z', 'limited_stock_status': 0, 'atp_lots': [{'from_date': '2019-04-08T15:51:20Z', 'to_date': '2020-02-01T15:59:59Z', 'stock_status': 1, 'qty_in_stock': 48, 'qty_in_carts': 0}, {'from_date': '2019-04-08T15:51:20Z', 'to_date': '2020-02-28T15:59:59Z', 'stock_status': 1, 'qty_in_stock': 48, 'qty_in_carts': 0}], 'fulfillment_source': 'SG.WESTFC', 'delivery_option': 'STANDARD'}], 'pr': 1}]
|
robin_wenzel_assignment_4/Assignment-4_Language_model.ipynb | ###Markdown
 Introduction

Check to see if Jupyter Lab uses the correct Python interpreter with '!which python'. It should be something like '/opt/anaconda3/envs/[environment name]/bin/python' (on Mac). If not, try this: https://github.com/jupyter/notebook/issues/3146#issuecomment-352718675
###Code
!which python
###Output
/d/Users/robin/anaconda3/envs/robin/python
###Markdown
Install dependencies:
###Code
install_packages = True
if install_packages:
!conda install tensorflow=2 -y
!conda install -c anaconda pandas -y
!conda install -c conda-forge tensorflow-hub -y
!conda install -c conda-forge html2text -y
!conda install -c conda-forge tqdm -y
!conda install -c anaconda scikit-learn -y
!conda install -c conda-forge matplotlib -y
!conda install -c anaconda seaborn -y
###Output
Collecting package metadata (current_repodata.json): ...working... done
Solving environment: ...working... done
## Package Plan ##
environment location: D:\Users\robin\anaconda3\envs\robin
added / updated specs:
- tensorflow=2
The following packages will be downloaded:
package | build
---------------------------|-----------------
ca-certificates-2020.10.14 | 0 122 KB
openssl-1.1.1h | he774522_0 4.8 MB
------------------------------------------------------------
Total: 5.0 MB
The following packages will be UPDATED:
certifi anaconda::certifi-2020.6.20-py36_0 --> pkgs/main::certifi-2020.11.8-py36haa95532_0
The following packages will be SUPERSEDED by a higher-priority channel:
ca-certificates anaconda --> pkgs/main
openssl anaconda --> pkgs/main
Downloading and Extracting Packages
openssl-1.1.1h | 4.8 MB | | 0%
openssl-1.1.1h | 4.8 MB | 3 | 4%
openssl-1.1.1h | 4.8 MB | ######4 | 65%
openssl-1.1.1h | 4.8 MB | ########## | 100%
openssl-1.1.1h | 4.8 MB | ########## | 100%
ca-certificates-2020 | 122 KB | | 0%
ca-certificates-2020 | 122 KB | #3 | 13%
ca-certificates-2020 | 122 KB | ########## | 100%
Preparing transaction: ...working... done
Verifying transaction: ...working... done
Executing transaction: ...working... done
###Markdown
Imports
###Code
#imports
import pandas as pd
import numpy as np
import os
import time
import tensorflow as tf
import tensorflow_hub as hub
import zipfile
from html2text import HTML2Text
from tqdm import tqdm
import re
from sklearn.metrics import pairwise_distances
from sklearn.preprocessing import normalize
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
 Set pandas print options

This will improve the readability of printed pandas dataframes.
###Code
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
###Output
_____no_output_____
###Markdown
 Set global Parameters

Set your parameters here:

- data_path: the path to the data you have downloaded with YouTube Data Tools.
- output_path: the files generated in this notebook will be saved here.
- url_dict: URLs to models on TensorFlow Hub are stored here. Other models are available there.
- model_type: define which model you would like to use. Choose one from url_dict.
- new_embeddings: if this is True, new embeddings will be generated and saved at output_path. Otherwise, embeddings are loaded from disk.
###Code
data_path =r'D:\Users\Robin\Downloads\videoinfo_v8dXpe1Pp6Q_2020_11_20-12_53_38_comments.tab'
output_path = "./output/"
new_embeddings = False
url_dict = {
'Transformer' : "https://tfhub.dev/google/universal-sentence-encoder-large/5",
'DAN' : "https://tfhub.dev/google/universal-sentence-encoder/4",
'Transformer_Multilingual': "https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3"
}
model_type = 'Transformer' #@param ['DAN','Transformer','Transformer_Multilingual']
###Output
_____no_output_____
###Markdown
 Create output directory

Try to create the directory defined by output_path.
###Code
try:
os.mkdir(output_path)
except OSError:
print ("Creation of the directory %s failed" % output_path)
else:
print ("Successfully created the directory %s " % output_path)
###Output
Creation of the directory ./output/ failed
###Markdown
 Load Data

Load your data as a pandas dataframe.
###Code
if new_embeddings:
data = pd.read_csv(data_path,sep='\t',header=(0))
data.head()
###Output
_____no_output_____
###Markdown
 Preprocessing

Preprocess your data:
- Drop empty rows
- Drop unused columns
###Code
if new_embeddings:
data = data.dropna(subset=['text', 'authorName']) # drop rows with no content
data=data.drop(['id', 'replyCount','likeCount','authorChannelUrl','authorChannelId','isReplyTo','isReplyToName'],axis=1) # drop unused columns
data.head()
###Output
_____no_output_____
###Markdown
- remove HTML-tags, links and usernames
###Code
if new_embeddings:
# Remove HTML tags
tqdm.pandas()
h = HTML2Text()
h.ignore_links = True
data['cleaned'] = data['text'].progress_apply(lambda x: h.handle(x))
print( "Removed HTML Tags.")
# Remove links
http_link_pattern = r'http\S+'
bitly_link_pattern = r'bit.ly/\S+'
data['cleaned'] = data['cleaned'].str.replace(http_link_pattern, '')
data['cleaned'] = data['cleaned'].str.replace(bitly_link_pattern, '')
print( "Removed Links.")
# Remove user names
keep_names = ["earth", "Tide", "Geologist", "A Person", "Titanic", "adventure", "Sun", "The United States Of America"] # user names we want to keep
user_names = [name for name in data['authorName'].unique() if (len(name)> 3 and name not in keep_names)]
data['cleaned'] = data['cleaned'].str.replace('|'.join(map(re.escape, user_names)), '')
print( "Removed user names.")
###Output
100%|████████████████████████████████████████████████████████████████████████████| 2420/2420 [00:00<00:00, 6685.16it/s]
###Markdown
 Save or Load preprocessed data

Save your data after preprocessing, or load preprocessed data from disk.
###Code
if new_embeddings:
data.to_pickle(output_path+'data_preprocessed'+'.pkl')
else:
data = pd.read_pickle(output_path+'data_preprocessed'+'.pkl')
data.head()
###Output
_____no_output_____
###Markdown
 Produce Text Embeddings with Universal Sentence Encoder

Load Model

Load the model from TF-Hub.
###Code
hub_url = url_dict[model_type]
if new_embeddings:
print("Loading model. This will take some time...")
embed = hub.load(hub_url)
###Output
_____no_output_____
###Markdown
 Embed Documents

Produce embeddings of your documents.
###Code
if new_embeddings:
for k,g in data.groupby(np.arange(len(data))//200):
if k == 0:
embeddings = embed(g['cleaned'])
else:
embeddings_new = embed(g['cleaned'])
embeddings = tf.concat(values=[embeddings,embeddings_new],axis = 0)
print(k , end =" ")
print("The embeddings vector is of fixed length {}".format(embeddings.shape[1]))
np.save(output_path+'/embeddings'+model_type+'.npy', embeddings, allow_pickle=True, fix_imports=True)
else:
embeddings = np.load(output_path+'/embeddings'+model_type+'.npy', mmap_mode=None, allow_pickle=False, fix_imports=True, encoding='ASCII')
embeddings.shape
###Output
_____no_output_____
###Markdown
Calculate Similarity Matrix with angular distance'Following Cer et al. (2018), we first computethe sentence embeddings u, v for an STS sentencepair, and then score the sentence pair similaritybased on the angular distance between the twoembedding vectors d = − arccos (uv/||u|| ||v||).'
###Code
from sklearn.metrics.pairwise import cosine_similarity
def cos_sim(input_vectors):
similarity = cosine_similarity(input_vectors)
return similarity
cosine_similarity_matrix = cos_sim(np.array(embeddings))
print(cosine_similarity_matrix)
###Output
[[ 0.99999994 0.02017658 0.13172205 ... 0.19374986 0.15134448
-0.00883224]
[ 0.02017658 1.0000001 0.01142771 ... 0.21600701 -0.02442844
0.14825132]
[ 0.13172205 0.01142771 1. ... -0.12039489 0.03108918
-0.01545321]
...
[ 0.19374986 0.21600701 -0.12039489 ... 0.99999976 0.34049508
0.12608662]
[ 0.15134448 -0.02442844 0.03108918 ... 0.34049508 0.99999994
0.04931928]
[-0.00883224 0.14825132 -0.01545321 ... 0.12608662 0.04931928
1. ]]
###Markdown
 Plots: Similarity

Plot and print a heat map showing the semantic contextual similarity between comments.
###Code
import seaborn as sns
def plot_similarity(labels, features, rotation):
corr = np.inner(features, features)
sns.set(font_scale=1.2)
g = sns.heatmap(
corr,
xticklabels=labels,
yticklabels=labels,
vmin=0,
vmax=1,
cmap="YlOrRd")
g.set_xticklabels(labels, rotation=rotation)
g.set_title("Semantic Textual Similarity")
num_samples = 5
off_set = 10000
plot_similarity(data.iloc[off_set:off_set+num_samples]['cleaned'], embeddings[off_set:off_set+num_samples], 90)
###Output
D:\Users\robin\anaconda3\envs\robin\lib\site-packages\seaborn\matrix.py:311: UserWarning: Attempting to set identical left == right == 0 results in singular transformations; automatically expanding.
ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
D:\Users\robin\anaconda3\envs\robin\lib\site-packages\seaborn\matrix.py:311: UserWarning: Attempting to set identical bottom == top == 0 results in singular transformations; automatically expanding.
ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
###Markdown
 Show neighbours of a comment

Define which comment to analyze.
###Code
comment_index = 324
comment = data["cleaned"][comment_index]
comment_list = data["cleaned"].tolist()
print(comment)
###Output
Unfortunatelty for human kind and by the way biodeversity, her beauty is as
big as her stupidity and ignorance; I mean she has a so low consciousness
level, and no scientific thinking basis to understand such questions. She
should just talk about fashion, shopping, like other girls like her, something
more in her scope. Or maybe I am wrong, because she has seemed to study a bit
some Physics, and so maybe she can understand these kind of questions but then
she is so bad, so dishonest with so low values, that she is just corrupted by
big companies for the best of her wallet and the worst for the world.
We could make a song for her after the one from Nirvana, like : the girl who
solds the world; which is more appropiate and it rhymes at least
###Markdown
Print similar comments.
###Code
def get_top_similar(sentence, sentence_list, similarity_matrix, topN):
# find the index of sentence in list
index = sentence_list.index(sentence)
# get the corresponding row in similarity matrix
similarity_row = np.array(similarity_matrix[index, :])
# get the indices of top similar
indices = similarity_row.argsort()[-topN:][::-1]
return [sentence_list[i] for i in indices]
for i, value in enumerate(get_top_similar(comment, comment_list, cosine_similarity_matrix, 20)):
print("Top similar comment {}: {}".format(i+1, value))
###Output
Top similar comment 1: Unfortunatelty for human kind and by the way biodeversity, her beauty is as
big as her stupidity and ignorance; I mean she has a so low consciousness
level, and no scientific thinking basis to understand such questions. She
should just talk about fashion, shopping, like other girls like her, something
more in her scope. Or maybe I am wrong, because she has seemed to study a bit
some Physics, and so maybe she can understand these kind of questions but then
she is so bad, so dishonest with so low values, that she is just corrupted by
big companies for the best of her wallet and the worst for the world.
We could make a song for her after the one from Nirvana, like : the girl who
solds the world; which is more appropiate and it rhymes at least
Top similar comment 2: Truly this girl does not understand the meaning of the word skeptic. Skeptic
(greek is my language) means thinking, everyone thinks but thinking as beliefs
are different than facts. Also, this girl talks about humans like they are the
only thing that should walk on this earth. It's all about us. The none ending
self-centered human selfishness on denying terribly actions on the name of the
innovation because some humans benefited. Progress & innovation that she is
talking about has come with a high cost (killing each other, killing animals,
posing our lands etc). Each of us has the right to think and question things
but not put our loudmouths to promote climate denial which we are facing each
day unless you have FACTS.
Top similar comment 3: At this point I ask myself, how dumb must a lie be, how transparent must a
hoax be, how obvious must a deceipt be .. to be recognized by todays young
people. This girl is much younger that most of these FFF idiots and she
recognized it as early as in school by simply adding one plus one. All these
climate believers should be embarrassed.
Top similar comment 4: The girl has been misled, as have most of the commenters. It's hard to believe
that persons are willing to destroy the world we know for whatever selfish
reason they may have. The science is overwhelming that without an all-out
effort to head off the world's greatest threat life will become far more
difficult for most living things. I have spent nine years in retirement
following the science. There is simply no credible research to support the
idea that planetary overheating isn't happening, that it isn't setting off
more powerful weather events, that it isn't causing sea level rise that will
devour the habitation of hundreds of millions of people, that it isn't
contributing the a vast loss of marine life, that it won't cause mass
migrations and social disruption, that it won't disrupt your way of life. The
evidence increases that time is running out. People who refuse to investigate
the research are wilfully blind or morally corrupt. It's nuts that we would
let this happen for any short-term profit or a desire to belong to your
misinformed group rather than allow yourself to see what is happening.
Top similar comment 5: - Calling the girl a little Nazi and denigrating her parents
because you don't like what she is saying does not speak very well for your
character!... I find it very refreshing to listen to a young girl who can
think and speak for herself!!... only students who lack basic intelligence
would believe all the crap that they are being indoctrinated in from a very
young age, this is happening in schools all around the world, thankfully she
is not alone which gives hope for the future that the next generated just
might not mature to be the moronic robots who worship the puppet Greta, and
blindly follow and believe all her scripted bullshit! Think for a second if
you can, about the massive money and power that is behind promoting Thumberg
and her nonsense for their own agenda, and not for the good of mankind or the
world!!
Top similar comment 6: LOL she is "questioning" the narrative instead of promoting it she says. She
is just instead promoting the typical opposite climate skeptics narrative. She
is obviously clueless about climate change science (and science in general),
but she reminds us "science is based on intellectual humility". Humility would
be to not call "incoherent" models that she cannot understand even
qualitatively. She is truly a greta-like person, telling a scripted discourse
that she does not really grasp; she just chose the opposite camp.
Top similar comment 7: Are you seriously?! Yes, how much can you put on the screen little girls who
dare to speak smartly on scientific topics in which they have no competence.
All these conversations are pure propaganda, for the purpose of which these
little girls are used. Enough propaganda! Stop holding people for morons!
Better let these sponsors spend their money on providing free high-quality
academic education to all segments of the population. And all the modern
institutes of propaganda of fashionable parties of feminists, eco-activists
and LGBT people need to be closed. Stop growing idiots! After all, one day
these idiots will begin to occupy places in the government, become employees
at nuclear power plants, etc. The only real threat to our entire planet is
idiots living under the influence of propaganda.
Top similar comment 8: I like how she comes in introduced by Heartland's James Taylor, who's a true
piece of shit, even by the standards of lying scumbag deniers. But I suppose
that's beside the point...
@ 2:14 _" These days, climate change science really isn't a science at all."_
In case anyone with an IQ above a houseplant is watching, that's her
acknowledging that she doesn't have the first clue of what she's talking
about.
_" These self-proclaimed scientists - we've heard it today - draw their
conclusions before even testing their hypothesis, and they base their
assumptions on completely incoherent models."_
... said the teenager to generations worth of 's PhD researchers into
the physical sciences. I'm sure those "self-proclaimed" scientists at the US
National Academy of Sciences, and in the physics faculties at every major
American university, will find her Einstein-like insights into the physical
sciences quite compelling.
Top similar comment 9: Give that girl a steak...She needs protien..She looks so sickly..That Vegan
diet will kill you..Stupid liberal loonies..god help us..!!
Top similar comment 10: Poor girl looks sick, she is paid and used by the global elites to promote the
agenda of the global elites and to protect their investments and corporations.
Heartland Institute made millions from creating disinformation and anti-
science for the tobacco industry in the 80s and now are paid millions by the
fossil fuel corporations to spread anti-science once again. It's groups like
these that can dangerously bring back into the dark ages. Wake up to
the real conspiracy people, follow the money! The science behind man-made
Climate Change is bloody real and serious... Don't let these greedy fools get
in the way of saving .
Top similar comment 11: Smart girl she is indeed. Is there more like her? Only mistake, she is too
young, is to believing in science. Today science is farce, an atheistic
religion based on theories, assumptions and mathematics, almost never on
facts.
Top similar comment 12: @ Oh dear! Wong again. I know absolutely nothing about climate, but I'm
going to explain everything to you! Einstein was more than right about his
comparison between human stupidity and infinity; the former has few limits.
Greta is Great🤣😂🤡. But a sick, manipulated child who needs medical treatment.
Every day☹.
Top similar comment 13: @ the issue is pretty much settle. At least in the scientific
comunity. You could talk about the extend and results, but the question
anthropogenic global climate change: yes or no is answered to 99%. The problem
is that you can bring as much good studies as you want, it doesn't gonna
change what ppl. believe. You can see it in the comment section here. It's not
about facts, it's about beliving. You have here an echo chamber of self
aclaimed "critical thinkers" and "scientist philosopher" that would refute any
good study that contratict their mantra of "climate change is fake". I doubt
that many here read any scientific studies or are even able to do it. A great
example is the video itself. You have a 19y old girl starting with her little
story that add nothing to why she should have an expertise on that subjects. I
laughed real hard hearing a 19y old self proclaimed sceptic (clearly showing
she jump on every bandwagon, true or not, she statet herself she followed the
opposit view) describing why science is wrong. Bashing scientist and "showing
us the problem of science" while she has no proper education on that mater.
Top similar comment 14: not to be blunt but she is so unbearably stupid. save this poor girl.
Top similar comment 15: How sad - she doesn't undertand that good science is based on peer review -
that is the critical assessment of any research and conclusions made on the
basis of that research. Further, tood science must be free of bias and vested
interests and be based on facts and evidence alone. It's fine to be a sceptic,
that's what good science is all about and she rightly states this - but that
is the purpose of peer review - to be sceptical. There is consensus among
climate scientists around the world, that the present climate change is due to
human activities. This consensus is based on peer reviewed research by
scientists with a background in climate-related fields. These are not articles
written by someone with a degree in engineering or a physicist with speciality
on optics, or someone with a PhD in Chinese philosophy... The vast majority of
the "climate-denier" scientists belong to the latter group, they do not have a
relevant background and their publications are rarely peer-reviewed. Or, they
are supported by Big Oil.(Yes, follow the money.) Instead of blindly believing
any article that comes out, it pays to go behind the scene and find out if
this person actually knows what he/she is talking about or if there is a
vested interest behind... Also, she says that the "climate-alarmists" are
ashamed of human achievements. Good Lord what crap! No one I have ever heard
of is ashamed of our achievements, but we can admit that certain things are
not going in the right direction and that we should do something about it
(e.g. pollution, over-fishing, biodiversity loss, over population, climate
change, etc…) It is precisely due to our achievements, that we now have the
technology and ability to actually do something about our various problems and
crisis. The problem is people who believe we just can continue what we are
doing, continue to consume more resources than can be regenerated, continue to
expand areas that have become uninhabitable, continue to degrade our planet...
Business as usual is not an option - there is no Planet B.
Top similar comment 16: It is a shame how this girl is insulting thousands of highly educated
scientist, studying weather and climate for decades. Naomi, please ask
yourself honstly if you realy believe what you are telling here. This video is
full of lies. What you call the thuth is simply lies, lies lies. Stop selling
yourself to the fossil fuel industry.
Of course mankind can be proud of what was achieve, but today we must
radically change our energy sources. Coal and Oil are death.
Top similar comment 17: This is a high and low point for . Despite of the medical and
scientific advancements, ignorance is still a real thing. A point that is
stated is: How self proclaimed scientists are not proving their hypothesis in
their facts. This is not true, as many intellectuals are and have used science
to prove themselves correct. This brings us to her opinion on science, which
is: Science is based on intellectual Humility. I disagree, for science is how
the universe works, and we cannot bend it to suit our thoughts.
Top similar comment 18: @ There is no education on the matter.It is far too complex for us
humans to comprehend. The reality is we are a specie living as nature in
nature, no more no less. Science is a study, sometimes there is good things
that come from that study, sometimes bad things come out of that study. At
this stage we have created quite a few things that maybe has damaged us as a
specie, and some that has improved us. But climate we haven't even made a dent
and i don't think we ever will. As for Naomi, she is a very intelligent girl,
who lays no claim to having any superior knowledge of science. She is just
advising people not to accept all they hear from the mainstream media on
climate, because there is no debate allowed on the subject. Which should be a
clue that there is an agenda. They call you a climate denier, what does that
mean, i don't know of anybody who is a climate denier. Most people are aware
that the climate constantly changes, the why is where the debate lies. By the
way, i have noticed, that people have a tendency to judge others and put a
label on them, like Naomi jumping on a bandwagon, how do you know why she is
doing this, she may have very good reason to make a stand, which to me
personally, i believe she has good reason, and has more courage than most of
us, who sit down at our computers, to write our comments.
Top similar comment 19: @ The girl's name is Naomi Seibert. If U looked at the title
of video U would at least know that much! Now for her real bck gnd go to
wikipedia. It say she grad HS, not college at 16. She was involved in a few
sci fairs, but their is nothing about her having a BS degree or any claim that
she authored ANY papers in climate sci or anything like that. She works for
the Heartland Institute, a right wing anti thinking tank, which is a POLITICAL
organization not a scientific one. Nothing she says uses ANY sci or principles
based on the laws of established sci. She is not a trained scientist/engineer
and has no ability to back up anything she says with any PROVEN peer reviewed
sci. Thats not what the Heartland Institute hired her 4. They know if she
tried to actually prove what she says and would be evaluated by scientist
using real sci they would make a complete fool of her because she isnt capable
of doing anything like that. Yet all the GW experts have to do that EVERY day
just to have their ideas heard. Maybe U should actually look to see if she has
any experience or training to back up what she actually says. Then U could
judge her fairly.
Top similar comment 20: Ok, she's talking about " science " all the time, BUT she hasn't quoted any
DATA any RESEARCH, she said OH WHAT A WONDERFUL WORLD WE LIVE IN , And I say :
Really? What a wonderful world you, as a mid-class European girl live in, go
and ask the rest of the " World " , said that you have to realize that the
being for climate change is not being against progress!
|
FinalProj/ML_HW04.ipynb | ###Markdown
Copyright (C) 2022 Sobhan Moradian Daghigh and s.o who's unknown :) Date: 2/20/2022
###Code
from amalearn.reward import RewardBase
from amalearn.agent import AgentBase
from amalearn.environment import EnvironmentBase
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
import gym
class Environment(EnvironmentBase):
obstacles = [(1, 7), (1, 8), (2, 7), (2, 8), (3, 7), (3, 8), (4, 7), (4, 8),
(12, 6), (12, 7), (13, 6), (13, 7), (14, 6), (14, 7), (15, 6), (15, 7),
(8, 13), (8, 14), (8, 15), (9, 13), (9, 14), (9, 15)]
def __init__(self, actionPrice, goalReward, punish, obstacles=obstacles,
i_limit=15, j_limit=15, p=0.8, goal=(1, 1), start=(15, 15), container=None):
""" initialize your variables """
state_space = gym.spaces.MultiDiscrete([i_limit, j_limit])
action_space = gym.spaces.Discrete(9)
super(Environment, self).__init__(action_space, state_space, container)
self.state_space = state_space
self.obstacles = obstacles
self.actionPrice = actionPrice
self.goalReward = goalReward
self.punish = punish
self.i_limit = i_limit
self.j_limit = j_limit
self.p = p
self.goal = goal
self.start = start
self.state = start
self.state_p = None
# -------------------------------------------------------------------------------------------------------------
def isStatePossible(self, state):
"""if given state is possible (not out of the grid and not obstacle) return ture"""
i_in = range(1, self.i_limit + 1)
j_in = range(1, self.j_limit + 1)
return False if state in self.obstacles or state[0] not in i_in or state[1] not in j_in else True
# -------------------------------------------------------------------------------------------------------------
def isAccessible(self, state, state_p):
"""if given state is Accesible (we can reach state_p by doing an action from state) return true"""
if self.isStatePossible(state) and self.isStatePossible(state_p):
if (np.abs(np.subtract(state, state_p)) <= (1, 1)).all():
return True
return False
# -------------------------------------------------------------------------------------------------------------
def getTransitionStatesAndProbs(self, state, action, state_p):
"""return probability of transition or T(sp,a,s)"""
_, actions = self.available_actions(state)
if action in actions:
available_states = self.available_states(actions, state)
if self.next_state(action, state) == state_p:
return self.p
elif state_p in available_states:
return (1 - self.p) / (len(available_states) - 1)
else: return 0
else: return 0
# -------------------------------------------------------------------------------------------------------------
def getReward(self, state, action, state_p):
"""return reward of transition"""
# The Goal Achieved
if state_p == self.goal:
return self.goalReward
elif self.isAccessible(state, state_p):
return self.actionPrice
# Hit the obstacles
else:
return self.punish
# -------------------------------------------------------------------------------------------------------------
def calculate_reward(self, action):
return
# -------------------------------------------------------------------------------------------------------------
def terminated(self, state):
return state == self.goal
# -------------------------------------------------------------------------------------------------------------
def available_actions(self, state):
actions = []
numbers = []
num = 0
for i in range(-1, 2):
for j in range(-1, 2):
state_p = tuple(np.add(state, (j, i)))
if self.isAccessible(state, state_p):
actions.append((j, i))
numbers.append(num)
num += 1
return numbers, actions
# -------------------------------------------------------------------------------------------------------------
def action_num2dim(self, num):
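        # map an action index 0..8 onto an (x, y) offset in {-1, 0, 1} x {-1, 0, 1}; index 4 means stay in place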
if num < 3:
return (np.mod(num, 3) -1, -1)
elif num < 6:
return (np.mod(num, 3) -1, 0)
else:
return (np.mod(num, 3) -1, 1)
# -------------------------------------------------------------------------------------------------------------
def get_states(self):
states = []
for i in range(1, self.i_limit + 1):
for j in range(1, self.j_limit + 1):
if self.isStatePossible((i, j)):
states.append((i, j))
return states
# -------------------------------------------------------------------------------------------------------------
def available_states(self, actions, state):
states_p = []
for action in actions:
states_p.append(self.next_state(action, state))
return states_p
# -------------------------------------------------------------------------------------------------------------
def next_state(self, action, state):
return tuple(np.add(state, action))
# -------------------------------------------------------------------------------------------------------------
def reset(self):
self.state = self.start
# -------------------------------------------------------------------------------------------------------------
def observe(self):
return self.state
# -------------------------------------------------------------------------------------------------------------
def render(self):
return
# -------------------------------------------------------------------------------------------------------------
def close(self):
return
class Agent(AgentBase):
def __init__(self, environment, theta=0.1, discount=0.9, free_stay=False):
#initialize a random policy and V(s) = 0 for each state
self.environment = environment
self.width = self.environment.i_limit
self.height = self.environment.j_limit
#init V
self.V = [[0] * (self.width + 1) for _ in range(self.height + 1)]
#init policy
self.policy = np.random.randint(0, 9, (self.width + 1, self.height + 1))
super(Agent, self).__init__(id, environment)
self.discount = discount
self.theta = theta
self.free_stay = free_stay
# -------------------------------------------------------------------------------------------------------------
def policy_evaluation(self):
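        # Iterative policy evaluation: sweep over all states applying the Bellman expectation
        # backup V(s) <- sum over s' of T(s,a,s') * (R(s,a,s') + discount * V(s')) for the action
        # prescribed by the current policy, until the largest change drops below theta.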
while True:
delta = 0
pre_delta = delta
for state in self.environment.get_states():
v = self.V[state[0]][state[1]]
action = self.policy[state[0]][state[1]]
numbers, actions = self.environment.available_actions(state)
value = 0
for act in actions:
state_p = self.environment.next_state(act, state)
if self.free_stay:
reward = 0 if act == (0, 0) else self.environment.getReward(state, environment.action_num2dim(action), state_p)
else:
reward = self.environment.getReward(state, environment.action_num2dim(action), state_p)
probability = self.environment.getTransitionStatesAndProbs(state, environment.action_num2dim(action), state_p)
value += probability * (reward + self.discount * self.V[state_p[0]][state_p[1]])
self.V[state[0]][state[1]] = value
pre_delta = delta
delta = max([delta, np.abs(v - self.V[state[0]][state[1]])])
if delta < self.theta or delta == pre_delta:
break
return self.V
# -------------------------------------------------------------------------------------------------------------
def policy_improvement(self):
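        # Greedy policy improvement: for each state, choose the action with the best one-step
        # lookahead value under the current V; return whether the policy was left unchanged.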
unchanged = True
for state in self.environment.get_states():
pre_action = self.policy[state[0]][state[1]]
acts = []
numbers, actions = self.environment.available_actions(state)
for _, act1 in zip(numbers, actions):
value = 0
for _, act2 in zip(numbers, actions):
state_p = self.environment.next_state(act2, state)
reward = self.environment.getReward(state, act1, state_p)
probability = self.environment.getTransitionStatesAndProbs(state, act1, state_p)
value += probability * (reward + self.discount * self.V[state_p[0]][state_p[1]])
acts.append(value)
best_act = np.argmax(acts)
self.policy[state[0]][state[1]] = numbers[best_act]
if numbers[best_act] != pre_action:
unchanged = False
return unchanged
# -------------------------------------------------------------------------------------------------------------
def value_opt_func(self):
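        # Value iteration: apply the Bellman optimality backup
        # V(s) <- max over a of sum over s' of T(s,a,s') * (R(s,a,s') + discount * V(s'))
        # until the value function stops changing significantly.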
while True:
delta = 0.1
pre_delta = delta
for state in self.environment.get_states():
acts = []
numbers, actions = self.environment.available_actions(state)
for _, act1 in zip(numbers, actions):
value = 0
for _, act2 in zip(numbers, actions):
state_p = self.environment.next_state(act2, state)
reward = self.environment.getReward(state, act1, state_p)
probability = self.environment.getTransitionStatesAndProbs(state, act1, state_p)
value += probability * (reward + self.discount * self.V[state_p[0]][state_p[1]])
acts.append(value)
best_act = np.max(acts)
pre_delta = delta
delta = max([delta, np.abs(best_act - self.V[state[0]][state[1]])])
self.V[state[0]][state[1]] = best_act
if delta < self.theta or delta == pre_delta:
break
return self.V
# -------------------------------------------------------------------------------------------------------------
def value_extraction(self):
for state in self.environment.get_states():
pre_action = self.policy[state[0]][state[1]]
acts = []
numbers, actions = self.environment.available_actions(state)
for _, act1 in zip(numbers, actions):
value = 0
for _, act2 in zip(numbers, actions):
state_p = self.environment.next_state(act2, state)
reward = self.environment.getReward(state, act1, state_p)
probability = self.environment.getTransitionStatesAndProbs(state, act1, state_p)
value += probability * (reward + self.discount * self.V[state_p[0]][state_p[1]])
acts.append(value)
best_act = np.argmax(acts)
self.policy[state[0]][state[1]] = numbers[best_act]
# -------------------------------------------------------------------------------------------------------------
def policy_iteration(self):
unchanged = False
while not unchanged:
self.V = self.policy_evaluation()
unchanged = self.policy_improvement()
# -------------------------------------------------------------------------------------------------------------
def value_iteration(self):
self.V = self.value_opt_func()
self.value_extraction()
# -------------------------------------------------------------------------------------------------------------
def take_action(self, mode='policy') -> (object, float, bool, object):
if mode == 'policy':
self.policy_iteration()
elif mode == 'value':
self.value_iteration()
# -------------------------------------------------------------------------------------------------------------
def visualize_policy(self):
plt.gcf().set_size_inches(5, 5)
ax = plt.gca()
ax.set_xticks(range(1, environment.i_limit + 1))
ax.set_yticks(range(environment.j_limit, 0, -1))
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
for i in range(environment.i_limit, 0, -1):
temp = (environment.i_limit + 1) - i
for j in range(environment.j_limit, 0, -1):
num = agent.policy[i, j]
plt.gca().text(j - 0.5, temp - 0.5, str(num), va='center', ha='center')
plt.gca().add_patch(Rectangle((j, temp), -1, -1, fill=True, color='white', alpha=0.5))
plt.grid(True)
plt.show()
# -------------------------------------------------------------------------------------------------------------
def visualize_values(self):
value = self.V
plt.gcf().set_size_inches(7, 7)
ax = plt.gca()
ax.set_xticks(range(1, environment.i_limit + 1))
ax.set_yticks(range(environment.j_limit, 0, -1))
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
for i in range(environment.i_limit, 0, -1):
temp = (environment.i_limit + 1) - i
for j in range(environment.j_limit, 0, -1):
c = round(value[i][j])
plt.gca().text(j - 0.5, temp - 0.5, str(c), va='center', ha='center')
plt.gca().add_patch(Rectangle((j, temp), -1, -1, fill=True, color='white', alpha=0.5))
plt.grid(True)
plt.show()
# -------------------------------------------------------------------------------------------------------------
def get_path(self):
dead = self.width * self.height
achived = False
path = []
states = []
start = self.environment.start
curr = start
count = 0
while count < dead:
num = self.policy[curr[0]][curr[1]]
path.append(num)
states.append(curr)
direction = self.environment.action_num2dim(num)
curr = tuple(np.add(curr, direction))
count += 1
if self.environment.terminated(curr):
achived = True
break
return path, states[1:], achived
def plotter(environment, agent):
plt.gcf().set_size_inches(10, 10)
ax = plt.gca()
ax.set_xticks(range(1, environment.i_limit + 1))
ax.set_yticks(range(environment.j_limit, 0, -1))
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
path, states, achived = agent.get_path()
no = 1
for i in range(environment.i_limit, 0, -1):
temp = (environment.i_limit + 1) - i
for j in range(environment.j_limit, 0, -1):
if (i, j) == environment.start:
plt.gca().text(j - 0.5, temp - 0.5, str('S'), va='center', ha='center')
plt.gca().add_patch(Rectangle((j, temp), -1, -1, fill=True, color='purple', alpha=0.5))
if (i, j) in environment.obstacles:
plt.gca().add_patch(Rectangle((j, temp), -1, -1, fill=True, color='black', alpha=0.5))
elif (i, j) in states:
num = path[states.index((i, j))]
direction = environment.action_num2dim(num)
plt.gca().text(j - 0.5, temp - 0.5, str(no), va='center', ha='center')
plt.gca().add_patch(Rectangle((j, temp), -1, -1, fill=True, color='blue', alpha=0.5))
no += 1
elif (i, j) == environment.goal:
plt.gca().text(j - 0.5, temp - 0.5, str('F'), va='center', ha='center')
plt.gca().add_patch(Rectangle((j, temp), -1, -1, fill=True, color='green', alpha=0.5))
plt.grid(True)
plt.show()
###Output
_____no_output_____
###Markdown
Q1.
###Code
environment = Environment(actionPrice=-0.01, goalReward=1000, punish=-1)
agent = Agent(environment)
agent.take_action()
plotter(environment, agent)
agent.visualize_policy()
###Output
_____no_output_____
###Markdown
Q2.
###Code
environment = Environment(actionPrice=0, goalReward=1000, punish=-0.01)
agent = Agent(environment)
agent.take_action()
plotter(environment, agent)
agent.visualize_policy()
###Output
_____no_output_____
###Markdown
Q3.
###Code
environment = Environment(actionPrice=-1, goalReward=100, punish=-10)
agent = Agent(environment, free_stay=True)
agent.take_action()
plotter(environment, agent)
agent.visualize_policy()
###Output
_____no_output_____
###Markdown
Q4.
###Code
discount_factors = [0.1, 0.01, 0.001, 0]
for discount in discount_factors:
print('|_ Discount Factor: {}'.format(discount))
environment = Environment(actionPrice=-0.01, goalReward=1000, punish=-1)
agent = Agent(environment, discount=discount)
agent.take_action()
plotter(environment, agent)
agent.visualize_policy()
###Output
|_ Discount Factor: 0.1
###Markdown
Q5.
###Code
environment = Environment(actionPrice=-0.01, goalReward=1000, punish=-1)
agent = Agent(environment)
agent.take_action(mode='value')
plotter(environment, agent)
agent.visualize_policy()
###Output
_____no_output_____
###Markdown
Extra.
###Code
obstacles = [(1, 7), (1, 8), (2, 7), (2, 8), (3, 7), (3, 8), (4, 7), (4, 8),
(6, 4), (6, 5), (6, 6), (6, 7), (6, 8), (6, 9), (6, 10),
(7, 4), (7, 5), (7, 6), (7, 7), (7, 8), (7, 9), (7, 10),
(8, 4), (8, 5), (9, 4), (9, 5),
(12, 6), (12, 7), (13, 6), (13, 7), (14, 6), (14, 7), (15, 6), (15, 7),
(8, 13), (8, 14), (8, 15), (9, 13), (9, 14), (9, 15)]
environment = Environment(obstacles=obstacles, actionPrice=-0.01, goalReward=1000, punish=-1)
agent = Agent(environment)
agent.take_action()
plotter(environment, agent)
agent.visualize_policy()
###Output
_____no_output_____ |
module2-sampling-confidence-intervals-and-hypothesis-testing/LS_DS_132_Sampling_Confidence_Intervals_and_Hypothesis_Testing_Assignment.ipynb | ###Markdown
Assignment - Build a confidence interval

A confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.

52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.

In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.

But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.

How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."

For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/- 2 standard deviations of the sampling distribution (i.e. standard errors) contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard errors.

Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.

Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)):

Confidence Intervals:
1. Generate and numerically represent a confidence interval
2. Graphically (with a plot) represent the confidence interval
3. Interpret the confidence interval - what does it tell you about the data and its distribution?

Chi-squared tests:
4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared test on that data
- By hand using Numpy
- In a single line using Scipy
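Before the code below, a minimal numeric sketch of the taco example above (the sample size n is an assumption; the prompt does not specify one):
###Code
# (Added sketch) 95% interval for a proportion: point estimate 0.52, assumed sample size n = 400
import numpy as np
p_hat = 0.52
n = 400                                 # assumed sample size, not given in the prompt
se = np.sqrt(p_hat * (1 - p_hat) / n)   # standard error of the sample proportion
moe = 1.96 * se                         # ~95% margin of error
print(p_hat - moe, p_hat + moe)         # roughly (0.47, 0.57), the +/- 5% quoted above
###Output
_____no_output_____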
###Code
# TODO - your code!
# Start by importing the necessary libraries
import pandas as pd
import scipy.stats as sps
import numpy as np
df= pd.read_csv(r'C:\Users\Administrator\Downloads\house-votes-84.data', header=None)
# Fix column names to match the issue voted on
df = df.rename(columns={0:'party', 1:'handicapped-infants', 2:'water-project',
3:'budget', 4:'phys-fee-freeze', 5:'elsalvador-aid', 6:'religious-groups-in-schools',
7:'anti-satellite', 8:'nicaraguan-aid', 9:'mx-missile', 10:'immigration',
11:'synfuels', 12:'education', 13:'superfund', 14:'crime', 15:'duty-free-exp',
16:'export-adm-sa'})
df.head()
# Change votes to numeric format with 1 representing 'y', and 0 represeting 'n'. NaN fills for '?'
df= df.replace({'y':1, 'n':0, '?':np.NaN})
dem = df[df['party']== 'democrat']
rep = df[df['party']== 'republican']
def statsGet(self, sample=True, confidence=0.95, get='none', h0='none'):
'''This section of the function will place the passed data into a numpy array with the variable name data.
Secondly defines N, mean, and variance.'''
data= np.array(self)
N= len(data)
mean= sum(data)/N
'''Define the function for finding variance'''
def variance(data, sample=True):
if sample!= True:
diff= [x- mean for x in data]
variance= sum([i**2 for i in diff])/N
return variance
else:
diff= [x- mean for x in data]
variance= sum([i**2 for i in diff])/(N-1)
return variance
'''Define the function for finding the sample deviation'''
def deviation(data, sample=True):
if sample!= True:
return variance(data, sample=False)**.5
else:
return variance(data)**.5
'''Define the function for finding the standard error'''
def stderr(data, sample=True):
if sample!= True:
return deviation(data, sample=False)/(N**.5)
else:
return deviation(data)/(N**.5)
'''Define Interval'''
def interval(data, sample=True):
if sample!= True:
return stderr(data, sample=False)* sps.t.ppf((1 + confidence) / 2, N)
else:
return stderr(data)* sps.t.ppf((1 + confidence) / 2, N - 1)
def format2(value):
return '{:.2f}'.format(value)
if sample!= True:
'''Values for statistical analysis for population data.'''
if get == 'none':
raise ValueError('No analysis requested')
if get == 'ci':
return [mean-interval(data, sample=False), mean+interval(data, sample=False)]
if get == 'ttest':
if h0 == 'none':
raise ValueError('Null Hypothesis not indicated.')
else:
return sps.ttest_1samp(data, h0, nan_policy='omit')
if get == 'dev':
return deviation(data, sample=False)
'''Values for statistical analysis for sample data.'''
else:
if get == 'none':
raise ValueError('No analysis requested')
if get == 'ci':
return [mean-interval(data), mean+interval(data)]
if get == 'ttest':
if h0 == 'none':
raise ValueError('Null Hypothesis not indicated.')
else:
return sps.ttest_1samp(data, h0, nan_policy='omit')
if get == 'dev':
return deviation(data)
statsGet(dem['budget'].dropna(), get='ci')
'''A very simple bar chart: mean vote with 95% confidence intervals for two issues.'''
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
dem_budget= dem['budget'].dropna()
dem_water= dem['water-project'].dropna()
fig, ax = plt.subplots(figsize=(10, 8))
dem_budget_mean= dem['budget'].dropna().mean()
plt.bar(1, dem_budget_mean)
plt.vlines(1, statsGet(dem_budget, get='ci')[0], statsGet(dem_budget, get='ci')[1])
dem_waterp_mean= dem_water.mean()
plt.bar(2, dem_waterp_mean)
plt.vlines(2, statsGet(dem_water, get='ci')[0], statsGet(dem_water, get='ci')[1])
###Output
_____no_output_____
###Code
import pandas as pd
import numpy as np
import scipy.stats as stats
bills = {
'republican': 'party',
'n': 'infants',
'y': 'cost-sharing',
'n.1': 'budget',
'y.1': 'fee-freeze',
'y.2': 'el-salvador',
'y.3': 'religion',
'n.2': 'test-ban',
'n.3': 'nicaraguan-contras',
'n.4': 'mx-missile',
'y.4': 'immigration',
'?': 'corporation-cutback',
'y.5': 'education',
'y.6': 'right-to-sue',
'y.7': 'crime',
'n.5': 'duty-free-exports',
'y.8': 'export-administration'
}
politics = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data')
politics = politics.rename(columns = bills)
politics = politics.replace({'y': 1, 'n': -1, '?': 0})
politics.head()
dem = politics[politics['party'] == 'democrat']
rep = politics[politics['party'] == 'republican']
print(dem.head(), '\n', rep.head())
#note: the first number, labeled 'statistic', is the t-statistic; the second is the two-sided p-value.
stats.ttest_1samp(rep['religion'], 0)
#we are postulating that republicans are more in favor of religion in schools than would be
#determined by random chance, so our null hypothesis is 0. If republicans were randomly
#for or against the issue, they would be evenly split, and the 1's and -1's would cancel,
#giving us a total of 0.
n = len(rep)
conf = .95
t_stat = stats.t.ppf((1 + conf) / 2.0, n - 1)
dof = len(rep) - 1
stdev = np.std(rep['religion'], ddof = 1)
stderr = stdev/np.sqrt(n)
error = stderr * stats.t.ppf((1 + conf) / 2.0, dof)
error
lower_bound = rep['religion'].mean() - error
lower_bound
upper_bound = rep['religion'].mean() + error
upper_bound
#we clean this up and make a function to call again and again:
def conf_interval(data, conf = .95): #set default to 95%
n = len(data)
t_stat = stats.t.ppf((1 + conf) / 2.0, n - 1)
dof = len(data) - 1
stdev = np.std(data, ddof = 1)
stderr = stdev/np.sqrt(n)
error = stderr * stats.t.ppf((1 + conf) / 2.0, dof)
lower_bound = data.mean() - error
upper_bound = data.mean() + error
mean = data.mean()
return (mean, lower_bound, upper_bound)
conf_interval(rep['religion'])
#graph this ^^^
import seaborn as sns
import matplotlib.pyplot as plt
sns.kdeplot(rep['religion'])
conf_int = conf_interval(rep['religion'])
plt.axvline(x=conf_int[0], color='y')
plt.axvline(x=conf_int[1], color='r')
plt.axvline(x=conf_int[2], color='r')
plt.show()
###Output
_____no_output_____
###Markdown
This confidence interval is quite narrow, and the associated p-value from the one-sample t-test is very small. Since the interval lies well above zero (the null value), we can comfortably reject the null hypothesis that Republicans are indifferent towards this bill; they appear strongly in favor of it.
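A minimal sketch of that link, reusing the conf_interval function and imports already defined above: a 95% interval that excludes the null value 0 corresponds to a two-sided p-value below 0.05.
###Code
# (Added sketch) the interval excluding 0 should agree with the one-sample t-test at alpha = 0.05
mean, lower, upper = conf_interval(rep['religion'])
t_stat, p_val = stats.ttest_1samp(rep['religion'], 0)
print(lower > 0 or upper < 0, p_val < 0.05)  # both should be True here
###Output
_____no_output_____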
###Code
cars = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data', header = None)
cars.head()
col_name = {0: 'buying_price',
1: 'maint_price',
2: 'number_of_doors',
3: 'max_passengers',
4: 'trunk_size',
5: 'safety',
6: 'accessorized'
}
cars = cars.rename(columns = col_name)
cars.head()
cars['buying_price'] = cars['buying_price'].replace({'vhigh': 2,
'high': 1,
'med': -1,
'low': -2
})
cars['maint_price'] = cars['maint_price'].replace({'vhigh': 2,
'high': 1,
'med': -1,
'low': -2
})
cars.head()
cars['buying_price'].value_counts()
cars['maint_price'].value_counts()
#... well that certainly looks suspicious. I get the feeling this dataset might be
#a bit more cooked than I thought.
tab = pd.crosstab(cars['buying_price'], cars['maint_price'], margins = True)
tab
#Yep. Definitely cooked.
#I even found where they say that they cooked it:
#"Number of Instances: 1728 (instances completely cover the attribute space)"
#Well... let's just do the best we can then.
low_buy = tab.iloc[0][0:4].values
low_buy
med_buy = tab.iloc[1][0:4].values
med_buy
high_buy = tab.iloc[2][0:4].values
high_buy
very_high_buy = tab.iloc[3][0:4].values
very_high_buy
#technically speaking I could have just used the same code four times
#and filed it under different names but... I guess it's better to
#be more generalizable than that.
#plotting for fun
import matplotlib.pyplot as plt
import seaborn as sns
fig = plt.figure(figsize=(10, 5))
sns.set(font_scale=1.8)
categories = ["low","med","high","very high"]
p1 = plt.bar(categories, low_buy, 0.55, color='#d62728')
p2 = plt.bar(categories, med_buy, 0.55, bottom=low_buy)
p3 = plt.bar(categories, high_buy, 0.55, bottom=low_buy + med_buy)
p4 = plt.bar(categories, very_high_buy, 0.55, bottom=low_buy + med_buy + high_buy)
plt.legend((p1[0], p2[0], p3[0], p4[0]), ('Low Buy', 'Med Buy', 'High Buy', 'Very High Buy'))
plt.xlabel('Price of Maintenance')
plt.ylabel('Count')
plt.show()
row_sum = tab.iloc[0:4, 4].values
row_sum
col_sum = tab.iloc[4, 0:4].values
col_sum
total = tab.loc['All', 'All']
total
exp = []
for row in row_sum:
exp_row = []
for col in col_sum:
exp_val = col * row / total
exp_row.append(exp_val)
exp.append(exp_row)
exp
#make the list into an array so we can do math on it
exp = np.array(exp)
exp
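#(Added sketch) a vectorized way to build the same expected table:
#outer product of the row and column margins, divided by the grand total
exp_vectorized = np.outer(row_sum, col_sum) / total
exp_vectorized  # should match exp above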
obs = pd.crosstab(cars['buying_price'], cars['maint_price']).values
obs
#pretty obvious how this is about to go down, but here we go
chi_sqrd = ((obs - exp)**2 / exp).sum()
chi_sqrd
dof = (len(row_sum)-1)*(len(col_sum)-1)
dof
chi_sqrd, p_val, dof, exp = stats.chi2_contingency(obs)
print(chi_sqrd, '\n',
p_val, '\n',
dof, '\n',
exp)
###Output
0.0
1.0
9
[[108. 108. 108. 108.]
[108. 108. 108. 108.]
[108. 108. 108. 108.]
[108. 108. 108. 108.]]
###Code
# TODO - your code!
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import seaborn as sns
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data
# Load Data
df = pd.read_csv('house-votes-84.data',
header=None,
names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa'])
print(df.shape)
df = df.replace({'?':np.NaN, 'n':0, 'y':1}) #use this to create bool table
df.head()
dem = df[df['party'] == 'democrat']
rep = df[df['party'] == 'republican']
dem.head()
rep.head()
cols = list(rep.columns)
cols.remove('party')
cols
party_stats = {} # creates an empty dictionary
for party in ['democrat', 'republican']:
party_data = df[df['party']==party]
party_stats[party] = {'means': [], 'confidence_intervals': [], 'standard_errors': [], 'margins_of_error': []}
for c in cols:
n = party_data[c].count()
dof = n - 1
mean = party_data[c].mean()
std_err = np.std(party_data[c],ddof=1) / np.sqrt(n)
confidence_interval = stats.t.interval(0.95, dof, loc=mean, scale=std_err)
margin_of_error = std_err * stats.t.ppf(0.975, dof)
party_stats[party]['means'].append(mean)
party_stats[party]['confidence_intervals'].append(confidence_interval)
party_stats[party]['margins_of_error'].append(margin_of_error)
party_stats[party]['standard_errors'].append(std_err)
x = np.arange(len(cols))
width = 0.3
plt.bar(x-width/2, party_stats['democrat']['means'],
width=width, yerr=party_stats['democrat']['margins_of_error'],
color='blue', ecolor='black', label='Democrats' )
plt.bar(x+width/2, party_stats['republican']['means'],
width=width, yerr=party_stats['republican']['margins_of_error'],
color='red', ecolor='black', label='Republican' )
plt.ylabel('Point Estimate of Votes')
plt.xlabel('Issue')
plt.title('House Votes on Various Bills')
plt.legend()
plt.xticks(x, cols, rotation='vertical')
plt.show()
###Output
_____no_output_____
###Markdown
Chi-Squared Test
###Code
df = pd.read_csv('https://raw.githubusercontent.com/qweliant/GenderAndSex/master/green/pcs.csv?token=AII7DUISBJZ7Z4OWI7VPLG25RLI7K', na_values=" ?")
print(df.shape)
df.head()
df.describe(exclude='number')
df['Sex'].value_counts()
df['Sexual Orientation'].value_counts()
df = df.sort_values(by='Sexual Orientation', ascending=True)
df.head()
contingency_table = pd.crosstab([df['Sex'], df['Transgender']], df['Sexual Orientation'], margins=True)
contingency_table
###Output
_____no_output_____
###Markdown
Variable naming below is sex followed by transgender status: e.g. female_na_trans (did not answer), female_unknown_trans, female_not_trans, female_trans, with the same pattern for male and unknown.
###Code
female_na_trans = contingency_table.iloc[0][0:6].values
female_na_trans
female_not_trans = contingency_table.iloc[1][0:6].values
female_not_trans
female_unknown_trans = contingency_table.iloc[2][0:6].values
female_unknown_trans
female_trans = contingency_table.iloc[3][0:6].values
female_trans
male_na_trans = contingency_table.iloc[4][0:6].values
male_na_trans
male_not_trans = contingency_table.iloc[5][0:6].values
male_not_trans
male_unknown_trans = contingency_table.iloc[6][0:6].values
male_unknown_trans
male_trans = contingency_table.iloc[7][0:6].values
male_trans
unknown_na_trans = contingency_table.iloc[8][0:6].values
unknown_na_trans
unknown_not_trans = contingency_table.iloc[9][0:6].values
unknown_not_trans
unknown_unknown_trans = contingency_table.iloc[10][0:6].values
unknown_unknown_trans
unknown_trans = contingency_table.iloc[11][0:6].values
unknown_trans
#Plots the bar chart
fig = plt.figure(figsize=(10, 5))
sns.set(font_scale=1.8)
categories = ['BISEXUAL', 'CLIENT DID NOT ANSWER', 'LESBIAN OR GAY', 'OTHER', 'STRAIGHT OR HETEROSEXUAL', 'UNKNOWN']
p1 = plt.bar(categories, female_trans, 0.55, color='#ff6b6b')
p2 = plt.bar(categories, female_unknown_trans, 0.55, bottom=female_trans, color='#61868d')
p3 = plt.bar(categories, female_na_trans, 0.55, bottom=female_trans, color='#f0dd92')
plt.legend((p3[0], p2[0], p1[0]), ('Female Does Not Answer if Trans','Female Unknown if Trans', 'FTM'), bbox_to_anchor=(0.5, 0., 0.5, 0.5))
plt.xlabel('Non Cis-Gen Females')
plt.ylabel('Count')
plt.xticks(rotation='vertical')
plt.show()
p1 = plt.bar(categories, female_not_trans, 0.55, color='#169244')
p2 = plt.bar(categories, male_not_trans, 0.55, bottom=female_not_trans, color='#173F5F')
plt.legend((p2[0], p1[0]), ('Cis Gen Females', 'Cis Gen Men'), loc='upper right', bbox_to_anchor=(0.8, 0., 0.5, 0.5))
plt.xlabel('Cis Gen People')
plt.ylabel('Count')
plt.xticks(rotation='vertical')
plt.show()
p1 = plt.bar(categories, male_trans, 0.55, color='#f3fe7e')
p2 = plt.bar(categories, male_unknown_trans, 0.55, bottom=male_trans, color='#420000')
p3 = plt.bar(categories, male_na_trans, 0.55, bottom=male_trans, color='#3caea3')
plt.legend((p3[0], p2[0], p1[0]), ('Male Did Not Answer if Trans', 'Male, Unknown if Trans', 'MTF' ), loc='upper right', bbox_to_anchor=(1.0, 0.5, 0.5, 0.5))
plt.xlabel('Non-Cis Gen Men')
plt.ylabel('Count')
plt.xticks(rotation='vertical')
plt.show()
###Output
_____no_output_____
###Markdown
###Code
p1 = plt.bar(categories, unknown_trans, 0.55, color='#02383c')
p2 = plt.bar(categories, unknown_unknown_trans, 0.55, bottom=unknown_trans, color='#52de97')
p3 = plt.bar(categories, unknown_not_trans, 0.55, bottom=unknown_trans, color='#000000')
p4 = plt.bar(categories, unknown_na_trans, 0.55, bottom=unknown_trans, color='#e3c4a8')
plt.legend((p4[0], p3[0], p2[0], p1[0]), ( 'Unknown Sex, Did not Answer if Trans','Unknown Sex, Not Trans', 'Unknown Sex, Unknown if Trans', 'Unknown Sex but Trans' ), loc='upper right', bbox_to_anchor=(1.0, 0.5, 0.5, 0.5))
plt.xlabel('Sex is Unknown')
plt.ylabel('Count')
plt.xticks(rotation='vertical')
plt.show()
contingency_table
row_sums = contingency_table.iloc[0:12, 6].values
col_sums = contingency_table.iloc[12, 0:6].values
print(row_sums)
print(col_sums)
total = contingency_table.iloc[12,6]
total
df.shape[0]
expected = []
for i in range(len(row_sums)):
expected_row = []
for column in col_sums:
expected_val = column*row_sums[i]/total
expected_row.append(expected_val)
expected.append(expected_row)
expected = np.array(expected)
print(expected.shape)
print(expected)
observed = pd.crosstab([df['Sex'], df['Transgender']], df['Sexual Orientation']).values
print(observed.shape)
observed
chi_sqr = ((observed - expected)**2/(expected)).sum()
print("Chi Sqaured", chi_sqr)
# Degrees of Freedom of a Chi-squared test
#degrees_of_freedom = (num_rows - 1)(num_columns - 1)
# Calculate Degrees of Freedom
dof = (len(row_sums)-1)*(len(col_sums)-1)
print("Degrees of Freedom: ", {dof})
###Output
_____no_output_____
###Code
import pandas as pd
import numpy as np
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data',
header=None,
names=['party','handicapped-infants','water-project',
'budget', 'physician-fee-freeze','el-salvador',
'religion-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels','education','right-to-sue','crime','duty-free',
'south-africa'], na_values='?')
df = df.replace({'n':0,'y':1})
# Importing and cleaning the data just like yesterday
df.head()
df.synfuels.value_counts()
synclean = df.synfuels.dropna()
# sample_means = []
# for _ in range(1000):
# sample = synclean.sample(10)
# sample_means.append(sample)
def confidence_interval(data, confidence_level=0.95):
data = np.array(data)
sample_mean = np.mean(data)
sample_size = len(data)
sample_std_dev = np.std(data, ddof=1)
standard_error = sample_std_dev / (sample_size**0.5)
margin_of_error = standard_error * stats.t.ppf((1 + confidence_level) / 2.0, sample_size - 1)
return (sample_mean, sample_mean - margin_of_error, sample_mean + margin_of_error)
confidence_interval(synclean)
# between 32% and 41% of congresspeople supported this bill
intervals = []
for issue in range(1,df.shape[1]):
df1 = df
df1 = df.iloc[:,issue].dropna()
intervals.append(df1)
# for issue in range(0, len(intervals)):
# sns.kdeplot(intervals[issue])
# CI = confidence_interval(intervals[issue])
# plt.axvline(x=CI[1], color='red')
# plt.axvline(x=CI[2], color='red')
# plt.axvline(x=CI[0], color='k');
sns.kdeplot(synclean)
CI = confidence_interval(synclean)
plt.axvline(x=CI[1], color = 'blue')
plt.axvline(x=CI[2], color = 'blue')
plt.axvline(x=CI[0], color = 'red')
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/flags/flag.data', header=None)
df = df.rename(columns = {28:'topleft',29:'botright'})
df.head()
contingency_table = pd.crosstab(df['topleft'], df['botright'], margins=True)
contingency_table
row_sums = contingency_table.iloc[0:7, 8].values
col_sums = contingency_table.iloc[7, 0:8].values
total = contingency_table.loc['All','All']
# row and col sums are lists of sums for each row and column. Total is all data.
expected = []
for i in range(len(row_sums)):
expected_row = []
for column in col_sums:
# Producing list expected_row
expected_val = column*row_sums[i]/total
expected_row.append(expected_val)
expected.append(expected_row)
# Producing list of lists
expected = np.array(expected)
observed = pd.crosstab(df['topleft'], df['botright']).values
chi_squared = ((observed - expected)**2/(expected)).sum()
degrees_of_freedom = (len(row_sums)-1)*(len(col_sums)-1)
print(f'Chi Squared: {chi_squared} \nDegrees of Freedom: {degrees_of_freedom}')
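#(Added sketch) p-value for the hand-computed statistic via the chi-squared survival function;
#it should agree with the scipy.stats.chi2_contingency result computed next
p_value_by_hand = stats.chi2.sf(chi_squared, degrees_of_freedom)
print(f'P-value (by hand): {p_value_by_hand}')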
chi_squared, p_value, degrees_of_freedom, expected = stats.chi2_contingency(observed)
print(f'Chi Squared: {chi_squared}\nP-value: {p_value}\nDegrees of Freedom: {degrees_of_freedom}\nExpected: {expected}')
###Output
Chi Squared: 129.2973238309999
P-value: 8.038525286854991e-11
Degrees of Freedom: 42
Expected: [[ 0.55670103 2.90721649 0.12371134 0.55670103 2.4742268 0.06185567
4.26804124 1.05154639]
[ 1.99484536 10.41752577 0.44329897 1.99484536 8.86597938 0.22164948
15.29381443 3.76804124]
[ 0.27835052 1.45360825 0.06185567 0.27835052 1.2371134 0.03092784
2.13402062 0.5257732 ]
[ 1.48453608 7.75257732 0.32989691 1.48453608 6.59793814 0.16494845
11.3814433 2.80412371]
[ 0.18556701 0.96907216 0.04123711 0.18556701 0.82474227 0.02061856
1.42268041 0.35051546]
[ 2.59793814 13.56701031 0.57731959 2.59793814 11.54639175 0.28865979
19.91752577 4.90721649]
[ 1.90206186 9.93298969 0.42268041 1.90206186 8.45360825 0.21134021
14.58247423 3.59278351]]
###Code
# TODO - your code!
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.names
import pandas as pd
names=['party','handicapped-infants','water-project','budget','physician-fee-freeze', 'el-salvador-aid','religious-groups','anti-satellite-ban','aid-to-contras','mx-missile','immigration','synfuels', 'education', 'right-to-sue','crime','duty-free','south-africa']
df = pd.read_csv('house-votes-84.data', names=names)
df.shape
df.head()
import numpy as np
df = df.replace({'n':0, 'y':1, '?':np.NaN})
df.head()
from scipy import stats
def conf_in(data, con_level=.95):
data = data.dropna()
data = np.array(data)
data_mean = data.mean()
stan_err = np.std(data, ddof=1) / (np.sqrt(len(data)))
margin_err = stan_err * stats.t.ppf((1+con_level)/2.0,len(data)-1)
return(data_mean,data_mean-margin_err,data_mean+margin_err)
conf_in(df.budget)
rep = df[df.party == 'republican']
dem = df[df.party == 'democrat']
conf_in(rep.budget)
conf_in(dem.budget)
import seaborn as sns
sns.kdeplot(df.budget);
import matplotlib.pyplot as plt
ci = conf_in(df.budget)
sns.kdeplot(df.budget)
plt.axvline(x=ci[1], color='red')
plt.axvline(x=ci[2], color='red')
plt.axvline(x=ci[0], color='k');
cols = df.columns[1:17].to_list()
for i in cols:
sns.kdeplot(df[i])
ci = conf_in(df[i])
plt.axvline(x=ci[1], color='r')
plt.axvline(x=ci[2], color='r')
plt.axvline(x=ci[0], color='black')
plt.title(i)
plt.show()
cont_table1 = pd.crosstab(df.party, df['budget'], margins=True)
cont_table1
total = cont_table1.loc['All','All']
total
con_row = cont_table1.iloc[0:2,2].values
con_row
con_col = cont_table1.iloc[2, 0:2].values
con_col
expected = []
for i in range(len(con_row)):
expected_row = []
for col in con_col:
expected_val = (col*con_row[i])/total
expected_row.append(expected_val)
expected.append(expected_row)
expected
observed = pd.crosstab(df.party, df.budget).values
observed
chi_squared = ((observed - expected)**2/(expected)).sum()
chi_squared
cont_table = pd.crosstab(df.party, df['budget']).values
cont_table
chi_squared, p_value, dof, expected = stats.chi2_contingency(cont_table)
print(f"Chi-Squared: {chi_squared}")
print(f"P-value: {p_value}")
print(f"Degrees of Freedom: {dof}")
print("Expected: \n", np.array(expected))
group = df.groupby('party').sum()
group
gt = group.T
col = [i.replace('-',' ').title() for i in cols]
col
fig = plt.figure(figsize=(10, 5))
sns.set(font_scale=1.8)
p1 = plt.bar(col, gt.democrat, color='blue')
p2 = plt.bar(col, gt.republican, bottom=gt.democrat, color='r')
plt.xticks(rotation=90)
###Output
_____no_output_____
###Markdown
Stretch goals:

1. Write a summary of your findings, mixing prose and math/code/results. *Note* - yes, this is by definition a political topic. It is challenging but important to keep your writing voice *neutral* and stick to the facts of the data. Data science often involves considering controversial issues, so it's important to be sensitive about them (especially if you want to publish).
2. Apply the techniques you learned today to your project data or other data of your choice, and write/discuss your findings here.
3. Refactor your code so it is elegant, readable, and can be easily run for all issues.

Resources:

- [Interactively visualize the Chi-Squared test](https://homepage.divms.uiowa.edu/~mbognar/applets/chisq.html)
- [Calculation of the Chi-Squared test statistic](https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test)
- [Visualization of a confidence interval generated by R code](https://commons.wikimedia.org/wiki/File:Confidence-interval.svg)
- [Expected value of a squared standard normal](https://math.stackexchange.com/questions/264061/expected-value-calculation-for-squared-normal-distribution) (it's 1 - which is why the expected value of a Chi-Squared with $n$ degrees of freedom is $n$, as it's the sum of $n$ squared standard normals)
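A minimal sketch to numerically sanity-check the claim in that last resource (simulated draws only; nothing here uses the voting data):
###Code
# (Added sketch) E[Z^2] = 1 for a standard normal Z, so a chi-squared with n dof (a sum of n squared Z's) has mean n
z = np.random.standard_normal((100000, 5))
print((z ** 2).mean())              # should be close to 1
print((z ** 2).sum(axis=1).mean())  # should be close to 5 (n = 5 degrees of freedom)
###Output
_____no_output_____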
###Code
gr = []
mids = []
mins = []
maxs = []
for i in cols:
s = conf_in(df[i])
mids.append(s[0])
mins.append(s[0]-s[1])  # margin of error (mean minus lower bound), used as the error-bar half-width below
maxs.append(s[2])
print(s)
mids = np.array(mids)
gr = pd.DataFrame({'mids':mids,'mins':mins,'maxs':maxs})
gr.head()
from matplotlib.pyplot import figure
figure(num=None, figsize=(12, 6), dpi=80, facecolor='w', edgecolor='k')
ax = plt.subplot()
ax.bar(col, mids)
plt.errorbar(col,mids,yerr=mins, alpha=0.5, ecolor='black', capsize=10, ls='none')
plt.xticks(rotation=90)
len(mins)
###Output
_____no_output_____
###Markdown
Load in data and clean up
###Code
# Import
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.stats as stats
# Load the data
!wget 'https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data'
# Read in data and set column names
df = pd.read_csv('house-votes-84.data', names=['party','handicapped-infants',
'water-project', 'budget', 'physician-fee-freeze',
'el-salvador-aid', 'religious-groups',
'anti-satellite-ban', 'aid-to-contras', 'mx-missile',
'immigration', 'synfuels', 'education',
'right-to-sue','crime','duty-free', 'south-africa'])
# Clean data
df = df.replace({'y':1 , 'n':0, '?':np.nan})
df.head()
# Seperate data by Democrat or Republicn
dem = df[df['party'] == 'democrat']
rep = df[df['party'] == 'republican']
dem.head()
rep.head()
###Output
_____no_output_____
###Markdown
1. Generate and numerically represent a confidence interval
###Code
# Confidence interval function
def confidence_interval(data, confidence=0.95):
data = np.array(data)
mean = np.mean(data)
n = len(data)
stderr = np.std(data, ddof=1) / np.sqrt(n)
margin_of_error = stderr * stats.t.ppf((1 + confidence) / 2.0, n - 1)
return (mean, mean - margin_of_error, mean + margin_of_error)
# USED TO SEE HOW TO HANDLE CONVERTING TO NUMPY ARRAY IN FUNCTION LATER
test = dem.values
test
# USED TO SEE HOW TO HANDLE CONVERTING TO NUMPY ARRAY IN FUNCTION LATER
test = dem.values.T
test
# Confidence interval for dataframe function
def confidence_interval_df(x):
# Create list to hold means, lower bounds, and upper bounds
CI_issue = []
# Convert dataframe into an array and transpose so we can compare the votes for
# each bill, rather than each congresspersons voting
x = x.values.T
# For loop to fill CI_issue using confidence_interval function from above while
# avoiding NaN values using isnan (arrays are easy to work with!)
for col in x:
no_nans = col[~np.isnan(col)]
CI_issue.append(confidence_interval(no_nans))
# Create dataframe from confidence_interval outputs for each issue
CI_issue = pd.DataFrame(CI_issue, columns=['Mean', 'Lower', 'Upper'])
# return the dataframe
return CI_issue
# Drop 'party' column from each dataframe to run confidence_interval_df (isnan
# doesn't like strings!)
dem_votes = dem.drop('party', axis=1)
rep_votes = rep.drop('party', axis=1)
# Run confidence_interval_df for both dem votes and rep votes
CI_dem = confidence_interval_df(dem_votes)
CI_rep = confidence_interval_df(rep_votes)
# Get the margin of error for each row and append to each dataframe
CI_dem['Margin of Error'] = CI_dem['Mean'] - CI_dem['Lower']
CI_rep['Margin of Error'] = CI_rep['Mean'] - CI_rep['Lower']
CI_dem
CI_rep
###Output
_____no_output_____
###Markdown
2. Graphically (with a plot) represent the confidence interval
###Code
CI_dem.head()
# Let's make a column with bill names for easier graphing
CI_dem['Bill'] = ['handicapped-infants',
'water-project', 'budget', 'physician-fee-freeze',
'el-salvador-aid', 'religious-groups',
'anti-satellite-ban', 'aid-to-contras', 'mx-missile',
'immigration', 'synfuels', 'education',
'right-to-sue','crime','duty-free', 'south-africa']
CI_rep['Bill'] = ['handicapped-infants',
'water-project', 'budget', 'physician-fee-freeze',
'el-salvador-aid', 'religious-groups',
'anti-satellite-ban', 'aid-to-contras', 'mx-missile',
'immigration', 'synfuels', 'education',
'right-to-sue','crime','duty-free', 'south-africa']
# Check that our Bills lined up correctly as there have been a few transposes/
# changing of the dataframe throughout the notebook
CI_dem.head(10)
# Use .mean() to compare mean values to ensure Bill names are in the right rows
dem.mean()
fig, ax = plt.subplots(figsize=(8,8))
plt.style.use('seaborn-whitegrid')
# Plot data on a bar graph with CI included
ax.bar('Bill', 'Mean', data=CI_dem, yerr='Margin of Error', color='b', alpha=.8)
### Make it look nice! ###
# Adjust yticks and labels
ax.set_yticks([0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1])
ax.set_yticklabels(['0', '10', '20', '30', '40', '50', '60', '70', '80', '90',
'100%'])
ax.set_yticks([.05, .15, .25, .35, .45, .55, .65, .75, .85, .95], minor=True)
ax.tick_params(which='major', width=1.0, length=10)
ax.tick_params(which='minor', width=1.0, length=5)
ax.set_xticklabels(CI_dem['Bill'], rotation=45, ha='right')
ax.set_ylim(0,1.05)
ax.set_title('Democratic Voting Across Bills in 1984')
plt.show()
fig, ax = plt.subplots(figsize=(8,8))
plt.style.use('seaborn-whitegrid')
# Plot data on a bar graph with CI included
ax.bar('Bill', 'Mean', data=CI_rep, yerr='Margin of Error', color='r', alpha=.8)
### Make it look nice! ###
# Adjust yticks and labels
ax.set_yticks([0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1])
ax.set_yticklabels(['0', '10', '20', '30', '40', '50', '60', '70', '80', '90',
'100%'])
ax.set_yticks([.05, .15, .25, .35, .45, .55, .65, .75, .85, .95], minor=True)
ax.tick_params(which='major', width=1.0, length=10)
ax.tick_params(which='minor', width=1.0, length=5)
ax.set_xticklabels(CI_rep['Bill'], rotation=45, ha='right')
ax.set_ylim(0,1.05)
ax.set_title('Republican Voting Across Bills in 1984')
plt.show()
fig, ax = plt.subplots(figsize=(12,8))
plt.style.use('seaborn-whitegrid')
# Use width and align parameters to seperate bars
width = .35
# Plot data on a bar graph with CI included
ax.bar('Bill', 'Mean', -width, data=CI_rep, yerr='Margin of Error', color='red',
alpha=.9, align='edge', capsize=3, label='Republican')
ax.bar('Bill', 'Mean', +width, data=CI_dem, yerr='Margin of Error', color='blue',
alpha=.9, align='edge', capsize=3, label='Democrat')
### Make it look nice! ###
# Adjust yticks and labels
ax.set_yticks([0, .1, .2, .3, .4, .5, .6, .7, .8, .9, 1])
ax.set_yticklabels(['0', '10', '20', '30', '40', '50', '60', '70', '80', '90',
'100%'])
ax.set_yticks([.05, .15, .25, .35, .45, .55, .65, .75, .85, .95], minor=True)
ax.tick_params(which='major', width=1.0, length=10)
ax.tick_params(which='minor', width=1.0, length=5)
ax.set_xticklabels(CI_rep['Bill'], rotation=45, ha='right')
#Set y_lim so some of the CI's don't get cut off
ax.set_ylim(0,1.05)
# Show legend since we have 2 different groups
ax.legend()
ax.set_title('Voting Across Bills in 1984', fontweight='bold', size=18)
plt.show()
###Output
_____no_output_____
###Markdown
3. Interpret the confidence interval - what does it tell you about the data and its distribution?

On the issues where the vote is closer to 50/50, the confidence interval is much larger, while on the issues where voting was closer to unanimous the confidence interval is much smaller (a small numeric sketch of this follows below). This is very clear in the difference in size of the confidence intervals for the 'Water Project' bill and the 'Physician Fee Freeze' bill on the Republican graph. In layman's terms, this suggests that if there were a revote on these two bills, the outcome for the Physician Fee Freeze bill would be expected to be about the same, while the Water Project bill could have quite a different outcome.

4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared test on that data
- By hand using Numpy
- In a single line using Scipy
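A minimal numeric sketch of the interval-width point from item 3 above (n below is an assumed sample size, roughly the size of one party's caucus in this data):
###Code
# (Added sketch) margin of error for a yes/no proportion is widest near p = 0.5 and shrinks toward unanimity
import numpy as np
n = 170  # assumed sample size
p_values = [0.5, 0.7, 0.9, 0.99]
margins = [1.96 * np.sqrt(p * (1 - p) / n) for p in p_values]
print(list(zip(p_values, [round(m, 3) for m in margins])))
###Output
_____no_output_____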
###Code
headers = ['Sex', 'Length', 'Diameter', 'Height', 'Whole Weight', 'Shucked Weight', 'Viscera Weight', 'Shell Weight', 'Rings']
df1 = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data', names=headers)
# Data about harvested abalone's
print(df1.shape)
df1.head()
df1.describe()
# Create bins for number of Rings
cut_rings = [0, 5, 10, 15, 20, 25, 30]
ring_names = ['0-5', '6-10', '11-15', '16-20', '21-25', '26-29']
df1['Rings Categories'] = pd.cut(df1['Rings'], cut_rings, labels=ring_names)
df1.head()
df1 = df1.sort_values(by=['Rings Categories'], ascending=True)
df1.head()
# Compare Sex to ring-count categories with a crosstab
crosstab = pd.crosstab(df1['Sex'],df1['Rings Categories'], margins=True)
crosstab
# Get row sums and column sums for expected values
row_sums = crosstab.iloc[0:3, 6].values
col_sums = crosstab.iloc[3, 0:6].values
# Get total observations
total = crosstab.loc['All', 'All']
total
expected = []
for i in range(len(row_sums)):
expected_row = []
for column in col_sums:
expected_val = column*row_sums[i]/total
expected_row.append(expected_val)
expected.append(expected_row)
expected = np.array(expected)
print(expected.shape)
print(expected)
observed = pd.crosstab(df1['Sex'], df1['Rings Categories']).values
print(observed.shape)
observed
###Output
(3, 6)
###Markdown
Using Numpy
###Code
# chi_squared statistic
chi_squared = ((observed - expected)**2/(expected)).sum()
print(f"Chi-Squared: {chi_squared}")
# Calculate Degrees of Freedom
dof = (len(row_sums)-1)*(len(col_sums)-1)
print(f"Degrees of Freedom: {dof}")
###Output
Degrees of Freedom: 10
###Markdown
Using Scipy
###Code
chi_squared, p_value, dof, expected = stats.chi2_contingency(observed)
print(f"Chi-Squared: {chi_squared}")
print(f"P-value: {p_value}")
print(f"Degrees of Freedom: {dof}")
print("Expected: \n", np.array(expected))
###Output
Chi-Squared: 657.8480895498792
P-value: 6.97543112037277e-135
Degrees of Freedom: 10
Expected:
[[ 59.13885564 795.08905913 371.10414173 70.40339957 10.01292794
1.25161599]
[ 60.72252813 816.38065597 381.0418961 72.28872396 10.28106296
1.28513287]
[ 69.13861623 929.53028489 433.85396217 82.30787647 11.7060091
1.46325114]]
###Code
!pip install pandas==0.25
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from scipy import stats
###Output
_____no_output_____
###Markdown
Confidence Interval
###Code
#import with column names and NaN values.
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data',
header=None,
names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa'])
df.head()
df = df.replace({'y': 1, 'n': -1, '?': 0})
df.shape
df.head()
df['party'].value_counts()
dems = df[df['party'] == 'democrat']
reps = df[df['party'] == 'republican']
dems.head()
reps.head()
def conf_inter(data, conf=0.95):
data = np.array(data)
data = data[~np.isnan(data)]
mean = np.mean(data)
n = len(data)
stderr = np.std(data, ddof=1) / np.sqrt(n)
margin_err = stderr * stats.t.ppf((1 + conf) / 2.0, n - 1)
return mean, margin_err, (mean - margin_err, mean + margin_err)
conf_inter(dems['budget'])
cols = list(df.columns[1:])
cols
means = [conf_inter(df[i])[0] for i in cols]
means
errors = [conf_inter(df[i])[1] for i in cols]
errors
fig, ax = plt.subplots()
ax.bar(np.linspace(0, len(means), len(means)), means, yerr=errors, color='green')
ax.set_xticks(np.linspace(0, len(means), len(means)))
ax.set_xticklabels(cols, rotation=90);
ax.yaxis.grid(True)
ax.set_title("How is your government voting?")
plt.show()
dem_means = [conf_inter(dems[i])[0] for i in cols]
dem_errors = [conf_inter(dems[i])[1] for i in cols]
fig, ax = plt.subplots()
ax.bar(np.linspace(0, len(dem_means), len(dem_means)), dem_means, yerr=dem_errors, color='blue')
ax.set_xticks(np.linspace(0, len(dem_means), len(dem_means)))
ax.set_xticklabels(cols, rotation=90);
ax.yaxis.grid(True)
ax.set_title("How are the dems voting?")
plt.show()
rep_means = [conf_inter(reps[i])[0] for i in cols]
rep_errors = [conf_inter(reps[i])[1] for i in cols]
fig, ax = plt.subplots()
ax.bar(np.linspace(0, len(rep_means), len(rep_means)), rep_means, yerr=rep_errors, color='red')
ax.set_xticks(np.linspace(0, len(rep_means), len(rep_means)))
ax.set_xticklabels(cols, rotation=90);
ax.yaxis.grid(True)
ax.set_title("How are the reps voting?")
plt.show()
###Output
_____no_output_____
###Markdown
Chi-squared testing
###Code
heads = ['age', 'op_year', 'nodes', 'survived_5yrs']
can = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/haberman/haberman.data', names=heads)
can.head()
can['survived_5yrs'] = [1 if x == 1 else 0 for x in can['survived_5yrs']]
cont_table = pd.crosstab(can['survived_5yrs'], pd.cut(can['nodes'], bins=[0, 10, 20, 35, 55]), margins=True)
cont_table
bins=['0-10', '11-20', '21-35', '36-52']
died = cont_table.iloc[0][0:6]
survived = cont_table.iloc[1][0:6]
died, survived
row_sums = cont_table.iloc[0:2, 4].values
col_sums = cont_table.iloc[2, 0:4].values
row_sums, col_sums
total = cont_table.iloc[2,4]
total
exp = []
for i in range(len(row_sums)):
exp_row = []
for col in col_sums:
exp_val = col * row_sums[i]/total
exp_row.append(exp_val)
exp.append(exp_row)
exp = np.array(exp)
exp.shape, exp
chi_squared = ((pd.crosstab(can['survived_5yrs'], pd.cut(can['nodes'], bins=[0, 10, 20, 35, 55])).values - exp)**2/(exp)).sum()
print(f"Chi-Squared: {chi_squared}")
dof = (len(row_sums)-1)*(len(col_sums)-1)
print(f"Degrees of Freedom: {dof}")
chi_squared, p_value, dof, expected = stats.chi2_contingency(pd.crosstab(can['survived_5yrs'], pd.cut(can['nodes'], bins=[0, 10, 20, 35, 55])))
print(f"Chi-Squared: {chi_squared}")
print(f"P-value: {p_value}")
print(f"Degrees of Freedom: {dof}")
print("Expected: \n", np.array(expected))
###Output
Chi-Squared: 10.03651242226511
P-value: 0.01825830269331864
Degrees of Freedom: 3
Expected:
[[47.41176471 9.48235294 4.37647059 0.72941176]
[82.58823529 16.51764706 7.62352941 1.27058824]]
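###Markdown
The scipy output above gives a p-value of about 0.018, so at the conventional 0.05 level we reject the hypothesis that node-count bin and 5-year survival are independent. An equivalent check (a small follow-up sketch using the variables already defined above) compares the statistic to the chi-squared critical value:
###Code
# Compare the chi-squared statistic to the critical value at alpha = 0.05 with dof degrees of freedom
critical_value = stats.chi2.ppf(0.95, dof)
print(f"Critical value (alpha=0.05, dof={dof}): {critical_value:.3f}")
print("Reject independence" if chi_squared > critical_value else "Fail to reject independence")
###Output
_____no_output_____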
###Markdown
Assignment - Build a confidence interval
###Code
# TODO - your code!
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data')
###Output
_____no_output_____
###Markdown
Assignment - Build a confidence interval
###Code
#!pip install pandas==0.25.1
# TODO - your code!
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data
import pandas as pd
import numpy as np
# Load Data
df = pd.read_csv('house-votes-84.data',
header=None,
names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa'])
print(df.shape)
df.head()
df = df.replace({'?': np.NaN, 'n': 0, 'y': 1})
df.head()
dem = df[df['party'] == 'democrat']
rep = df[df['party'] == 'republican']
dem.head()
rep.head()
###Output
_____no_output_____
###Markdown
###Code
import scipy.stats as stats
def confidence_interval(data, confidence=0.95):
data = np.array(data)
mean = np.mean(data)
n = len(data)
stderr = np.std(data,ddof=1)/np.sqrt(n)
margin_of_error = stderr * stats.t.ppf((1 + confidence) / 2.0, n - 1)
print(margin_of_error)
return (mean, mean-margin_of_error, mean+margin_of_error)
confidence_interval(rep['water-project'].dropna())
###Output
0.08149104629696514
###Markdown
**How many republicans support the water project issue?** 50.7% of them do, with an 8.1% margin of error. With 95% confidence, between 42.6% and 58.8% of republicans support this issue.
###Code
import seaborn as sns
import matplotlib.pyplot as plt
sns.distplot(rep['water-project'].dropna())
CI = confidence_interval(rep['water-project'].dropna())
plt.axvline(x=CI[1],color='red')
plt.axvline(x=CI[2],color='red')
plt.axvline(x=CI[0],color='black')
###Output
0.08149104629696514
###Markdown
**Chi Squared Test**
###Code
!wget https://raw.githubusercontent.com/LambdaSchool/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module3-databackedassertions/persons.csv
import pandas as pd
df = pd.read_csv("persons.csv")
df.head()
df.describe()
df = df.loc[:,['weight','exercise_time']]
df.head()
weight_cut = pd.cut(df['weight'],5)
exercise_time_cut = pd.cut(df['exercise_time'],5)
weight_cut
crosstab_weight_exercise = pd.crosstab(exercise_time_cut, weight_cut, margins=True)
crosstab_weight_exercise
crosstab_weight_exercise
oneHour = crosstab_weight_exercise.iloc[0][0:5].values
twoHours = crosstab_weight_exercise.iloc[1][0:5].values
threeHours = crosstab_weight_exercise.iloc[2][0:5].values
fourHours = crosstab_weight_exercise.iloc[3][0:5].values
fiveHours = crosstab_weight_exercise.iloc[4][0:5].values
oneHour
import matplotlib.pyplot as plt
import seaborn as sns
fig = plt.figure()
sns.set(font_scale=1.1)
categories = ["99-129","129-158","158-187","187-216","216+"]
p1 = plt.bar(categories, oneHour, color='purple')
p2 = plt.bar(categories, twoHours, 0.8, color='blue')
p2 = plt.bar(categories, threeHours, 0.6, color='green')
p2 = plt.bar(categories, fourHours, 0.4, color='orange')
p2 = plt.bar(categories, fiveHours, 0.2, color='red')
plt.xlabel('Weight')
plt.ylabel('Number of people (bar colors correspond to exercise-time bins)')
plt.show()
row_sums = crosstab_weight_exercise.iloc[0:5, 5].values
col_sums = crosstab_weight_exercise.iloc[5, 0:5].values
print(row_sums)
print(col_sums)
total = crosstab_weight_exercise.loc['All','All']
total
df.shape[0]
expected = []
for i in range(len(row_sums)):
expected_row = []
for column in col_sums:
expected_val = column*row_sums[i]/total
expected_row.append(expected_val)
expected.append(expected_row)
expected = np.array(expected)
print(expected.shape)
print(expected)
observed = pd.crosstab(exercise_time_cut, weight_cut).values
print(observed.shape)
observed
chi_squared = ((observed - expected)**2/(expected)).sum()
print(f"Chi-Squared: {chi_squared}")
dof = (len(row_sums)-1)*(len(col_sums)-1)
print(f"Degrees of Freedom: {dof}")
chi_squared, p_value, dof, expected = stats.chi2_contingency(observed)
print(f"Chi-Squared: {chi_squared}")
print(f"P-value: {p_value}")
print(f"Degrees of Freedom: {dof}")
print("Expected: \n", np.array(expected))
###Output
Chi-Squared: 344.201322954906
P-value: 1.6734133240537236e-63
Degrees of Freedom: 16
Expected:
[[86.875 77.60833333 59.77 41.46833333 12.27833333]
[98.75 88.21666667 67.94 47.13666667 13.95666667]
[69.6875 62.25416667 47.945 33.26416667 9.84916667]
[60. 53.6 41.28 28.64 8.48 ]
[59.6875 53.32083333 41.065 28.49083333 8.43583333]]
###Markdown
Assignment - Build a confidence interval
###Code
# TODO - your code!
###Output
_____no_output_____
###Markdown
Assignment - Build a confidence interval
###Code
import pandas as pd
import numpy as np
import scipy.stats as stats
!wget 'https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data'
df = pd.read_csv('house-votes-84.data',
header=None,
names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa'])
print(df.shape)
df.head()
df = df.replace({'?': np.NaN, 'n': 0, 'y': 1})
df.head()
dem = df[df['party'] == 'democrat']
rep = df[df['party'] == 'republican']
dem = dem.fillna(value=1)
dem.head()
# Impute missing republican votes from neighboring rows (forward then backward fill);
# assigning the result avoids in-place fills on a filtered frame
rep = rep.ffill().bfill()
rep.head()
dem['el-salvador-aid'].describe()
np.mean(dem['anti-satellite-ban'])
###Output
_____no_output_____
###Markdown
Numerically represented confidence interval
###Code
def confidence_interval(data, confidence=0.95):
data = np.array(data)
mean = np.mean(data)
n = len(data)
stderr = np.std(data, ddof=1) / np.sqrt(n)
margin_of_error = stderr * stats.t.ppf((1 + confidence) / 2.0, n - 1)
# print(margin_of_error)
return (mean, mean - margin_of_error, mean + margin_of_error)
confidence_interval(dem['anti-satellite-ban'])
###Output
_____no_output_____
###Markdown
Graphically represented confidence interval
###Code
import seaborn as sns
import matplotlib.pyplot as plt
dem['south-africa'].describe()
dem['south-africa'].plot(kind='density')
CI = confidence_interval(dem['south-africa'])
print(CI)
plt.axvline(x=CI[1], color='red')
plt.axvline(x=CI[2], color='red')
plt.axvline(x=CI[0], color='k');
sns.kdeplot(dem['el-salvador-aid'])
CI = confidence_interval(dem['el-salvador-aid'])
print(CI)
plt.axvline(x=CI[1], color='red')
plt.axvline(x=CI[2], color='red')
plt.axvline(x=CI[0], color='k');
dem['mx-missile'].head()
dem['mx-missile'].plot(kind='density')
CI = confidence_interval(dem['mx-missile'])
print(CI)
plt.axvline(x=CI[1], color='red')
plt.axvline(x=CI[2], color='red')
plt.axvline(x=CI[0], color='k');
###Output
(0.7752808988764045, 0.7248917176706724, 0.8256700800821366)
###Markdown
Interpreting the confidence interval
For this data set, the confidence interval estimates where the party's true (population) voting mean would lie. For example, if this sample truly reflects the population, then somewhere between roughly 72% and 83% of the Democratic population would vote yes on the mx-missile issue.
Chi squared tests
###Code
!wget 'https://archive.ics.uci.edu/ml/machine-learning-databases/balloons/adult+stretch.data'
!wget 'https://archive.ics.uci.edu/ml/machine-learning-databases/balloons/adult-stretch.data'
!wget 'https://archive.ics.uci.edu/ml/machine-learning-databases/balloons/yellow-small+adult-stretch.data'
!wget 'https://archive.ics.uci.edu/ml/machine-learning-databases/balloons/yellow-small.data'
df1 = pd.read_csv('adult+stretch.data', header=None,
names=['Color', 'size', 'act', 'age', 'inflated'])
df1.head()
df2 = pd.read_csv('adult-stretch.data', header=None,
names=['Color', 'size', 'act', 'age', 'inflated'])
df2.head()
df3 = pd.read_csv('yellow-small+adult-stretch.data', header=None,
names=['Color', 'size', 'act', 'age', 'inflated'])
df3.head()
df4 = pd.read_csv('yellow-small.data', header=None,
names=['Color', 'size', 'act', 'age', 'inflated'])
df4.head()
df5 = pd.concat([df1, df2, df3, df4])
df5.head()
contingency_table = pd.crosstab(df5['inflated'], df5['size'], margins=True)
contingency_table
###Output
_____no_output_____
###Markdown
Expected value calculation
###Code
row_sums = contingency_table.iloc[0:2, 2].values
col_sums = contingency_table.iloc[2, 0:2].values
print(row_sums)
print(col_sums)
total = contingency_table.loc['All', 'All']
total
expected = []
for i in range(len(row_sums)):
expected_row = []
for column in col_sums:
expected_val = column*row_sums[i]/total
expected_row.append(expected_val)
expected.append(expected_row)
expected = np.array(expected)
print(expected.shape)
print(expected)
observed = pd.crosstab(df5['inflated'], df5['size']).values
print(observed.shape)
observed
###Output
(2, 2)
###Markdown
Chi squared with numpy
###Code
chi_squared = ((observed - expected)**2 / (expected)).sum()
print(f'Chi-Squared: {chi_squared}')
dof = (len(row_sums) - 1) * (len(col_sums) - 1)
print(f'Degrees of Freedom: {dof}')
###Output
Degrees of Freedom: 1
###Markdown
Chi-Squared with Scipy
###Code
# Yates' continuity correction changed the Chi-Squared value from 4.45407 to 3.53446.
# It subtracts 0.5 from each |observed - expected| difference before squaring, which
# compensates for approximating a discrete 2x2 table with the continuous chi-squared
# distribution; scipy applies it by default when the table has 1 degree of freedom.
chi_squared, p_value, dof, expected = stats.chi2_contingency(observed, correction=False)
print(f'Chi-Squared: {chi_squared}')
print(f'P-value: {p_value}')
print(f'Degrees of Freedom: {dof}')
print('Expected: \n', np.array(expected))
###Output
Chi-Squared: 4.454076655052262
P-value: 0.03481803457936288
Degrees of Freedom: 1
Expected:
[[19.42105263 21.57894737]
[16.57894737 18.42105263]]
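###Markdown
Yates' continuity correction (the scipy default for 2x2 tables) subtracts 0.5 from each |observed - expected| difference before squaring, which compensates for fitting a discrete 2x2 table with the continuous chi-squared distribution. A by-hand sketch of the corrected statistic, using the `observed` and `expected` arrays already defined above:
###Code
# Yates-corrected chi-squared by hand; should match chi2_contingency with correction=True
chi_squared_yates = (((np.abs(observed - expected) - 0.5) ** 2) / expected).sum()
print(f"Yates-corrected Chi-Squared: {chi_squared_yates}")
###Output
_____no_output_____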
###Markdown
Assignment - Build a confidence interval
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
names = ['party', 'handicapped_infants', 'water_project', 'budget', 'physician_fee_freeze', 'el_salvador_aid',
'religion_in_schools', 'anti_satellite_test_ban', 'aid_to_contras', 'mx_missile', 'immigration',
'synfuels_cutback', 'education', 'right_to_sue', 'crime', 'duty_free', 'export_south_africa']
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data',
header=None, names=names, true_values='y', false_values='n', na_values='?')
df = df.fillna(0.5)
print(df.shape)
df.head()
dem = df[df['party']=='democrat']
rep = df[df['party']=='republican']
rep.head()
# Generate and numerically represent confidence intervals.
from scipy.stats import t
n_all = len(df['party'])
n_dem = len(dem['party'])
n_rep = len(rep['party'])
dof_all = n_all - 1
dof_dem = n_dem - 1
dof_rep = n_rep - 1
mean0 = np.mean(df['mx_missile'])
mean1 = np.mean(dem['mx_missile'])
mean2 = np.mean(rep['mx_missile'])
std0 = np.std(df['mx_missile'], ddof=1)
std1 = np.std(dem['mx_missile'], ddof=1)
std2 = np.std(rep['mx_missile'], ddof=1)
std_error0 = std0/n_all**0.5
std_error1 = std1/n_dem**0.5
std_error2 = std2/n_rep**0.5
CI_0 = t.interval(.95, dof_all, loc=mean0, scale=std_error0)
CI_1 = t.interval(.95, dof_dem, loc=mean1, scale=std_error1)
CI_2 = t.interval(.95, dof_rep, loc=mean2, scale=std_error2)
print('The confidence interval of all Representatives voting for the MX Missile is', CI_0)
print('The confidence interval of Democrats voting for the MX Missile is', CI_1)
print('The confidence interval of Republicans voting for the MX Missile is', CI_2)
# Need this for the graph below.
means = [np.mean(rep['mx_missile']), np.mean(df['mx_missile']), np.mean(dem['mx_missile'])]
means
# Defining margin of error makes the following graph a lot easier to code.
import scipy.stats as stats
def margin_of_error(data, confidence=0.95):
mean = np.mean(data)
n = len(data)
stderr = np.std(data, ddof=1) / np.sqrt(n)
return stderr * stats.t.ppf((1 + confidence) / 2.0, n - 1)
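# Quick sanity check (illustrative): scipy's t.interval and the mean +/- margin_of_error
# construction should give the same bounds for the same column.
check_lower, check_upper = t.interval(.95, dof_all, loc=mean0, scale=std_error0)
print(np.isclose(check_lower, mean0 - margin_of_error(df['mx_missile'])),
      np.isclose(check_upper, mean0 + margin_of_error(df['mx_missile'])))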
print(margin_of_error(df['mx_missile']), means[1] - margin_of_error(df['mx_missile']), means[1] + margin_of_error(df['mx_missile']))
# Graphically represent the confidence intervals.
# The red bar is the Republican vote, the blue bar is the Democratic vote, and the green bar is the vote of all Representatives.
p1 = plt.bar(1, means[0], color='r', yerr=margin_of_error(rep['mx_missile']))
p2 = plt.bar(1, means[1]-means[0], bottom=means[0], color='g', yerr=margin_of_error(df['mx_missile']))
p3 = plt.bar(1, means[2]-means[1], bottom=means[1], color='b', yerr=margin_of_error(dem['mx_missile']))
plt.show()
###Output
_____no_output_____
###Markdown
Assignment: Interpret the confidence intervals - what do they tell you about the data and its distribution?
Answer: The confidence intervals are relatively narrow here. This indicates that the standard error of the mean is small for both Democrats and Republicans on the MX Missile vote: each group mostly voted the same way, and there are enough votes to pin down each party's average position fairly precisely.
Chi-Squared Tests:
###Code
# Using this "adults" dataset from Sprint 1 Module 2.
column_headers = ['age', 'workclass', 'fnlwgt', 'education', 'education-num',
'marital-status', 'occupation', 'relationship', 'race', 'sex',
'capital-gain', 'capital-loss', 'hours-per-week',
'native-country', 'income']
adults = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', names=column_headers)
print(adults.shape)
adults.head()
adults.describe(exclude='number')
contingency_table = pd.crosstab(adults['income'], adults['workclass'], margins=True)
contingency_table
row_sums = contingency_table.iloc[0:2, 9].values
column_sums = contingency_table.iloc[2, 0:9].values
print(row_sums)
column_sums
total = contingency_table.loc['All', 'All']
total
expected = []
for i in range (len(row_sums)):
expected_row = []
for column in column_sums:
expected_val = column*row_sums[i]/total
expected_row.append(expected_val)
expected.append(expected_row)
expected = np.array(expected)
print(expected.shape)
expected
observed = pd.crosstab(adults['income'], adults['workclass']).values
observed
chi_squared = ((observed - expected)**2 / (expected)).sum()
chi_squared
print('Chi Squared using Scipy is', stats.chi2_contingency(observed)[0])
###Output
Chi Squared using Scipy is 1045.7085997281692
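###Markdown
The single Scipy line above only reports the test statistic; `stats.chi2_contingency` also returns the p-value, degrees of freedom, and expected counts, which complete the report for the income-by-workclass test (a small follow-up using the same `observed` table):
###Code
chi2_stat, p_val, dof_adults, expected_adults = stats.chi2_contingency(observed)
print(f"P-value: {p_val}")
print(f"Degrees of Freedom: {dof_adults}")
###Output
_____no_output_____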
###Markdown
Stretch goals:
1. Write a summary of your findings, mixing prose and math/code/results. *Note* - yes, this is by definition a political topic. It is challenging but important to keep your writing voice *neutral* and stick to the facts of the data. Data science often involves considering controversial issues, so it's important to be sensitive about them (especially if you want to publish).
2. Apply the techniques you learned today to your project data or other data of your choice, and write/discuss your findings here.
3. Refactor your code so it is elegant, readable, and can be easily run for all issues.
###Code
labels = ['handicapped_infants', 'water_project', 'budget', 'physician_fee_freeze', 'el_salvador_aid',
'religion_in_schools', 'anti_satellite_test_ban', 'aid_to_contras', 'mx_missile', 'immigration',
'synfuels_cutback', 'education', 'right_to_sue', 'crime', 'duty_free', 'export_south_africa']
means_dem = []
means_rep = []
means_all = []
moe_dem = []
moe_rep = []
moe_all = []
for label in labels:
means_dem.append(np.mean(dem[label]))
means_rep.append(np.mean(rep[label]))
means_all.append(np.mean(df[label]))
moe_dem.append(margin_of_error(dem[label]))
moe_rep.append(margin_of_error(rep[label]))
moe_all.append(margin_of_error(df[label]))
print(means_dem)
print(means_rep)
print(means_all)
print(moe_dem)
print(moe_rep)
print(moe_all)
fig, ax = plt.subplots()
fig.set_size_inches(20,5)
# Using a loop to graph all issues at once.
# Using an 'if' statement because which bar is on the bottom depends on which party's support is lower. The "all" mean always being
# in between the other two means makes things easier.
for i in range(0,16):
if means_dem[i] < means_rep[i]:
ax.bar(labels[i], means_dem[i], color='b', yerr=moe_dem[i], width=0.6)
ax.bar(labels[i], means_all[i]-means_dem[i], bottom=means_dem[i], color='g', yerr=moe_all[i], width=0.6)
ax.bar(labels[i], means_rep[i]-means_all[i], bottom=means_all[i], color='r', yerr=moe_rep[i], width=0.6)
else:
ax.bar(labels[i], means_rep[i], color='r', yerr=moe_rep[i], width=0.6)
ax.bar(labels[i], means_all[i]-means_rep[i], bottom=means_rep[i], color='g', yerr=moe_all[i], width=0.6)
ax.bar(labels[i], means_dem[i]-means_all[i], bottom=means_all[i], color='b', yerr=moe_dem[i], width=0.6)
ax.set_title('Support of Democrats and Republicans for various issues (with confidence intervals) -- Blue = Dem; Red = Rep; Green = All')
ax.set_xlabel('Issue')
ax.set_ylabel('Support (With Confidence Intervals)')
ax.set_yticklabels(['0%', '20%', '40%', '60%', '80%', '100%'])
plt.show()
###Output
_____no_output_____
###Markdown
Assignment - Build a confidence interval
###Code
# Imports
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from scipy import stats
# Get the dataset
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data
# Load data in dataframe and add in column headers
df = pd.read_csv("house-votes-84.data",
header=None,
names=["party", "handicapped-infants", "water-project",
"budget", "physician-fee-freeze", "el-salvador-aid",
"religious-groups", "anti-satellite-ban",
"aid-to-contras", "mx-missile", "immigration",
"synfuels", "education", "right-to-sue", "crime",
"duty-free", "south-africa"])
print(df.shape)
df.head(2)
# Replace "?" with NaN and "y" / "n" with 1 / 0
df = df.replace({
"?": np.NaN,
"n": 0,
"y": 1,
})
print(df.shape)
df.head()
###Output
(435, 17)
###Markdown
1. Generate and numerically represent a confidence interval
###Code
def confidence_interval(data, confidence=0.95):
"""
Calculates a confidence interval around a sample mean for given data,
using t-distribution and two-tailed test, default 95% confidence.
Arguments:
data - iterable (list or numpy array) of sample observations
confidence - level of confidence for the interval
Returns:
tuple of (mean, lower bound, upper bound)
"""
data = np.array(data) # Standardize to numpy array
mean = np.mean(data)
n = len(data)
# Sample std dev (ddof=1) divided by the square root of n, the number of observations
stderr = np.std(data, ddof=1) / np.sqrt(n)
# Std error multiplied by t-statistic gives margin of error
margin_of_error = stderr * stats.t.ppf((1 + confidence) / 2.0, n - 1)
print(f"Margin of error: {margin_of_error}")
print(f"Mean: {mean}")
print(f"Lower bound: {mean - margin_of_error}")
print(f"Upper bound: {mean + margin_of_error}")
return (mean, mean - margin_of_error, mean + margin_of_error)
# Confidence interval of votes on the "immigration" bill
# Drop null values from the column before using it in calculations
immigration = df["immigration"].dropna(how="any")
ci = confidence_interval(immigration)
ci
# Count number of votes for and against
df["immigration"].value_counts()
# Look at the votes for and against per party
df["immigration"].groupby(df["party"]).value_counts()
###Output
_____no_output_____
###Markdown
--- 2. Graphically (with a plot) represent the confidence interval
###Code
# Plot with seaborn kdeplot
sns.kdeplot(immigration)
# Add lines to represent the confidence interval bounds
plt.axvline(x=ci[1], color="r");
plt.axvline(x=ci[2], color="r");
plt.axvline(x=ci[0], color="k");
###Output
_____no_output_____
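###Markdown
One way to make the "95 times out of 100" reading concrete is a small resampling experiment: repeatedly draw samples of a fixed size from the observed votes, build the interval each time, and count how often it covers the full-column mean. This is only an illustrative sketch; it treats the `immigration` column as if it were the population, which is an assumption made here for demonstration.
###Code
# Resampling illustration of interval coverage (treats the immigration column as the "population")
rng = np.random.default_rng(0)
population = immigration.values
pop_mean = population.mean()
covered = 0
n_reps = 100
for _ in range(n_reps):
    sample = rng.choice(population, size=100, replace=True)
    sample_mean = sample.mean()
    moe = sample.std(ddof=1) / np.sqrt(len(sample)) * stats.t.ppf(0.975, len(sample) - 1)
    if sample_mean - moe <= pop_mean <= sample_mean + moe:
        covered += 1
print(f"{covered} of {n_reps} intervals covered the full-column mean")
###Output
_____no_output_____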
###Markdown
Assignment - Build a confidence interval
###Code
# TODO - your code!
###Output
_____no_output_____
###Markdown
Assignment - Build a confidence interval
###Code
# TODO - your code!
import pandas as pd
import numpy as np
from scipy.stats import ttest_1samp, ttest_ind
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data'
namelist = ['Class Name','handicapped-infants','water-project-cost-sharing','adoption-of-the-budget-resolution','physician-fee-freeze','el-salvador-aid','religious-groups-in-schools','anti-satellite-test-ban','aid-to-nicaraguan-contras','mx-missile','immigration','synfuels-corporation-cutback','education-spending','superfund-right-to-sue','crime','duty-free-exports','export-administration-act-south-africa']
df = pd.read_csv(url, names=namelist, na_values=np.NaN)
df = df.replace({'?':np.NaN, 'n':0, 'y':1})
df.rename(columns={'Class Name':'Party'},inplace=True)
rep = df[df.Party == 'republican']
dem = df[df.Party == 'democrat']
df.head()
from scipy import stats
dem_set = dem['adoption-of-the-budget-resolution']
rep_set = rep['adoption-of-the-budget-resolution']
# Trying to write this without looking at Ryan's code.
def confidence_interval(data, confidence_level=.95, trim_unit=False):
data = np.array(data)[~np.isnan(data)]
n = len(data)
sample_mean = np.mean(data)
sample_std = np.std(data, ddof=n-1)
std_error = sample_std / n**0.5
# Here I paused for a long time trying to understand scipy.stats.t.ppf
margin_of_error = std_error * stats.t.ppf((1 + confidence_level) / 2.0, n - 1)
lower_bound = sample_mean - margin_of_error
upper_bound = sample_mean + margin_of_error
# Optional: trims confidence interval to valid 0-1 range.
# 0 and 1 not used because they don't plot properly.
if trim_unit == True:
if lower_bound <= 0:
lower_bound = 0.0001
if upper_bound >= 1:
upper_bound = 0.9999
# print(margin_of_error, std_error, n, sample_mean, stats.t.ppf((confidence_level / 2.0), n - 1))
return (sample_mean, lower_bound, upper_bound)
confidence_interval(dem_set, trim_unit=True)
import seaborn as sns
import matplotlib.pyplot as plt
# set numpy to ignore errors
np.seterr(divide='ignore', invalid='ignore')
fig, ax = plt.subplots()
plt.hist([rep_set, dem_set], color=['r', 'b'], alpha=0.5)
CI = confidence_interval(rep_set, trim_unit=True)
plt.axvline(x=CI[1], color='red')
plt.axvline(x=CI[2], color='red')
plt.axvline(x=CI[0], color='crimson')
CI = confidence_interval(dem_set, trim_unit=True)
plt.axvline(x=CI[1], color='m')
plt.axvline(x=CI[2], color='m')
plt.axvline(x=CI[0], color='navy');
def question_plot(question, confidence_level=.95, trim_unit=False):
dem_set = dem[str(question)]
rep_set = rep[str(question)]
dem_ci = confidence_interval(dem_set, confidence_level=0.95, trim_unit=True)
rep_ci = confidence_interval(rep_set, confidence_level=0.95, trim_unit=True)
fig, ax = plt.subplots()
plt.title(question)
plt.hist([rep_set, dem_set], color=['r', 'b'], alpha=0.5)
plt.axvline(x=rep_ci[1], color='red', alpha=0.6)
plt.axvline(x=rep_ci[2], color='red', alpha=0.6)
plt.axvline(x=rep_ci[0], color='crimson')
CI = confidence_interval(dem_set, trim_unit=True)
plt.axvline(x=dem_ci[1], color='m', alpha=0.6)
plt.axvline(x=dem_ci[2], color='m', alpha=0.6)
plt.axvline(x=dem_ci[0], color='navy');
return abs(dem_ci[0] - rep_ci[0])
def question_spread(question, confidence_level=.95, trim_unit=False):
dem_set = dem[str(question)]
rep_set = rep[str(question)]
dem_ci = confidence_interval(dem_set, confidence_level=0.95, trim_unit=True)
rep_ci = confidence_interval(rep_set, confidence_level=0.95, trim_unit=True)
return abs(dem_ci[0] - rep_ci[0])
question_plot('handicapped-infants')
###Output
_____no_output_____
###Markdown
**Both confidence intervals span 0 to 1! Maybe that indicates that both parties were divided on the issue of handicapped infants.**
###Code
ct = pd.crosstab(df['handicapped-infants'], df['Party'])
ct
###Output
_____no_output_____
###Markdown
Not quite. Republicans tended to vote no. Perhaps a better interpretation is: There is insufficient data to estimate the population mean.

Here we iterate through every issue:
1. Indicate the most divisive issue
2. Plot all the confidence intervals along with a bar plot of votes from both parties
###Code
top = ''
largest_divide = 0
for q in df.columns[1:]:
result = question_plot(q)
# print(result, largest_divide)
if result > largest_divide:
largest_divide = result
top = q
print(f"{top} is the most divisive bill with a {largest_divide} spread.")
###Output
physician-fee-freeze is the most divisive bill with a 0.9338247338247339 spread.
###Markdown
Summary
My key takeaway from this exercise was that we can seldom make useful estimates of the population mean among republicans and democrats based on congressional voting records. In most cases, the confidence interval spans the entire space of possible averages. There simply isn't enough data. The only issues where the confidence interval bounds appeared near the middle were on "physician-fee-freeze" and "export-administration-act-south-africa". A case could also be made for "adoption of the budget resolution".
I could be wrong. An alternative explanation for these results is just that Student's t-distribution is not good enough to find confidence intervals over binomial distributions. But I haven't found evidence of that online.
###Code
persons_data_url = "https://raw.githubusercontent.com/strangelycutlemon/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module3-databackedassertions/persons.csv"
persons_data = pd.read_csv(persons_data_url)
persons_data.head()
persons_data.dtypes
age_bins = pd.cut(persons_data['age'], 8)
weight_bins = pd.cut(persons_data['weight'], 5)
et_bins = pd.cut(persons_data['exercise_time'], 20)
# # Sorting to avoid a crosstab bug
# df = df.sort_values(by='weight', ascending=True)
observed = pd.crosstab(persons_data['age'], persons_data['weight']).values
observed.shape
# Note: this uses the raw columns; the binned age_bins/weight_bins above could be substituted here
contingency_table = pd.crosstab(persons_data['age'], persons_data['weight'], margins=True)
contingency_table
###Output
_____no_output_____
###Markdown
Assignment - Build a confidence interval
###Code
# TODO - your code!
!wget https://raw.githubusercontent.com/scottwmwork/datasets/master/house-votes-84.data
import pandas as pd
df = pd.read_csv("house-votes-84.data", header=None,
names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa'])
import numpy as np
df = df.replace({'?':np.NaN, 'n':0, 'y':1})
# Fill remaining missing votes from neighboring rows (backward then forward fill)
df = df.fillna(method='bfill').fillna(method='ffill')
rep = df[df["party"] == "republican"]
dem = df[df["party"] == "democrat"]
rep.head()
from scipy import stats
def confidence_interval(data, confidence_level=0.95):
data = data.dropna()
sample_mean = np.mean(data)
sample_size = len(data)
sample_std_dev = np.std(data, ddof=1)
standard_error = sample_std_dev / (sample_size**.5)
margin_of_error = standard_error * stats.t.ppf((1 + confidence_level) / 2.0, sample_size - 1)
return (sample_mean, sample_mean - margin_of_error, sample_mean + margin_of_error)
confidence_interval(rep["budget"])
#Now we want to plot the CI for each column for each party!
#Step 1: combine CI for republican and combine CI for republicans
rep_ci = pd.DataFrame()
names = ['handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa']
#create index for dataframe
rep_ci['issue'] = names
mean = []
lower = []
upper = []
#make lists of mean, lower, and upper bounds for all issues
for name in names:
tup = confidence_interval(rep[name])
mean.append(tup[0])
lower.append(tup[1])
upper.append(tup[2])
#add lists to dataframe
rep_ci['mean'] = mean
rep_ci['lower'] = lower
rep_ci['upper'] = upper
rep_ci #Show Dataframe from republican confidence intervals
#Creating dataframe out of democrat CI
dem_ci = pd.DataFrame()
names = ['handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa']
#create index for dataframe
dem_ci['issue'] = names
mean = []
lower = []
upper = []
#make lists of mean, lower, and upper bounds for all issues
for name in names:
tup = confidence_interval(dem[name])
mean.append(tup[0])
lower.append(tup[1])
upper.append(tup[2])
#add lists to dataframe
dem_ci['mean'] = mean
dem_ci['lower'] = lower
dem_ci['upper'] = upper
dem_ci #Show Dataframe from democrat confidence intervals
# Graphically (with a plot) represent a confidence interval
budget_rep_ci = confidence_interval(rep["budget"])
import matplotlib.pyplot as plt
import seaborn as sns
sns.kdeplot(df["budget"], shade=True)
plt.axvline(x = budget_rep_ci[0], color = 'r') #mean
plt.axvline(x = budget_rep_ci[1], color = 'b') #lower bound
plt.axvline(x = budget_rep_ci[2], color = 'y') #upper bound
# Graphically (with a plot) represent a confidence interval
budget_dem_ci = confidence_interval(dem["budget"])
import matplotlib.pyplot as plt
import seaborn as sns
sns.kdeplot(df["budget"])
plt.axvline(x = budget_dem_ci[0], color = 'r') #mean
plt.axvline(x = budget_dem_ci[1], color = 'b') #lower bound
plt.axvline(x = budget_dem_ci[2], color = 'y') #upper bound
votes_no_rep = 0
votes_yes_rep = 0
votes_no_dem = 0
votes_yes_dem = 0
for name in names:
    # Count 0 (no) and 1 (yes) votes explicitly rather than relying on value_counts() ordering
    votes_no_rep += (rep[name] == 0).sum()
    votes_no_dem += (dem[name] == 0).sum()
    votes_yes_rep += (rep[name] == 1).sum()
    votes_yes_dem += (dem[name] == 1).sum()
print("Total Votes Yes Democrat: ", votes_yes_dem)
print("Total Votes no Democrat: ", votes_no_dem)
print("Total Votes yes Republican: ", votes_yes_rep)
print("Total Votes no Republican: ", votes_no_rep)
###Output
_____no_output_____
###Markdown
Chi-squared tests
###Code
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv', na_values=" ?")
print(df.shape)
df.describe(exclude='number')
cut_points = [0, 9, 19, 29, 39, 49, 1000]
label_names = ['0-9', '10-19', '20-29', '30-39', '40-49', '50+']
df['hours_per_week_categories'] = pd.cut(df['hours-per-week'], cut_points, labels=label_names)
df = df.sort_values(by='hours_per_week_categories', ascending=True)
contingency_table = pd.crosstab(df['sex'], df['hours_per_week_categories'], margins=True)
femalecount = contingency_table.iloc[0][0:6].values
malecount = contingency_table.iloc[1][0:6].values
row_sums = contingency_table.iloc[0:2, 6].values
col_sums = contingency_table.iloc[2, 0:6].values
total = contingency_table.loc['All','All']
expected = []
for i in range(len(row_sums)):
expected_row = []
for column in col_sums:
expected_val = column*row_sums[i]/total
expected_row.append(expected_val)
expected.append(expected_row)
expected = np.array(expected)
observed = pd.crosstab(df['sex'], df['hours_per_week_categories']).values
#Calculate X^2
chi_squared = ((observed - expected)**2/(expected)).sum()
print(f"Chi-Squared: {chi_squared}")
# Calculate Degrees of Freedom
dof = (len(row_sums)-1)*(len(col_sums)-1)
print(f"Degrees of Freedom: {dof}")
###Output
_____no_output_____
###Markdown
Run a χ2 Test using Scipy
###Code
chi_squared, p_value, dof, expected = stats.chi2_contingency(observed)
print(f"Chi-Squared: {chi_squared}")
print(f"P-value: {p_value}")
print(f"Degrees of Freedom: {dof}")
print("Expected: \n", np.array(expected))
###Output
_____no_output_____
###Markdown
Assignment - Build a confidence interval
###Code
import pandas as pd
import numpy as np
import seaborn as sns
import scipy.stats as stats
import matplotlib.pyplot as plt
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data
# Load Data
df = pd.read_csv('house-votes-84.data',
header=None,
names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa'])
print(df.shape)
df.head()
#y/n to numbers, ? to NaN
df = df.replace({'?':np.NaN, 'n':0, 'y':1})
df.head()
#filter a dem df and a rep df
dem = df[df['party'] == 'democrat']
rep = df[df['party'] == 'republican']
rep.shape,dem.shape
def confidence_interval(data, confidence=0.95):
"""
Calculate a confidence interval around a sample mean for given data.
Using t-distribution and two-tailed test, default 95% confidence.
Arguments:
data - iterable (list or numpy array) of sample observations
confidence - level of confidence for the interval
Returns:
tuple of (mean, lower bound, upper bound)
"""
data = np.array(data)
mean = np.mean(data)
n = len(data)
stderr = stats.sem(data)
#stderr = np.std(data, ddof=1) / np.sqrt(n)
margin_of_error = stderr * stats.t.ppf((1 + confidence) / 2.0, n - 1)
print(margin_of_error)
return (mean, mean - margin_of_error, mean + margin_of_error)
confidence_interval(rep['south-africa'].dropna())
###Output
0.07788820497097171
###Markdown
With a margin of error of $\pm$7.8% and 95% confidence, Republicans support the South Africa bill at a rate of 65.8%. --- We can conclude with 95% confidence based on our sample that Republicans support the South Africa bill at a rate between 58.0% and 73.5%.
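Because this vote is a binary proportion, a comparable interval can also be computed with a proportion-specific method; a sketch, assuming the `rep` dataframe from the cells above and that `statsmodels` is available:

```python
from statsmodels.stats.proportion import proportion_confint

votes = rep['south-africa'].dropna()
# Normal-approximation confidence interval for the share of 'yes' votes
low, high = proportion_confint(count=votes.sum(), nobs=len(votes), alpha=0.05)
print(f"Proportion CI: ({low:.3f}, {high:.3f})")
```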
###Code
#a plot of the south africa vote
politics = sns.color_palette(["#DE0100","#1405BD"])
sns.set_palette(politics)
ax = sns.barplot(x="party",y="south-africa",data=df,
capsize=.2)
#confirm the locations of the error bar seaborn automatically inserts
ax.axhline(.58,color='black')
ax.axhline(.735,color='black')
ax = sns.kdeplot(rep['south-africa'].dropna())
ax.axvline(x=confidence_interval(rep['south-africa'].dropna())[1], color='green')
ax.axvline(x=confidence_interval(rep['south-africa'].dropna())[2], color='green')
ax.axvline(x=confidence_interval(rep['south-africa'].dropna())[0], color='k');
#chi square data
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv', na_values=" ?")
print(df.shape)
df.head()
df.describe(exclude='number')
df['sex'].value_counts()
df['race'].value_counts()
df = df.sort_values(by='race', ascending=True)
df.head()
contingency_table = pd.crosstab(df['sex'],df['race'],margins=True)
contingency_table
fcount = contingency_table.iloc[0][0:5].values
fcount
mcount = contingency_table.iloc[1][0:5].values
mcount
races = ["Native","Asian/PI","Black","Other","White"]
plot = plt.bar(races,fcount)
plot2 = plt.bar(races,mcount,bottom=fcount)
plt.legend((plot,plot2), ('Female','Male'))
plt.show();
#isolate the sums for each row (sex) and column (race)
row_sums = contingency_table.iloc[0:2, 5].values
col_sums = contingency_table.iloc[2,0:5].values
row_sums,col_sums
total = contingency_table.loc['All','All']
total
#constructing the expected numpy array
expected = []
for i in range(len(row_sums)):
expected_row = []
for column in col_sums:
expected_val = column*row_sums[i]/total
expected_row.append(expected_val)
expected.append(expected_row)
expected = np.array(expected)
print(expected.shape)
print(expected)
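# A vectorized alternative (sketch): the nested loop above is equivalent to a single
# outer product of the row and column totals divided by the grand total.
expected_outer = np.outer(row_sums, col_sums) / total  # same values as `expected`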
#viewing the observed crosstab
observed = pd.crosstab(df['sex'], df['race']).values
print(observed.shape)
observed
#mathing the arrays directly, without any loops
chi_squared = ((observed - expected)**2/(expected)).sum()
print(f"Chi-Squared: {chi_squared}")
#Just Using Scipy, Thank God
chi_squared, p_value, dof, expected = stats.chi2_contingency(observed)
print(f"Chi-Squared: {chi_squared}")
print(f"P-value: {p_value}")
print(f"Degrees of Freedom: {dof}")
print("Expected: \n", np.array(expected))
###Output
Chi-Squared: 454.2671089131088
P-value: 5.192061302760456e-97
Degrees of Freedom: 4
Expected:
[[ 102.87709223 343.69549461 1033.40204539 89.64531188
9201.3800559 ]
[ 208.12290777 695.30450539 2090.59795461 181.35468812
18614.6199441 ]]
###Markdown
Assignment - Build a confidence intervalA confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)): Confidence Intervals:1. Generate and numerically represent a confidence interval2. Graphically (with a plot) represent the confidence interval3. Interpret the confidence interval - what does it tell you about the data and its distribution? Chi-squared tests:4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared tests on that data - By hand using Numpy - In a single line using Scipy
###Code
import pandas as pd
import numpy as np
# import seaborn as sns
# from matplotlib import style
from scipy.stats import ttest_1samp
from scipy.stats import ttest_ind, ttest_ind_from_stats, ttest_rel
from scipy import stats
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data',
header=None,
names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa'])
print(df.shape)
df.head()
df= df.replace({'?':np.NaN, 'n':0, 'y':1})
df.head()
df.isnull().sum()
rep = df[df.party == 'republican']
print(rep.shape)
rep.head()
dem = df[df.party=='democrat']
print(dem.shape)
dem.head()
df.party.value_counts()
df=df.fillna(0)
print(df['water-project'].shape)
sample_Water_project= df['water-project'].sample(100)
print(sample_Water_project.shape)
sample_Water_project.head()
def confidence_interval(data, confidence_level=0.95):
data = np.array(data)
sample_mean = np.mean(data)
sample_size = len(data)
sample_std_dev = np.std(data, ddof=1)
standard_error = sample_std_dev / (sample_size**.5)
margin_of_error = standard_error * stats.t.ppf((1 + confidence_level) / 2.0, sample_size - 1)
return (sample_mean, sample_mean - margin_of_error, sample_mean + margin_of_error)
confidence_interval(sample_Water_project)
import seaborn as sns
import matplotlib.pyplot as plt
sns.kdeplot(sample_Water_project)
CI= confidence_interval(sample_Water_project)
plt.axvline(x=CI[1], color='red')
plt.axvline(x=CI[2], color='red')
plt.axvline(x=CI[0], color='k');
###Output
_____no_output_____
###Markdown
3. Interpret the Confidence Interval. The confidence interval gives the bounds of statistical significance for our t-test: a sample mean that falls inside of our confidence interval will "FAIL TO REJECT" our null hypothesis, while a sample mean that falls outside of it will "REJECT" our null hypothesis. Chi-squared tests: By hand using Numpy
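A minimal sketch of the decision rule described above, using `confidence_interval` and `sample_Water_project` from the cells above; the hypothesized mean of 0.5 is an illustrative choice:

```python
mean, lower, upper = confidence_interval(sample_Water_project)
null_value = 0.5  # illustrative hypothesized population mean
if lower <= null_value <= upper:
    print("Fail to reject the null hypothesis")
else:
    print("Reject the null hypothesis")
```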
###Code
#4. Chi-test
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv', na_values=" ?")
print(df.shape)
df.head()
df.describe(exclude='number')
cut_points = [0, 9, 19, 29, 39, 49, 1000]
label_names = ['0-9', '10-19', '20-29', '30-39', '40-49', '50+']
df['hours_per_week_categories'] = pd.cut(df['hours-per-week'], cut_points, labels=label_names)
df.head()
df['salary'].value_counts()
df['hours_per_week_categories'].value_counts()
df = df.sort_values(by='hours_per_week_categories', ascending=True)
contingency_table = pd.crosstab(df['salary'], df['hours_per_week_categories'], margins=True)
contingency_table
less_or_equal_50k = contingency_table.iloc[0][0:6]
less_or_equal_50k
more_than_50k = contingency_table.iloc[1][0:6]
more_than_50k
import matplotlib.pyplot as plt
import seaborn as sns
#Plots the bar chart
fig = plt.figure(figsize=(10, 5))
sns.set(font_scale=1.8)
categories = ["0-9","10-19","20-29","30-39","40-49","50+"]
p1 = plt.bar(categories, more_than_50k, 0.55, color='#d62728')
p2 = plt.bar(categories, less_or_equal_50k, 0.55, bottom=more_than_50k)
plt.legend((p2[0], p1[0]), ('<=50K', '>50K'))
plt.xlabel('Hours per Week Worked')
plt.ylabel('Count')
plt.show()
row_sums = contingency_table.iloc[0:2, 6].values
col_sums = contingency_table.iloc[2, 0:6].values
print(row_sums)
print(col_sums)
total = contingency_table.loc['All','All']
total
len(df)
df.shape[0]
expected = []
for i in range(len(row_sums)):
expected_row = []
for column in col_sums:
expected_val = column*row_sums[i]/total
expected_row.append(expected_val)
expected.append(expected_row)
expected = np.array(expected)
print(expected.shape)
print(expected)
observed = pd.crosstab(df['salary'], df['hours_per_week_categories']).values
print(observed.shape)
observed
chi_squared = ((observed - expected)**2/(expected)).sum()
print(f"Chi-Squared: {chi_squared}")
dof = (len(row_sums)-1)*(len(col_sums)-1)
print(f"Degrees of Freedom: {dof}")
###Output
Degrees of Freedom: 5
###Markdown
In a single line using Scipy
###Code
df= pd.crosstab(df['salary'], df['hours_per_week_categories']).values
print(df.shape)
df
chi_squared, p_value, dof, expected = stats.chi2_contingency(df)
print(f"Chi-Squared: {chi_squared}")
print(f"P-value: {p_value}")
print(f"Degrees of Freedom: {dof}")
print("Expected: \n", np.array(expected))
###Output
Chi-Squared: 2215.839963859004
P-value: 0.0
Degrees of Freedom: 5
Expected:
[[ 347.70922269 945.95129142 1815.98353859 2783.95135285
13920.51595467 4905.88863978]
[ 110.29077731 300.04870858 576.01646141 883.04864715
4415.48404533 1556.11136022]]
###Markdown
Assignment - Build a confidence intervalA confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)): Confidence Intervals:1. Generate and numerically represent a confidence interval2. Graphically (with a plot) represent the confidence interval3. Interpret the confidence interval - what does it tell you about the data and its distribution? Chi-squared tests:4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared tests on that data - By hand using Numpy - In a single line using Scipy
###Code
# TODO - your code!
# Practice
import numpy as np
import scipy.stats
# create table as np array
table = np.array([[200, 290],[400,910] ])
print("contingency table: \n", table)
stat, p, dof, expected = scipy.stats.chi2_contingency(table, correction=False)
### Print out the stats in a nice format
print('Expected values: \n ', expected.round(2))
print(f'The chi square statistics is: {stat:.3f}')
print(f'The p value is: {p:.6f}')
###Output
contingency table:
[[200 290]
[400 910]]
Expected values:
[[163.33 326.67]
[436.67 873.33]]
The chi square statistics is: 16.965
The p value is: 0.000038
###Markdown
Assignment - Build a confidence intervalA confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)): Confidence Intervals:1. Generate and numerically represent a confidence interval2. Graphically (with a plot) represent the confidence interval3. Interpret the confidence interval - what does it tell you about the data and its distribution? Chi-squared tests:4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared tests on that data - By hand using Numpy - In a single line using Scipy
###Code
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data
import pandas as pd
df = pd.read_csv('house-votes-84.data',
header=None,
names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa'])
print(df.shape)
df.head()
import numpy as np
df = df.replace({'?':np.NaN, 'n':0, 'y':1})
df.head()
df = df.fillna(method='ffill')
df.isnull().sum()
dem = df[df['party'] == 'democrat']
rep = df[df['party'] == 'republican']
import numpy as np
import scipy.stats as stats
def confidence_interval(data, confidence=0.95):
data = np.array(data)
mean = np.mean(data)
n = len(data)
stderr = np.std(data, ddof=1) / np.sqrt(n)
margin_of_error = stderr * stats.t.ppf((1 + confidence) / 2.0, n - 1)
print(margin_of_error)
return (mean, mean - margin_of_error, mean + margin_of_error)
demwater = dem['water-project']
confidence_interval(demwater)
import seaborn as sns
import matplotlib.pyplot as plt
sns.kdeplot(demwater)
CI = confidence_interval(demwater)
plt.axvline(x=CI[1], color='red')
plt.axvline(x=CI[2], color='red')
plt.axvline(x=CI[0], color='k');
###Output
0.060350601849396855
###Markdown
With 95% confidence, we estimate that the population mean lies between 0.430 and 0.551, i.e. 0.491 +/- 0.060.
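A short sketch of how this interval could be reported with more readable precision, using the `confidence_interval` function and the `demwater` series defined above:

```python
mean, low, high = confidence_interval(demwater)
print(f"95% CI: {mean:.3f} ({low:.3f}, {high:.3f})")
```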
###Code
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv', na_values=" ?")
print(df.shape)
df.head()
df['sex'].value_counts()
df['education'].value_counts()
contingency_table = pd.crosstab(df['sex'], df['education'], margins=True)
contingency_table
femalecount = contingency_table.iloc[0][0:16].values
femalecount
malecount = contingency_table.iloc[1][0:16].values
malecount
row_sums = contingency_table.iloc[0:2, 16].values
col_sums = contingency_table.iloc[2, 0:16].values
print(row_sums)
print(col_sums)
total = contingency_table.loc['All','All']
total
df.shape[0]
expected = []
for i in range(len(row_sums)):
expected_row = []
for column in col_sums:
expected_val = column*row_sums[i]/total
expected_row.append(expected_val)
expected.append(expected_row)
expected = np.array(expected)
print(expected.shape)
print(expected)
observed = pd.crosstab(df['sex'], df['education']).values
print(observed.shape)
observed
chi_squared = ((observed - expected)**2/(expected)).sum()
print(f"Chi-Squared: {chi_squared}")
dof = (len(row_sums)-1)*(len(col_sums)-1)
print(f"Degrees of Freedom: {dof}")
chi_squared, p_value, dof, expected = stats.chi2_contingency(observed)
print(f"Chi-Squared: {chi_squared}")
print(f"P-value: {p_value}")
print(f"Degrees of Freedom: {dof}")
print("Expected: \n", np.array(expected))
###Output
Chi-Squared: 297.71500372503687
P-value: 1.667778440920507e-54
Degrees of Freedom: 15
Expected:
[[ 308.63127668 388.6835478 143.2340223 55.57347747 110.15457142
213.69325266 170.02837751 352.95774086 457.15801112 1771.40459445
136.61813212 3473.67313657 569.95893861 16.87051995 190.53763705
2411.82276343]
[ 624.36872332 786.3164522 289.7659777 112.42652253 222.84542858
432.30674734 343.97162249 714.04225914 924.84198888 3583.59540555
276.38186788 7027.32686343 1153.04106139 34.12948005 385.46236295
4879.17723657]]
###Markdown
Assignment - Build a confidence intervalA confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)): Confidence Intervals:1. Generate and numerically represent a confidence interval2. Graphically (with a plot) represent the confidence interval3. Interpret the confidence interval - what does it tell you about the data and its distribution? Chi-squared tests:4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared tests on that data - By hand using Numpy - In a single line using Scipy
###Code
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv("house-votes-84.data", names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa'])
df.head()
#Now, lets turn the table to binary
for vote in df:
if vote != 'party':
df[vote] = df[vote].apply(lambda x: 1 if x == 'y' else 0 if x == 'n' else np.NaN)
df.head()
import numpy as np
data = np.array(df['handicapped-infants'])
data = data[~np.isnan(data)]
data
import statsmodels.stats.proportion as portion
CI = portion.proportion_confint(data.sum(), len(data))
CI
import seaborn as sns
sns.kdeplot(df['handicapped-infants'])
plt.axvline(x=CI[0], color='red')
plt.axvline(x=CI[1], color='red')
plt.axvline(x=(df['handicapped-infants'].sum() / len(df['handicapped-infants'])), color = 'k');
from scipy import stats
def confidence_interval(data, confidence_level=.95):
data = np.array(data)
data = data[~np.isnan(data)]
sample_mean = np.mean(data)
sample_size = len(data)
sample_std_dev = np.std(data, ddof=1)
std_err = sample_std_dev / (sample_size ** .5)
err_margin = std_err * stats.t.ppf((1 + confidence_level) / 2, sample_size - 1)
return (sample_mean, sample_mean - err_margin, sample_mean + err_margin)
cf = confidence_interval(df['handicapped-infants'])
cf
###Output
_____no_output_____
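As a quick sanity check, the two intervals computed in this notebook, the t-based one (`cf`) and the proportion-based one from `statsmodels` (`CI`), can be printed side by side; a sketch using those names:

```python
print("t-based interval:    ", (round(cf[1], 3), round(cf[2], 3)))
print("proportion interval: ", (round(CI[0], 3), round(CI[1], 3)))
```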
###Markdown
Assignment - Build a confidence intervalA confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)): Confidence Intervals:1. Generate and numerically represent a confidence interval2. Graphically (with a plot) represent the confidence interval3. Interpret the confidence interval - what does it tell you about the data and its distribution? Chi-squared tests:4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared tests on that data - By hand using Numpy - In a single line using Scipy
###Code
import pandas as pd
import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
from scipy.stats import ttest_ind
#Downloading data from the website
col_names = ['party','handicapped_infants','water_project_cost_sharing','adoption_of_the_budget_resolution','physician_fee_freeze',
'el_salvador_aid','religious_groups_in_schools','anti_satellite_test_ban','aid_to_nicaraguan_contras',
'mx_missile','immigration','synfuels_corporation_cutback','education_spending',
'superfund_right_to_sue','crime','duty_free_exports','export_administration_act_south_africa']
voting_data = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data",names=col_names)
print(voting_data.shape)
#Replacing categorical values with numbers
voting_data=voting_data.replace({'?':np.nan,'y':1,'n':0})
#Checking for Null values
#Observation: the null counts don't match those listed on the UCI website; I rechecked, and the UCI description appears to be incorrect.
# Creating Samples based on Parties
republics = voting_data[voting_data.party=='republican']
democrats = voting_data[voting_data.party=='democrat']
print("Republicans :",republics.shape)
print("Democrats :",democrats.shape)
voting_data.head()
fig = plt.figure(figsize=(5,100))
nrow =1
ax = fig.subplots(4,4)
ax[0]
#Generating confidence interval for each of the Vote
confidence_interval = 0.95
fig = plt.figure(figsize=(20,20))
nrow =0
ncol =0
ax = fig.subplots(4,4)
for col_name in voting_data.columns:
if(col_name!='party'):
sample_vote = voting_data[~voting_data[col_name].isnull()][col_name]
size_sample = len(sample_vote)
mean_sample = sample_vote.mean()
std_sample = np.std(sample_vote.values,ddof=1)
standard_error = std_sample/np.sqrt(size_sample)
margin_error = standard_error*sc.stats.t.ppf((1+confidence_interval)/2,size_sample-1)
#print("Sample size: ",size_sample,"\nSample mean: ",mean_sample,"\nSample Std Dev: ",std_sample,"\nStandard error: ",standard_error,"\nMargin of error: ",margin_error)
sample_vote.plot(kind='kde',ax=ax[nrow,ncol])
ax[nrow,ncol].axvline(x=mean_sample,color='blue')
ax[nrow,ncol].axvline(x=(mean_sample-margin_error),color='purple')
ax[nrow,ncol].axvline(x=(mean_sample+margin_error),color='purple')
ax[nrow,ncol].set_title(col_name)
ax[nrow,ncol].set_ylabel("")
if(ncol==3):
ncol=0
nrow +=1
else:
ncol +=1
fig
#Calculating chi squared test
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv', na_values=" ?")
print(df.shape)
df.head()
df['education'].value_counts()
df['workclass'].value_counts()
#Calculating chi square for workingclass against education
working_education = pd.crosstab(df['workclass'],df['education'],margins=True)
nrows,ncols = working_education.shape
working_education
#Calculating chisquare using numpy
observed_values = working_education.values[:nrows-1,:ncols-1]
total_values = working_education.values[-1,-1]
row_totals = working_education.values[:-1,-1]
col_totals = working_education.values[-1,:-1]
expected_values = np.outer(row_totals,col_totals)/total_values
chisquare_value = ((observed_values-expected_values)**2/expected_values).sum()
chisquare_value
#calculating chisquare using stat function
sc.stats.chi2_contingency(observed_values)
#With a chi-square value of about 2247 and a p-value of essentially 0, I reject the null hypothesis that education and working class are independent of each other.
###Output
_____no_output_____
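The cell above passes the result of `chi2_contingency` straight through; a sketch of unpacking the return values so the statistic, p-value, and degrees of freedom can be read off directly (names from the cells above):

```python
chi2, p, dof, expected_table = sc.stats.chi2_contingency(observed_values)
print(f"chi-square = {chi2:.1f}, p-value = {p:.3g}, dof = {dof}")
```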
###Markdown
Assignment - Build a confidence intervalA confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)): Confidence Intervals:1. Generate and numerically represent a confidence interval2. Graphically (with a plot) represent the confidence interval3. Interpret the confidence interval - what does it tell you about the data and its distribution? Chi-squared tests:4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared tests on that data - By hand using Numpy - In a single line using Scipy Confidence Intervals 1) Generate and numerically represent a confidence interval
###Code
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa']
df = pd.read_csv('house-votes-84.data',
header=None,
names=names)
print(df.shape)
df.head()
df = df.replace({'?': np.NaN, 'y':1, 'n': 0})
dem = df[df['party'] == 'democrat']
rep = df[df['party'] == 'republican']
dem['aid-to-contras'].mean()
#Dropping nas to use with function
dem_contras = dem['aid-to-contras'].dropna()
def sample_confidence_interval(data, confidence_level=0.95):
data = np.array(data)
mean = sum(data) / len(data)
std_error = np.std(data, ddof=1) / (len(data))**(1/2)
t_value = stats.t.ppf((1 + confidence_level) / 2.0, len(data) - 1)
margin = t_value * std_error
return (mean, mean - margin, mean + margin)
#Checking to make sure the code works
sample_confidence_interval(dem_contras)
# I tried a few different styles, but I liked Ryan's graphical
#representation best
dem_contras.plot(kind='density', figsize=(10,8))
#zooming in to get a better view, the margin of error is pretty small
plt.xlim(left = -0.1, right=1.1)
plt.grid()
CI = sample_confidence_interval(dem_contras)
plt.axvline(x=CI[1], color='red', lw=1)
plt.axvline(x=CI[2], color='red', lw=1)
plt.axvline(x=CI[0], color='black', lw=3);
dem['south-africa'].dropna().plot(kind='density', figsize=(10,8))
CI = sample_confidence_interval(dem['south-africa'].dropna())
plt.xlim(left=-.2, right=1.2)
plt.grid()
plt.axvline(x=CI[1], color='red')
plt.axvline(x=CI[2], color='red')
plt.axvline(x=CI[0], color='black');
#This graph serves no purpose, and should be ignored. But it looks cool.
for issue in df.columns[range(1,17)]:
dem[issue].dropna().plot(kind='density', figsize=(10,8))
CI = sample_confidence_interval(dem[issue].dropna())
plt.axvline(x=CI[1], color='red')
plt.axvline(x=CI[2], color='red')
plt.axvline(x=CI[0], color='black');
###Output
_____no_output_____
###Markdown
Chi-squared Test
###Code
# Loading in a dataset from a previous lecture
dataset_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data'
column_headers = ['age', 'workclass', 'fnlwgt', 'education', 'education-num',
'marital-status', 'occupation', 'relationship', 'race', 'sex',
'capital-gain', 'capital-loss', 'hours-per-week',
'native-country', 'income']
#Note that supplying the wrong number of column headers makes pandas treat the far-left column as the index
df_chi = pd.read_csv(dataset_url, names=column_headers)
print(df_chi.shape)
df_chi.head(5)
df_chi['race'].value_counts()
df_chi['marital-status'].value_counts()
#Putting the two categorical variables into a crosstab
crosstab_table = pd.crosstab(df_chi['sex'], df_chi['race'], margins=True)
crosstab_table
row_sums = crosstab_table.iloc[0:2, 5].values
col_sums = crosstab_table.iloc[2, 0:5].values
total = crosstab_table.loc['All','All']
print(row_sums)
print(col_sums)
print(total)
#Creating an empty list to fill with expected values
expected = []
for num in range(len(row_sums)):
expected_row = []
for col in col_sums:
expected_val = col*row_sums[num]/total
expected_row.append(expected_val)
expected.append(expected_row)
expected = np.array(expected)
###Output
_____no_output_____
###Markdown
Assignment - Build a confidence intervalA confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)): Confidence Intervals:1. Generate and numerically represent a confidence interval2. Graphically (with a plot) represent the confidence interval3. Interpret the confidence interval - what does it tell you about the data and its distribution? Chi-squared tests:4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared tests on that data - By hand using Numpy - In a single line using Scipy
###Code
# TODO - your code!
###Output
_____no_output_____
###Markdown
Assignment - Build a confidence intervalA confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)): Confidence Intervals:1. Generate and numerically represent a confidence interval2. Graphically (with a plot) represent the confidence interval3. Interpret the confidence interval - what does it tell you about the data and its distribution? Chi-squared tests:4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared tests on that data - By hand using Numpy - In a single line using Scipy
###Code
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from scipy import stats
###Output
_____no_output_____
###Markdown
Confidence Interval
###Code
#import with column names and NaN values.
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data',
header=None,
names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa'])
df.head()
df = df.replace({'y': 1, 'n': -1, '?': 0})
df.shape
df.head()
df['party'].value_counts()
dems = df[df['party'] == 'democrat']
reps = df[df['party'] == 'republican']
dems.head()
reps.head()
def conf_inter(data, conf=0.95):
data = np.array(data)
data = data[~np.isnan(data)]
mean = np.mean(data)
n = len(data)
stderr = np.std(data, ddof=1) / np.sqrt(n)
margin_err = stderr * stats.t.ppf((1 + conf) / 2.0, n - 1)
return mean, margin_err, (mean - margin_err, mean + margin_err)
conf_inter(dems['budget'])
cols = list(df.columns[1:])
cols
means = [conf_inter(df[i])[0] for i in cols]
means
errors = [conf_inter(df[i])[1] for i in cols]
errors
fig, ax = plt.subplots()
ax.bar(np.linspace(0, len(means), len(means)), means, yerr=errors, color='green')
ax.set_xticks(range(17))
ax.set_xticklabels(cols, rotation=90);
ax.yaxis.grid(True)
ax.set_title("How is your government voting?")
plt.show()
dem_means = [conf_inter(dems[i])[0] for i in cols]
dem_errors = [conf_inter(dems[i])[1] for i in cols]
fig, ax = plt.subplots()
ax.bar(np.linspace(0, len(dem_means), len(dem_means)), dem_means, yerr=dem_errors, color='blue')
ax.set_xticks(range(17))
ax.set_xticklabels(cols, rotation=90);
ax.yaxis.grid(True)
ax.set_title("How are the dems voting?")
plt.show()
rep_means = [conf_inter(reps[i])[0] for i in cols]
rep_errors = [conf_inter(reps[i])[1] for i in cols]
fig, ax = plt.subplots()
ax.bar(np.linspace(0, len(rep_means), len(rep_means)), rep_means, yerr=rep_errors, color='red')
ax.set_xticks(range(17))
ax.set_xticklabels(cols, rotation=90);
ax.yaxis.grid(True)
ax.set_title("How are the reps voting?")
plt.show()
###Output
_____no_output_____
###Markdown
Chi testing
###Code
###Output
_____no_output_____
###Markdown
Assignment - Build a confidence intervalA confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)): Confidence Intervals:1. Generate and numerically represent a confidence interval2. Graphically (with a plot) represent the confidence interval3. Interpret the confidence interval - what does it tell you about the data and its distribution? Chi-squared tests:4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared tests on that data - By hand using Numpy - In a single line using Scipy
###Code
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data
import pandas as pd
import numpy as np
import scipy.stats as stats
house = pd.read_csv("house-votes-84.data", header=None, names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa'] )
house=house.replace({'?':np.NaN, 'n':0, 'y':1})
# Split the data into republican and democrat; y=1, n=0, ? = np.NaN
#house.head(5)
#separate files
republican = house[house['party']== 'republican']
democrat = house[house['party'] == 'democrat']
#get the mean percentage of yes votes
MeanForDemo = democrat.mean()
List_of_dec_rounded_democrats = [round(elem,2) for elem in MeanForDemo]
MeanForRep = republican.mean()
List_of_dec_rounded_republican = [round(elem,2) for elem in MeanForRep]
List_of_dec_rounded_republican
democrat.columns
def confidence_interval(data, confidence=.95):
    data = np.array(data)
    data = data[~np.isnan(data)]  # drop missing votes before computing the interval
    mean = np.mean(data)
    n = len(data)
    #stderr = stats.sem(data)
    stderr = np.std(data, ddof=1) / np.sqrt(n)
    margin_of_error = stderr * stats.t.ppf((1+confidence)/2.0, n-1)
    print(margin_of_error)
    return (mean, mean-margin_of_error, mean+margin_of_error)
def mean_confidence_interval(data, confidence=.95):
    a = 1.0 * np.array(data)
    n = len(a)
    m = np.nanmean(a)
    se = stats.sem(a, nan_policy='omit')
    h = se * stats.t.ppf((1+confidence)/2., n-1)
    return m, m-h, m+h
###Output
_____no_output_____
###Markdown
1. Generate and numerically represent a confidence interval
###Code
mean_confidence_interval(democrat['physician-fee-freeze'])
###Output
_____no_output_____
###Markdown
Graphically (with a plot) represent the confidence interval
###Code
import seaborn as sns
import matplotlib.pyplot as plt
sns.kdeplot(democrat['physician-fee-freeze'])
CI = mean_confidence_interval(democrat['physician-fee-freeze'])
plt.axvline(x=CI[1], color='red')
plt.axvline(x=CI[2], color='red')
plt.axvline(x=CI[0], color='k')
ind = np.arange(16)
bills = democrat.columns[1:]  # the 16 vote columns, excluding 'party'
fig, ax = plt.subplots(figsize=(15,8))
#plot the mean share of 'yes' votes for each party
Demo = ax.bar(ind, List_of_dec_rounded_democrats, width=.3, color='b')
Rep = ax.bar(ind+.5, List_of_dec_rounded_republican, width=.3, color='r')
#set labels and title
ax.set_ylabel('percentage')
ax.set_title('comparison of each group')
#this is the x ticks
ax.set_xticks(ind + .3)
ax.set_xticklabels(bills, rotation=50)
ax.legend((Demo[0], Rep[0]), ('Democrats', 'Republicans'))
plt.show()
###Output
_____no_output_____
###Markdown
Redoing the graph; leaving the graph above to help with the redo.
###Code
#standard deviation of each vote column (skipping the 'party' column)
GetSTandardDev_democrat = []
for col in democrat.columns[1:]:
    GetSTandardDev_democrat.append(np.std(democrat[col]))
print(GetSTandardDev_democrat[5])
GetSTandardDev_Rep = []
for col in republican.columns[1:]:
    GetSTandardDev_Rep.append(np.std(republican[col]))
print(GetSTandardDev_Rep[5])
# libraries
import numpy as np
import matplotlib.pyplot as plt
# width of the bars
barWidth = 0.3
# Choose the height of the blue bars
bars1 = List_of_dec_rounded_demoncrats
# Choose the height of the red bars
bars2 = List_of_dec_rounded_republican
# Choose the height of the error bars (bars1)
yer1 = GetSTandardDev_democrat
# Choose the height of the error bars (bars2)
yer2 = GetSTandardDev_Rep
# The x position of bars
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
# Create blue bars for the democrats
plt.bar(r1, bars1, width = barWidth, color = 'blue', edgecolor = 'black', yerr=yer1, capsize=17, label='democrat')
# Create red bars for the republicans
plt.bar(r2, bars2, width = barWidth, color = 'red', edgecolor = 'black', yerr=yer2, capsize=17, label='republican')
# general layout: label each pair of bars with its bill name
plt.xticks([r + barWidth for r in range(len(bars1))], democrat.columns[1:], rotation=90)
plt.ylabel('mean share of yes votes')
plt.legend()
# Show graphic
plt.show()
bars1 = GetSTandardDev_democrat
type(bars1)
###Output
_____no_output_____
###Markdown
Assignment - Build a confidence intervalA confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)): Confidence Intervals:1. Generate and numerically represent a confidence interval2. Graphically (with a plot) represent the confidence interval3. Interpret the confidence interval - what does it tell you about the data and its distribution? Chi-squared tests:4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared tests on that data - By hand using Numpy - In a single line using Scipy
###Code
import pandas as pd
import numpy as np
from scipy import stats
# generate and numerically represent a confidence interval
def confidence_interval(data, confidence_level=0.95):
data = np.array(data)
sample_mean = np.mean(data)
sample_size = len(data)
sample_std_dev = np.std(data, ddof=1)
standard_error = sample_std_dev / (sample_size**.5)
margin_of_error = standard_error * stats.t.ppf((1 + confidence_level) / 2.0, sample_size - 1)
return (sample_mean, sample_mean - margin_of_error, sample_mean + margin_of_error)
coinflips_100 = np.random.binomial(n=1, p=.5, size=100)
confidence_interval(coinflips_100)
# graphically represent a confidence interval
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
sns.kdeplot(coinflips_100)
CI = confidence_interval(coinflips_100)
plt.axvline(x=CI[1], color='red')
plt.axvline(x=CI[2], color='red')
plt.axvline(x=CI[0], color='k')
plt.show()
# Confidence Interval == Bounds of statistical significance for our t-test
# A hypothesized mean that falls inside of our confidence interval means we "FAIL TO REJECT" our null hypothesis
# A hypothesized mean that falls outside of our confidence interval means we "REJECT" our null hypothesis
# load in data
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data
import pandas as pd
import numpy as np
import seaborn as sns
df = pd.read_csv('house-votes-84.data',
header=None,
names=['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa'])
print(df.shape)
df.head()
df = df.replace({'?':np.NaN, 'n':0, 'y':1})
print(df.shape)
df.head()
df = df.dropna()
print(df.shape)
df.head()
contingency_table = pd.crosstab(df['education'], df['budget'], margins=True)
contingency_table
# Expected value calculation
row_sums = contingency_table.iloc[0:2, 2].values
col_sums = contingency_table.iloc[2, 0:2].values
total = contingency_table.loc['All', 'All']
expected = []
for i in range(len(row_sums)):
expected_row = []
for column in col_sums:
expected_val = column*row_sums[i]/total
expected_row.append(expected_val)
expected.append(expected_row)
expected = np.array(expected)
print(expected.shape)
print(expected)
# Observed value calculation
observed = pd.crosstab(df['education'], df['budget']).values
print(observed.shape)
observed
# chi-squared statistic with numpy
chi_squared = ((observed - expected)**2/(expected)).sum()
print(f"Chi-Squared: {chi_squared}")
dof = (len(row_sums)-1)*(len(col_sums)-1)
print(f"Degrees of Freedom: {dof}")
# Chi-squared test using Scipy
chi_squared, p_value, dof, expected = stats.chi2_contingency(observed)
print(f"Chi-Squared: {chi_squared}")
print(f"P-Value: {p_value}")
print(f"Degrees of Freedom: {dof}")
print("Expected: \n", np.array(expected))
###Output
Chi-Squared: 104.47768266706376
P-Value: 1.5897396773487855e-24
Degrees of Freedom: 1
Expected:
[[58.25862069 65.74137931]
[50.74137931 57.25862069]]
###Markdown
Assignment - Build a confidence interval

A confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.

52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.

In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.

But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.

How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."

For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.

Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.

Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)):

Confidence Intervals:
1. Generate and numerically represent a confidence interval
2. Graphically (with a plot) represent the confidence interval
3. Interpret the confidence interval - what does it tell you about the data and its distribution?
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data', header=None)
df.head()
###Output
_____no_output_____
###Markdown
Clean dataset
###Code
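# Note: in the older pandas releases this notebook appears to target, replace('?', None) falls back
# to method='pad' (forward fill) instead of inserting NaN, which is why a leftover '?' in the first
# row is patched by hand on the next line.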
df =df.replace('?', None)
df[11][0]='n'
df.head()
df.isna().sum()
df.describe(exclude='number')
# Change n/y to binary
df =df.replace('y', 1)
df = df.replace('n', 0)
df.head()
df.columns = ['class', 'infants', 'water_cost', 'budget', 'fee_freeze', 'aid_elsalvador', 'rel_school', 'satellite', 'aid_contras', 'mx_missle', 'immigration', 'cutback', 'education', 'right_to_sue', 'crime', 'duty_free_ex', 'export_south_africa']
df.head()
df.describe()
###Output
_____no_output_____
###Markdown
Subset the data into two subsets, one for Democrats and one for Republicans
###Code
df_republican = df[df['class']== 'republican']
df_republican.head()
df_republican.shape
df_republican.columns
df_republican.describe()
df_democrat = df[df['class']== 'democrat']
df_democrat.head()
df_democrat.shape
df_democrat.describe()
###Output
_____no_output_____
###Markdown
Generate Confidence intervals
1. Generate and numerically represent a confidence interval
2. Graphically (with a plot) represent the confidence interval
3. Interpret the confidence interval - what does it tell you about the data and its distribution?
###Code
from scipy import stats
def confidence_interval(data, confidence=0.95):
"""
Calculate a confidence interval around a sample mean for given data.
Using t-distribution and two-tailed test, default 95% confidence.
Arguments:
data - iterable (list or numpy array) of sample observations
confidence - level of confidence for the interval
Returns:
tuple of (mean, lower bound, upper bound)
"""
data = np.array(data)
mean = np.mean(data)
n = len(data)
stderr = stats.sem(data)
interval = stderr * stats.t.ppf((1 + confidence) / 2.0, n - 1)
return (mean, mean - interval, mean + interval)
###Output
_____no_output_____
###Markdown
Infants issue
###Code
dem_infants= df_democrat['infants']
dem_infants.describe()
sample_size = 100
sample = dem_infants.sample(sample_size)
sample.head()
sample_mean = sample.mean()
sample_std = np.std(sample, ddof=1)
print(sample_mean, sample_std)
standard_error = sample_std/np.sqrt(sample_size)
standard_error
t = stats.t.ppf(0.975, sample_size - 1)  # two-tailed 95% critical value (about 1.98 for n=100)
(sample_mean, sample_mean - t*standard_error, sample_mean + t*standard_error)
confidence_interval(sample, confidence=0.95)
confidence_interval(dem_infants,confidence=0.95 )
# The sample mean (0.599) sits at the center of the 95% confidence interval of roughly 0.54 to 0.65,
# our estimate of the true proportion of Democrats voting 'yes' on this issue; the histogram below shows the raw 0/1 votes
plt.hist(sample, bins=10)
rep_infants= df_republican['infants']
rep_infants.describe()
sample_size1 = 100
sample1 = rep_infants.sample(sample_size1)
sample1.head()
sample_mean1 = sample1.mean()
sample_std1 = np.std(sample1, ddof=1)
print(sample_mean1, sample_std1)
standard_error1 = sample_std1/np.sqrt(sample_size)
standard_error1
t = stats.t.ppf(0.975, sample_size1 - 1)  # two-tailed 95% critical value (about 1.98 for n=100)
(sample_mean1, sample_mean1 - t*standard_error1, sample_mean1 + t*standard_error1)
confidence_interval(sample1, confidence=0.95)
# The sample mean (0.19) sits at the center of the 95% confidence interval of roughly 0.112 to 0.268,
# our estimate of the true proportion of Republicans voting 'yes' on this issue; the histogram below shows the raw 0/1 votes
plt.hist(sample1)
###Output
_____no_output_____
###Markdown
Chi-squared tests:
4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared test on that data
   - By hand using Numpy
   - In a single line using Scipy
###Code
# make a crosstab
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/adult.csv', na_values=" ?")
print(df.shape)
df.head()
df['hours-per-week'].hist(bins=20); # hours-per-week is the variable we bin below and test against sex, following the lecture example
df.describe(exclude='number')
# to test whether hours worked per week differs by sex, turn hours-per-week into a non-numerical category and compare it against sex
cut_points =[0,9,19,29,39,49,500] # cutoff points for hours per week
label_names = ['0-9', '10-19', '20-29', '30-39', '40-49', '50+'] # split into these time buckets
df['hours_per_week_categories'] = pd.cut(df['hours-per-week'], cut_points, labels=label_names)
df['hours_per_week_categories'].value_counts()
df['age'].value_counts()
# create the crosstab
df = df.sort_values(by='hours_per_week_categories')
contingency_table = pd.crosstab(df['sex'], df['hours_per_week_categories'], margins=True)
contingency_table
###Output
_____no_output_____
###Markdown
Expected Value Calculation

\begin{align}
expected_{i,j} = \frac{(row_{i\ total})(column_{j\ total})}{(\text{total observations})}
\end{align}
###Code
row_sums = contingency_table.iloc[0:2, 6].values # row totals: the 'All' column for the two sex rows (slice stops before index 2)
col_sums = contingency_table.iloc[2, 0:6].values # column totals: the 'All' row for the six hour buckets
print(row_sums)
print('__')
print(col_sums)
total = contingency_table.loc['All','All']
total
# showing how to manually get chi-squared, although it can also be done through scipy
expected = []
for row_sum in row_sums:
expected_row = []
for column in col_sums:
expected_val = column*row_sum/total
expected_row.append(expected_val)
expected.append(expected_row)
expected = np.array(expected)
print(expected.shape)
print(expected)
###Output
(2, 6)
[[0.08491754 1.26163201 2.31703572 1.88031694 2.55965726 0.53376739]
[0.11823961 1.7567028 3.22625226 2.61816283 3.56407973 0.74322042]]
###Markdown
Chi-Squared Statistic with Numpy

\begin{align}
\chi^2 = \sum \frac{(observed_{i}-expected_{i})^2}{(expected_{i})}
\end{align}

For the $observed$ values we will just use a version of our contingency table without the margins as a numpy array. In this way, if our observed values array and our expected values array are the same shape, then we can subtract them and divide them directly which makes the calculations a lot cleaner. No for loops!
###Code
observed = pd.crosstab(df['sex'], df['hours_per_week_categories']).values
print(observed.shape)
observed
chi_square = ((observed - expected)**2/(expected)).sum()
chi_square
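# Sketch: the p-value for the hand calculation via the chi-squared survival function;
# it should agree with the scipy result in the next cell, which uses the same observed table.
dof = (observed.shape[0] - 1) * (observed.shape[1] - 1)
p_value_by_hand = stats.chi2.sf(chi_square, df=dof)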
###Output
_____no_output_____
###Markdown
Run a $\chi^{2}$ Test using Scipy
###Code
chi_squared, p_value, dof, expected = stats.chi2_contingency(observed)
print(chi_squared, p_value, dof, expected)
# chi-squared is about 2287 and the p-value is so small it prints as 0.0, so we can reject
# Reject the null hypothesis that hours worked per week is independent of sex
###Output
_____no_output_____
###Markdown
Assignment - Build a confidence interval

A confidence interval refers to a neighborhood around some point estimate, the size of which is determined by the desired p-value. For instance, we might say that 52% of Americans prefer tacos to burritos, with a 95% confidence interval of +/- 5%.

52% (0.52) is the point estimate, and +/- 5% (the interval $[0.47, 0.57]$) is the confidence interval. "95% confidence" means a p-value $\leq 1 - 0.95 = 0.05$.

In this case, the confidence interval includes $0.5$ - which is the natural null hypothesis (that half of Americans prefer tacos and half burritos, thus there is no clear favorite). So in this case, we could use the confidence interval to report that we've failed to reject the null hypothesis.

But providing the full analysis with a confidence interval, including a graphical representation of it, can be a helpful and powerful way to tell your story. Done well, it is also more intuitive to a layperson than simply saying "fail to reject the null hypothesis" - it shows that in fact the data does *not* give a single clear result (the point estimate) but a whole range of possibilities.

How is a confidence interval built, and how should it be interpreted? It does *not* mean that 95% of the data lies in that interval - instead, the frequentist interpretation is "if we were to repeat this experiment 100 times, we would expect the average result to lie in this interval ~95 times."

For a 95% confidence interval and a normal(-ish) distribution, you can simply remember that +/-2 standard deviations contains 95% of the probability mass, and so the 95% confidence interval based on a given sample is centered at the mean (point estimate) and has a range of +/- 2 (or technically 1.96) standard deviations.

Different distributions/assumptions (90% confidence, 99% confidence) will require different math, but the overall process and interpretation (with a frequentist approach) will be the same.

Your assignment - using the data from the prior module ([congressional voting records](https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records)):

Confidence Intervals:
1. Generate and numerically represent a confidence interval
2. Graphically (with a plot) represent the confidence interval
3. Interpret the confidence interval - what does it tell you about the data and its distribution?

Chi-squared tests:
4. Take a dataset that we have used in the past in class that has **categorical** variables. Pick two of those categorical variables and run a chi-squared test on that data
   - By hand using Numpy
   - In a single line using Scipy
###Code
import numpy
import scipy.stats as stats
import pandas
import matplotlib.pyplot as pyplot
cols = ['party','handicapped-infants','water-project',
'budget','physician-fee-freeze', 'el-salvador-aid',
'religious-groups','anti-satellite-ban',
'aid-to-contras','mx-missile','immigration',
'synfuels', 'education', 'right-to-sue','crime','duty-free',
'south-africa']
get_ipython().system('wget -N https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data')
df = pandas.read_csv('house-votes-84.data',
header=None,
names=cols).replace({'?':numpy.NaN, 'n':0, 'y':1})
df.head()
partyStats = {}
democrats = df[df['party']=='democrat']
republicans = df[df['party']=='republican']
for party in ['democrat', 'republican']:
partyData = df[df['party']==party]
partyStats[party] = {'means': [],
'confidence_intervals': [],
'standard_errors': [],
'margins_of_error': []}
for column in cols[1:]:
n = partyData[column].count()
dof = n - 1
mean = partyData[column].mean()
std_err = numpy.std(partyData[column],ddof=1) / numpy.sqrt(n)
confidence_interval = stats.t.interval(0.95, dof, loc=mean, scale=std_err)
margin_of_error = std_err * stats.t.ppf(0.975, dof)
partyStats[party]['means'].append(mean)
partyStats[party]['confidence_intervals'].append(confidence_interval)
partyStats[party]['margins_of_error'].append(margin_of_error)
partyStats[party]['standard_errors'].append(std_err)
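# Consistency sketch: the stats.t.interval bounds stored above should equal mean +/- margin_of_error,
# since both are built from the same standard error and t critical value.
for party in partyStats:
    for (low, high), m, moe in zip(partyStats[party]['confidence_intervals'],
                                   partyStats[party]['means'],
                                   partyStats[party]['margins_of_error']):
        assert abs(low - (m - moe)) < 1e-9 and abs(high - (m + moe)) < 1e-9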
x = numpy.arange(len(cols[1:]))
width = 0.3
pyplot.bar(x-width/2, partyStats['democrat']['means'], width=width, yerr=partyStats['democrat']['margins_of_error'], color='blue', ecolor='black', label='Democrats')
pyplot.bar(x+width/2, partyStats['republican']['means'], width=width, yerr=partyStats['republican']['margins_of_error'], color='red', ecolor='black', label='Republicans')
figure = pyplot.gcf()
figure.set_size_inches((8,6))
figure.patch.set(facecolor='#073642')
figure.axes[0].patch.set(facecolor='#073642')
pyplot.xticks(x, cols[1:], rotation='vertical')
pyplot.ylabel('Proportion voted for')
pyplot.xlabel('Issue')
pyplot.title('Proportions of house members for various bills by party')
pyplot.legend()
pyplot.show()
get_ipython().system('wget -N https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv')
get_ipython().system('wget -N https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--life_expectancy_years--by--geo--time.csv')
get_ipython().system('wget -N https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv')
get_ipython().system('wget -N https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv')
get_ipython().system('wget -N https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--concepts.csv')
gdp_per_capita = pandas.read_csv('ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv')
life_expectancy = pandas.read_csv('ddf--datapoints--life_expectancy_years--by--geo--time.csv')
population_total = pandas.read_csv('ddf--datapoints--population_total--by--geo--time.csv')
entities = pandas.read_csv('ddf--entities--geo--country.csv')
concepts = pandas.read_csv('ddf--concepts.csv')
merged_geotime = pandas.merge(gdp_per_capita,
pandas.merge(life_expectancy,
population_total,
on=['geo', 'time']
),
on=['geo', 'time']
)
merged = pandas.merge(merged_geotime,
entities[['country','gapminder_list','world_4region','world_6region','name']],
left_on='geo',
right_on='country'
).rename(
columns={'income_per_person_gdppercapita_ppp_inflation_adjusted': 'gdp_per_capita'})
gdp_bins = pandas.qcut(merged['gdp_per_capita'], 20)
le_bins = pandas.cut(merged['life_expectancy_years'], 20)
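# qcut makes 20 equal-count (quantile) bins for GDP per capita, while cut makes 20 equal-width bins
# for life expectancy; both become categorical variables for the chi-squared tests below.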
gdp_region_crosstab = pandas.crosstab(gdp_bins, merged['world_6region'], margins=True)
le_region_crosstab = pandas.crosstab(le_bins, merged['world_6region'], margins=True)
le_region_crosstab
d = {'Life Expectancy vs Region': le_region_crosstab,
'GDP vs Region': gdp_region_crosstab}
for key in d:
observed = d[key]
expected = observed.copy()
for column, values in observed.items():
for row in values.index:
expected.loc[row, column] = expected.loc['All', column] * expected.loc[row, 'All'] / expected.loc['All', 'All']
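    # Note: the crosstabs keep margins=True, so the 'All' row and column are part of the tables used
    # here and in the scipy call below, which is why the tests report 120 degrees of freedom.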
chi_squared_dof = (expected.shape[0]-1) * (expected.shape[1]-1)
print(f'Chi-Squared information for {key}')
print('numpy-calculated:')
chi_squared = ((numpy.array(observed) - numpy.array(expected))**2/numpy.array(expected)).sum()
print(f'\tChi-Squared: {chi_squared} ({chi_squared_dof} degrees of freedom)')
critical = stats.chi2.ppf(q=0.95, df=chi_squared_dof)
print(f'\tCritical value for {chi_squared_dof} degrees of freedom at p < 0.05: {critical}')
print('scipy-calculated:')
chi_squared, p, chi_squared_dof, _ = stats.chi2_contingency(observed)
print(f'\tChi-Squared: {chi_squared} ({chi_squared_dof} degrees of freedom)')
print(f'\tp = {p}')
    if p < 0.05:
        print(f'We are confident (p < 0.05) that there is an association in {key}.\n')
###Output
Chi-Squared information for Life Expectancy vs Region
numpy-calculated:
Chi-Squared: 10951.03990391922 (120 degrees of freedom)
Critical value for 120 degrees of freedom at p < 0.05: 146.56735758076744
scipy-calculated:
Chi-Squared: 10951.039903919218 (120 degrees of freedom)
p = 0.0
We are confident (p < 0.05) that there is an association in Life Expectancy vs Region.
Chi-Squared information for GDP vs Region
numpy-calculated:
Chi-Squared: 15351.473361831417 (120 degrees of freedom)
Critical value for 120 degrees of freedom at p < 0.05: 146.56735758076744
scipy-calculated:
Chi-Squared: 15351.473361831415 (120 degrees of freedom)
p = 0.0
We are confident (p < 0.05) that there is an association in GDP vs Region.
|
Code/NLP scores.ipynb | ###Markdown
Generating scores based on the similarity between pre-defined dictionary and caption+alt-text phrases.
###Code
import pandas as pd
import numpy as np
import seaborn as sns
%cd /content/drive/MyDrive
df=pd.read_csv('complete_data.csv')
x=df.columns
x=x.tolist()
ll=[]
a=0
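# For each alt-text/caption indicator column (indices 1428-1557), tally a 2x2 table against the
# column at index 1558 (assumed to be the ad label): l = [count(0,0), count(0,1), count(1,0),
# count(1,1), column index]. `a` accumulates, across all columns, the rows where the indicator is 1.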
for i in range(1428,1558):
df_i=df[[x[i],x[1558]]]
df_ia=df_i.values
ab=False
# print(df_ia.shape)
l=[0,0,0,0,i]
for j in range(df_ia.shape[0]):
ab=False
if(df_ia[j,0]==0 and df_ia[j,1]==0):
l[0]+=1
if(df_ia[j,0]==0 and df_ia[j,1]==1):
l[1]+=1
if(df_ia[j,0]==1 and df_ia[j,1]==0):
ab=True
l[2]+=1
if(df_ia[j,0]==1 and df_ia[j,1]==1):
ab=True
l[3]+=1
if(ab):
a+=1
ll.append(l)
# ll.append("\n")
print(a)
for i in ll:
print(i)
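# Append an add-one-smoothed (inverse) odds ratio for each column:
# (count(1,0) + 1) * (count(0,1) + 1) / ((count(0,0) + 1) * (count(1,1) + 1)).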
for i in range(len(ll)):
d=ll[i]
orr=(d[2]+1)*(d[1]+1)/((d[0]+1)*(d[3]+1))
ll[i].append(orr)
for i in ll:
print(i)
ll.sort(key= lambda x: x[5] )
for i in ll:
print(i)
xx=[]
for i in ll:
if i[5]>=1:
xx.append(x[i[4]])
dicionary=['click','click here','here for','here','find','online','banner','visit','free','discount','offer','book','register','to visit','download','find your','visit our','amazon','flipkart','services','sell','buy','mortgage','house','rent','property','product','shopping','link','microsoft','advertisment','ad','advert','commercial','loan','netscape','yahoo','google','facebook','youtube','info','club','money','bill','site','apply','recommend','online','information','network','premium','join','here to','booking']
pip install transformers
pip install sentence-transformers
from sentence_transformers import SentenceTransformer, util
import numpy as np
model = SentenceTransformer('stsb-roberta-large')
sentence1 = "colloid"
sentence2 = "cream"
# encode sentences to get their embeddings
embedding1 = model.encode(sentence1, convert_to_tensor=True)
embedding2 = model.encode(sentence2, convert_to_tensor=True)
# compute similarity scores of two embeddings
cosine_scores = util.pytorch_cos_sim(embedding1, embedding2)
print("Sentence 1:", sentence1)
print("Sentence 2:", sentence2)
print("Similarity score:", cosine_scores.item())
def scorer(s1,s2):
embedding1=model.encode(s1, convert_to_tensor=True)
embedding2 = model.encode(s2, convert_to_tensor=True)
cosine_scores = util.pytorch_cos_sim(embedding1, embedding2)
return cosine_scores.item()
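# scorer returns the cosine similarity between the two phrase embeddings,
# roughly in [-1, 1], with higher values meaning more similar phrases.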
print(type(x))
def sentence_gen(s):
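    # Strip the feature-name prefix up to and including '*' and turn '+' separators into spaces,
    # e.g. 'alt*your+here' -> 'your here', so the phrase can be fed to the similarity scorer.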
ss=s.find("*")
s=s[ss+1:]
s=s.replace("+"," ")
return s
print(type(x[1]))
df['width']
sentence_gen("alt*your+here")
print(len(nlp_score))
print(nlp_score)
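# Sum each column phrase's similarity against every dictionary term; columns 1428-1538 appear to be
# the alt-text features and 1539-1557 the caption features, judging by the list names used below.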
alt_list=[]
cap_list=[]
for i in range(1428,1539):
d=0
for j in dicionary:
d+=scorer(sentence_gen(x[i]),j)
alt_list.append(d)
for i in range(1539,1558):
d=0
for j in dicionary:
d+=scorer(sentence_gen(x[i]),j)
cap_list.append(d)
print(len(alt_list))
listt=alt_list+cap_list
df=pd.read_csv('Complete_data.csv')
nlp_score=[]
for index, row in df.iterrows():
apple=0
for i in range(1428,1558):
apple+=df.iloc[index][x[i]]*listt[i-1428]
nlp_score.append(apple/(130))
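# Equivalent vectorized sketch (assumes the 130 indicator columns in this file are numeric 0/1,
# as the earlier counting loop treats them): each row's score is the dot product of its indicators
# with the similarity totals, divided by 130.
weights = np.array(listt)
nlp_score_vectorized = (df[x[1428:1558]].values @ weights) / 130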
print(nlp_score)
pd.DataFrame(nlp_score,columns=['nlp_synth']).to_csv('nlp_score_synth.csv',index=False)
ns = [0.4383573563626179, 0.10347272151937852, 0.12807405086664053, 0.41382877563054743, 0.0, 0.0, 0.0, 0.852654671898255, 0.0, 0.0, 0.0, 0.0, 0.13678573914445363, 0.0, 0.12807405086664053, 1.417805487017792, 1.4477348412458713, 0.0, 0.0, 0.13678573914445363, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3643643899032703, 0.0, 0.1510341577231884, 0.0, 0.0, 0.0, 0.0, 0.13304106014279218, 0.0, 0.0, 0.4383573563626179, 0.0, 0.5556554651604249, 0.0, 0.8767147127252358, 0.9342806422796387, 0.874901578346124, 0.9571833831186478, 0.0, 0.0, 0.0, 0.0, 1.7383035655778187, 0.0, 0.2463321120120012, 0.7608246485559413, 0.25165000534974613, 0.0, 0.0, 0.0, 0.20878332020141757, 0.12390785562590911, 0.0, 0.2563050588306326, 0.27806692321856435, 0.8592172185962017, 0.0, 0.0, 0.0, 0.45281853343431766, 0.7238674206229356, 1.5567647682932706, 1.0047887635918764, 1.4477348412458713, 0.0, 1.0971561791661841, 0.7238674206229356, 0.37476971751222243, 0.0, 0.0, 0.14481798777213464, 0.12381831888969128, 0.0, 0.11403429445165854, 0.25457934886217115, 0.0, 0.1317415266082837, 0.0, 0.1746036392278396, 0.1746036392278396, 0.1746036392278396, 0.0, 0.0, 0.10376263140485836, 0.8669030904769898, 0.7238674206229356, 0.7238674206229356, 0.7238674206229356, 0.8534207517137894, 0.0, 0.10902992704739937, 0.0, 0.0, 0.26608212028558437, 0.26608212028558437, 0.2407714536556831, 0.0, 0.12967268553777384, 0.0, 0.9755174259726818, 0.12967268553777384, 0.7000394155342992, 0.11309712254084074, 0.0, 0.0, 0.1746036392278396, 0.39893530865128224, 0.5893915140858064, 0.4303353395599585, 0.6210548330241671, 0.29972999050067023, 0.36560761467195474, 0.7614126259890887, 0.7544117451287233, 0.7544117451287233, 0.10902992704739937, 0.11309712254084074, 0.10902992704739937, 0.0, 0.0, 0.37476971751222243, 0.0, 0.0, 0.8767147127252358, 0.0, 0.519329229914225, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1746036392278396, 0.13678573914445363, 0.11566166488023905, 0.0, 0.0, 0.1510341577231884, 0.1510341577231884, 0.0, 0.11888350173830986, 0.13678573914445363, 0.0, 1.2725104025923288, 0.0, 0.4383573563626179, 0.1746036392278396, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.8767147127252358, 0.4126672814098688, 0.5796620456286921, 0.0, 0.0, 0.3714652218640997, 0.9676604081375094, 0.9676604081375094, 0.3714652218640997, 0.8767147127252358, 0.0, 0.3714652218640997, 0.0, 0.0, 0.11265019734318439, 0.0, 0.40884702721467386, 0.14334828584240034, 0.0, 0.0, 0.0, 0.14334828584240034, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9342806422796387, 0.10603597227197427, 0.0, 0.619206467600396, 0.0, 0.0, 1.4477348412458713, 0.26669582260342745, 0.619206467600396, 0.39150820390249674, 0.12807405086664053, 0.0, 0.8039831556093234, 0.5632536763898455, 0.5356073511764408, 0.0, 0.5269380773346012, 0.0, 0.0, 0.5356073511764408, 0.4383573563626179, 0.0, 0.4383573563626179, 0.24956817724383795, 0.8411655294207426, 0.11309712254084074, 0.11309712254084074, 0.2712086973282007, 0.11309712254084074, 0.11309712254084074, 0.852654671898255, 0.0, 0.0, 0.6735185424152476, 0.0, 0.0, 0.0, 0.2495630056382372, 0.25789158381521704, 0.0, 0.3714652218640997, 0.0, 0.0, 0.0, 0.3955396360502793, 0.0, 0.0, 0.11265019734318439, 0.0, 0.0, 0.0, 0.11265019734318439, 0.11265019734318439, 0.0, 0.8328973476703351, 0.0, 0.24956817724383795, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.20757860482598728, 0.0, 0.12967268553777384, 0.11566166488023905, 0.0, 0.0, 0.4383573563626179, 0.0, 0.0, 0.0, 1.0703157210436005, 0.8047273111887849, 0.0, 0.5505350146012811, 0.0, 0.7608246485559413, 0.0, 0.0, 0.5103829058460319, 0.7238674206229356, 0.7238674206229356, 
0.9705241339137921, 0.7238674206229356, 1.0836212564546328, 0.7238674206229356, 1.0836212564546328, 1.0836212564546328, 1.0836212564546328, 0.4383573563626179, 0.4383573563626179, 0.4383573563626179, 0.40029019254904524, 0.11309712254084074, 0.390413059007663, 0.38511113352500476, 0.45465741083025935, 1.0318207265379338, 0.45465741083025935, 1.0318207265379338, 0.840765252781029, 0.0711309177084611, 0.6875334479487859, 0.4383573563626179, 0.0, 0.0, 0.8622538648689022, 0.0, 0.0, 0.0, 0.0, 0.0, 0.24685985698149754, 0.0, 0.0, 0.0, 0.11309712254084074, 0.0, 0.0, 0.0, 1.2180815001519827, 0.12433093872207862, 0.0, 0.0, 0.5621756752523092, 0.06830710247159004, 0.0, 0.0, 0.0, 0.7156121545686172, 0.0, 0.0, 0.25436418135292255, 0.0, 0.8622538648689022, 0.48411541920728407, 0.6059620088969285, 0.4874875855273925, 0.6315288291241113, 0.0, 0.0, 0.11566166488023905, 1.2180815001519827, 0.0, 0.4383573563626179, 1.377119022837052, 0.7238674206229356, 0.0, 0.0, 0.0, 0.13678573914445363, 0.8315305976340404, 0.0, 0.11309712254084074, 0.0, 0.4383573563626179, 0.4383573563626179, 0.3714652218640997, 1.2180815001519827, 0.0, 0.0, 0.2590099507226394, 0.0, 0.0, 0.0, 0.0, 0.3876965424475762, 0.8777779664378613, 0.671848807311975, 0.0, 0.8904172331381303, 0.6214872679888056, 0.7066019622322458, 0.0, 0.9855494797946169, 0.8395290855031747, 0.0, 0.0, 0.0, 0.0, 0.3714652218640997, 0.3479623925084105, 0.06830710247159004, 0.0, 0.0, 0.45242821057685295, 0.0, 0.0, 0.20814779627208527, 0.11888350173830986, 0.0, 0.0, 0.0, 0.7302913521917966, 0.11403429445165854, 0.1943219300216207, 0.0, 0.0, 0.4882664613425732, 0.0, 0.0, 0.0, 0.07276624113034744, 0.0, 0.08118526571645186, 0.0, 0.0, 0.0, 0.07276624113034744, 0.0, 0.10902992704739937, 0.0, 0.0, 0.0, 0.13678573914445363, 0.11403429445165854, 0.47689617730390566, 0.10902992704739937, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3153379072757581, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11872643149242951, 0.0, 0.0, 0.5125572530409465, 0.0, 0.0, 1.335953715730172, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.38130970385212165, 0.0, 0.0, 0.0, 0.0, 0.0997928231381453, 0.0, 1.492515267754117, 0.0, 0.22984565863242515, 0.0, 0.0, 0.13678573914445363, 0.12582841840787576, 0.0, 0.3770236559212208, 0.0, 0.0, 0.23947998376993032, 0.0, 0.0, 0.0667385809410077, 0.0, 0.0, 0.0, 0.0, 0.14334828584240034, 0.0, 0.10923841503950266, 0.0, 0.08118526571645186, 0.0, 0.5020474526171501, 0.0, 0.0, 0.44423196469075404, 0.14338713546211904, 0.11403429445165854, 0.0, 0.0, 0.0, 0.0, 0.2626141575523294, 0.2599457369114344, 0.0, 0.0, 0.0, 0.0, 0.3179962848456433, 0.0, 0.12315999776698076, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11566166488023905, 0.0, 0.13678573914445363, 0.0, 0.0, 0.09669635424820276, 0.10347272151937852, 0.3076647049819048, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.10923841503950266, 0.09669635424820276, 0.0, 0.0, 0.2128444280761939, 0.0, 0.0, 0.0, 0.0, 0.6079573236978971, 0.0, 0.11566166488023905, 0.0, 0.14334828584240034, 0.0, 0.11070223904859561, 0.0, 0.1510341577231884, 0.0, 0.0, 0.09767914700966615, 0.11566166488023905, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11323630698025226, 0.0, 0.0, 0.0, 0.0, 0.12955333109085376, 0.0, 0.5345358367937688, 0.07276624113034744, 0.47689617730390566, 0.0, 0.23576544051488432, 0.12484515286408938, 0.0, 0.07276624113034744, 0.0, 0.07276624113034744, 0.0, 0.0, 0.0, 0.4383573563626179, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0997928231381453, 0.13304106014279218, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.21511825349611732, 0.13809389472007752, 0.11070223904859561, 0.0, 0.0890261091865026, 0.0, 0.0, 
0.30621257298267807, 0.0, 0.0, 0.09767914700966615, 0.25465548230478396, 0.0, 0.11888350173830986, 0.12489632002722759, 0.0, 0.18016448622712722, 0.0, 0.0, 0.0, 0.0, 0.19679589834637368, 0.0, 0.11566166488023905, 0.2811461722908112, 0.0, 0.6166678567058765, 0.0, 0.0, 0.0, 0.0, 0.10376263140485836, 0.0, 0.0, 0.0, 0.0, 0.11735329734018216, 0.47689617730390566, 0.0, 0.0997928231381453, 0.0, 0.07529509663581849, 0.15241682486465344, 0.0, 0.27131517463578625, 0.0, 0.13891768398193213, 0.0, 0.0, 0.0890261091865026, 0.10902992704739937, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.22581184833095624, 0.5625221740740997, 0.0, 0.09767914700966615, 0.0890261091865026, 0.0, 0.0, 0.12381831888969128, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4879452966798383, 0.4333535043665996, 0.12582841840787576, 0.11872643149242951, 0.5625221740740997, 0.767072889337746, 0.0, 0.0, 0.0, 0.11403429445165854, 0.0, 0.8395290855031747, 0.9610882612088552, 0.0, 0.06830710247159004, 0.0, 0.0, 0.38432187063332934, 0.0, 0.0, 0.0, 0.12745547867738283, 0.0, 0.11888350173830986, 0.0, 0.07276624113034744, 0.0, 0.10435670171147929, 0.0, 0.2463827097072051, 0.0, 0.0, 0.4879452966798383, 0.25439848395494313, 0.0, 0.07276624113034744, 0.0, 0.23906472538812804, 0.0, 0.0, 0.09767914700966615, 0.0, 0.126718714260138, 0.0, 0.22831186222342345, 0.13891768398193213, 0.0, 0.11888350173830986, 0.0, 0.0, 0.07276624113034744, 0.0, 0.0, 0.0, 0.0997928231381453, 0.0, 0.0, 0.14986499525033511, 0.0, 0.0, 0.0, 0.28987149210073626, 0.0, 0.12582841840787576, 0.06830710247159004, 0.47689617730390566, 0.0, 0.0, 0.0, 0.25551217063688314, 0.1965845600200387, 0.0, 0.0, 0.12955333109085376, 0.0, 0.10502068879655921, 0.15241682486465344, 0.0, 0.0, 0.0, 0.11872643149242951, 0.10189476580573963, 0.5732327961828559, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11566166488023905, 0.0, 0.0, 0.0, 0.3076647049819048, 0.0, 0.0, 0.43537505951065286, 0.0, 0.27937935542028686, 0.08690483181809004, 0.10923841503950266, 0.0, 0.0, 0.0, 0.0, 0.15241682486465344, 0.0, 0.0, 0.0, 0.06830710247159004, 0.15444766264408827, 0.1269625305556334, 0.26552666013057413, 0.0, 0.14222848830887905, 0.0, 0.0, 0.4879452966798383, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5625221740740997, 0.11566166488023905, 0.0, 0.0, 0.0, 0.3889500736975326, 0.2272139037744357, 0.0, 0.0, 0.0, 0.5625221740740997, 0.0, 0.0, 0.0, 0.21168392511227957, 0.2937215141307276, 0.0, 0.0, 0.0, 0.11566166488023905, 0.13407326492552574, 0.06830710247159004, 0.0, 0.11566166488023905, 0.22985429116166556, 0.0, 0.09669635424820276, 0.0, 0.0, 0.1004035013799484, 0.0, 0.0667385809410077, 0.0, 0.25337164998054507, 0.0, 0.0, 0.3076647049819048, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.15241682486465344, 0.17723203633840268, 0.0, 0.0, 0.0, 0.0, 0.5451812482868823, 0.0, 0.0, 0.17712294284182672, 0.0, 0.5451812482868823, 0.0, 0.06830710247159004, 0.0, 0.5732327961828559, 0.0, 0.37159331663010214, 0.15241682486465344, 0.0, 0.0, 0.0, 0.2846529751084745, 0.0, 0.47689617730390566, 0.0, 0.09767914700966615, 0.22149746589935743, 0.0, 0.0, 0.0, 0.0997928231381453, 0.6076542510507772, 0.0, 0.12433093872207862, 0.0, 0.11888350173830986, 0.0, 0.0, 0.0, 0.10376263140485836, 0.19679589834637368, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4383573563626179, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.23576544051488432, 0.0, 0.0, 0.0, 0.0, 0.3153379072757581, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.10435670171147929, 0.5732327961828559, 0.0, 0.0, 0.12315999776698076, 0.0, 0.0, 0.2414900832881148, 0.0, 0.5052690942270252, 0.13809389472007752, 
0.0, 0.0, 0.0, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0997928231381453, 0.0, 0.0, 0.0, 0.0, 0.3576337461288159, 0.0, 0.13809389472007752, 0.11566166488023905, 0.10923841503950266, 0.3893972552739657, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11309712254084074, 0.0, 0.0, 0.0, 0.0, 0.09669635424820276, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.14481798777213464, 0.0, 0.0, 0.3076647049819048, 1.2254618909783088, 0.0, 0.5677853113004508, 0.10923841503950266, 0.0, 0.5451812482868823, 0.0, 0.1268795377933062, 0.0, 0.0, 0.33594236108474435, 0.0, 0.0, 0.1339065123635989, 0.37476971751222243, 0.0, 0.0, 0.0, 0.1268795377933062, 0.0, 0.24870272502303123, 0.1339065123635989, 0.2524474040246927, 0.0, 0.12955333109085376, 0.1268795377933062, 0.0, 0.0, 0.0, 0.1268795377933062, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13304106014279218, 0.4383573563626179, 0.0, 0.13809389472007752, 0.0, 0.1339065123635989, 0.24956817724383795, 0.0, 0.0, 0.13809389472007752, 0.11566166488023905, 0.20410835777337735, 0.3889500736975326, 0.09669635424820276, 0.0, 0.0, 0.0, 0.0, 0.22925302173870687, 0.0, 0.0, 0.0, 0.07276624113034744, 0.0, 0.0, 0.2524474040246927, 0.3076647049819048, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.10776978915987107, 0.0667385809410077, 0.0, 0.0, 0.0, 0.3889500736975326, 0.22809357012693698, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13678573914445363, 0.13809389472007752, 0.13678573914445363, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5033348432383858, 0.0, 0.23544273046919934, 0.0, 0.1339065123635989, 0.25904880034235805, 0.06830710247159004, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06830710247159004, 0.14334828584240034, 0.3153379072757581, 0.0997928231381453, 0.0, 0.09669635424820276, 0.0, 0.0, 0.20410835777337735, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.26669582260342745, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.10435670171147929, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11888350173830986, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0997928231381453, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09027310063202794, 0.1339065123635989, 0.0, 0.1004035013799484, 0.0, 0.1339065123635989, 0.0, 0.0, 0.13304106014279218, 0.5052690942270252, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11265019734318439, 0.0, 0.0, 0.0, 0.0, 0.0, 0.17723203633840268, 0.08118526571645186, 0.06830710247159004, 0.47476620161905886, 0.0, 0.0, 0.1004035013799484, 0.0, 0.0, 0.0, 0.0, 0.21610151903990368, 0.0, 0.0, 0.08118526571645186, 0.3153379072757581, 0.0, 0.2062514675172189, 0.13678573914445363, 0.13678573914445363, 0.0, 0.0, 0.0, 0.13304106014279218, 0.13809389472007752, 0.0, 0.0, 0.0, 0.10502068879655921, 0.23576544051488432, 0.0, 0.12604808675555082, 0.22809357012693698, 0.0, 0.0, 0.09027310063202794, 0.0, 0.0, 0.47689617730390566, 0.0, 0.0, 0.0, 0.0, 0.12745547867738283, 0.3556304992785534, 0.0, 0.0, 0.06830710247159004, 0.3893972552739657, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1339065123635989, 0.0, 0.10435670171147929, 0.0, 0.0, 0.0, 0.09669635424820276, 0.0, 0.09669635424820276, 0.1339065123635989, 0.0, 0.33927280690807565, 0.0, 0.1577552034985274, 0.0, 0.0, 0.5625221740740997, 0.4703290909743653, 0.0, 0.12433093872207862, 0.0, 0.0, 0.0, 0.0, 0.1577552034985274, 0.0, 0.0, 0.3153379072757581, 0.0, 0.4383573563626179, 0.0, 0.0, 0.0, 0.17723203633840268, 0.0, 0.11888350173830986, 0.39619779784160736, 0.0, 0.0, 0.6365400746178168, 0.0, 0.0, 0.0, 0.0, 0.5625221740740997, 0.0, 0.3576337461288159, 0.0, 0.0, 0.19674678762944844, 0.0, 0.147496855746095, 0.0, 
0.3576337461288159, 0.06830710247159004, 0.0, 0.3153379072757581, 0.12604808675555082, 0.20920160899941737, 0.0, 0.0, 0.1268795377933062, 0.06830710247159004, 0.0, 0.0, 0.2524474040246927, 0.0, 0.06830710247159004, 0.0, 0.5433780451591771, 0.0, 0.2524474040246927, 0.12955333109085376, 0.0, 0.0, 0.19395701310669, 0.5052690942270252, 0.06830710247159004, 0.0, 0.6248922458348366, 0.0, 0.11566166488023905, 0.1268795377933062, 0.08340536310122564, 0.0, 0.1268795377933062, 0.0, 0.1577552034985274, 0.0, 0.0, 0.0, 0.0997928231381453, 0.0, 0.0997928231381453, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11888350173830986, 0.12381831888969128, 0.0, 0.0, 0.3893972552739657, 0.0, 0.0, 0.0, 0.0, 0.24870272502303123, 0.0, 0.47689617730390566, 0.0, 0.0, 0.11309712254084074, 0.11566166488023905, 0.1268795377933062, 0.06830710247159004, 0.0, 0.0, 0.0, 0.13891768398193213, 0.0, 0.0, 0.3556304992785534, 0.28987149210073626, 0.147496855746095, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13304106014279218, 0.06830710247159004, 0.13891768398193213, 0.0, 0.10603597227197427, 0.0, 0.0, 0.0, 0.5625221740740997, 0.0, 0.0, 0.06830710247159004, 0.1268795377933062, 0.0, 0.0, 0.0, 0.0, 0.09767914700966615, 0.0, 0.0, 0.4383573563626179, 0.12381831888969128, 0.0, 0.0, 0.13304106014279218, 0.0, 0.0, 0.10603597227197427, 0.0, 0.28987149210073626, 0.0, 0.126718714260138, 0.3076647049819048, 0.11566166488023905, 0.0, 0.0, 0.0, 0.0, 0.6262954244819972, 0.0, 0.0, 0.0, 0.11309712254084074, 0.6879255336064559, 0.0, 0.09767914700966615, 0.0, 0.147496855746095, 0.06830710247159004, 0.0, 0.8577739329865346, 0.11735329734018216, 0.30621257298267807, 0.09697436185983511, 0.37159331663010214, 0.4879452966798383, 0.0, 0.0, 0.3076647049819048, 0.0, 0.10435670171147929, 0.12582841840787576, 0.0, 0.12441464993529595, 0.10923841503950266, 0.31689718518979276, 0.0, 0.0, 0.0, 0.1268795377933062, 0.0, 0.2778590479149268, 1.534313943910484, 0.14334828584240034, 0.0, 0.0, 0.1268795377933062, 0.0667385809410077, 0.0, 0.06946224717853161, 0.3153379072757581, 0.0, 0.0, 0.0997928231381453, 0.11566166488023905, 0.09669635424820276, 0.5732327961828559, 0.0, 0.0, 0.11888350173830986, 0.12381831888969128, 0.0, 0.0, 0.12604808675555082, 0.0, 0.0, 0.0, 0.0, 0.1268795377933062, 0.0, 0.0, 0.4831451114410391, 0.0, 0.0, 0.14334828584240034, 0.0, 0.0, 0.0, 0.0, 0.11566166488023905, 0.0, 0.0, 0.0, 0.0, 0.13140873880340503, 0.0, 0.0, 0.11888350173830986, 0.0, 0.0, 0.0, 0.0, 0.0, 0.21263602674007415, 0.13891768398193213, 0.0, 0.0, 0.0, 0.0, 0.1268795377933062, 0.0, 0.0, 0.0, 0.0, 0.0, 0.47689617730390566, 0.0, 0.0997928231381453, 0.0, 0.0, 0.1268795377933062, 0.0, 0.0, 0.1339065123635989, 0.09767914700966615, 0.0, 0.0, 0.0, 0.0, 0.1268795377933062, 0.1317415266082837, 0.0, 0.3153379072757581, 0.0, 0.11872643149242951, 0.0, 0.0, 0.0, 0.4588595024524973, 0.0, 0.10923841503950266, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0997928231381453, 0.21610151903990368, 0.0, 0.5732327961828559, 0.10603597227197427, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1269625305556334, 0.0, 0.0, 0.12381831888969128, 0.0, 0.0, 0.4879452966798383, 0.10603597227197427, 0.47689617730390566, 0.0, 0.0, 0.0, 0.0, 0.35372341381959044, 0.06830710247159004, 0.5625221740740997, 0.13891768398193213, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.20410835777337735, 0.0, 0.0, 0.0, 0.09669635424820276, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0667385809410077, 0.0, 0.0, 0.0, 0.08118526571645186, 0.6681350641382429, 0.0, 1.2256529224033539, 0.13678573914445363, 0.0, 0.0, 0.0, 0.345399079271234, 0.0, 0.0, 0.3332143522942295, 0.0, 0.0, 
0.0, 0.13809389472007752, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09767914700966615, 0.0, 0.0, 0.4831451114410391, 0.36590473322341077, 0.0, 0.0, 0.0, 0.28987149210073626, 0.0, 0.13891768398193213, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.47797245028643653, 0.0, 0.0, 0.37159331663010214, 0.13678573914445363, 0.11671629897676981, 0.47689617730390566, 0.0, 0.0, 0.0, 0.0, 0.4831451114410391, 0.11872643149242951, 0.0, 0.0, 0.126718714260138, 0.147496855746095, 0.5732327961828559, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.147496855746095, 0.0, 0.0, 0.0, 0.0, 0.0, 0.345399079271234, 0.0, 0.0, 0.147496855746095, 0.4839194644958927, 0.0, 0.0, 0.0, 0.0, 0.1269625305556334, 0.0, 0.0, 0.09027310063202794, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11323630698025226, 0.23871050712007744, 0.11309712254084074, 0.0997928231381453, 0.0, 0.0, 0.0, 0.13809389472007752, 0.13809389472007752, 0.0, 0.0, 0.0, 0.09767914700966615, 0.0, 0.10923841503950266, 0.0, 0.0, 0.1339065123635989, 0.0, 0.0, 0.20081184586653342, 0.5975694566391981, 0.5732327961828559, 0.0, 0.0, 0.0, 0.43099957215599716, 0.0, 0.11566166488023905, 0.0, 0.0, 0.0, 0.13891768398193213, 0.0, 0.0, 0.0, 0.2521539909621844, 0.0, 0.3076647049819048, 0.24870272502303123, 0.13891768398193213, 0.13809389472007752, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.10502068879655921, 0.28987149210073626, 0.11566166488023905, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.37476971751222243, 0.13809389472007752, 0.06830710247159004, 0.23576544051488432, 0.0, 0.13891768398193213, 0.0, 0.0, 0.1269625305556334, 0.09767914700966615, 0.0, 0.26563562798815277, 0.0, 0.0, 0.0, 0.0, 0.28987149210073626, 0.0, 0.0, 0.489190078412111, 0.0, 0.5625221740740997, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.08085989056584927, 0.0, 0.0, 0.11888350173830986, 0.0, 0.28987149210073626, 0.0, 0.230880874223434, 0.10189476580573963, 0.0, 0.16906016666728715, 0.11872643149242951, 0.0, 0.08118526571645186, 0.23580127816933852, 0.0, 0.0, 0.0, 0.38282826961233063, 0.0, 0.1339065123635989, 0.0, 0.1359528959943698, 0.0, 0.2576441154743616, 0.0, 0.24072822090238333, 0.29073342606425284, 0.0, 0.0, 0.0, 0.0, 0.2823254175914022, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1510341577231884, 0.10603597227197427, 0.0, 0.23580127816933852, 0.0, 0.0, 0.12315999776698076, 0.20410835777337735, 0.0, 0.23947998376993032, 0.0, 0.24431582789581555, 0.0, 0.0, 0.0, 0.0, 0.449244419639357, 0.0, 0.11403429445165854, 0.0, 0.0, 0.2590099507226394, 0.0, 0.0, 0.0, 0.10502068879655921, 0.12489632002722759, 0.10603597227197427, 0.0, 0.0, 0.0, 0.13678573914445363, 0.10923841503950266, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11888350173830986, 0.0, 0.0, 0.0, 0.10902992704739937, 0.10923841503950266, 0.09767914700966615, 0.37349567146828544, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3921012176702229, 0.08118526571645186, 0.0, 0.12582841840787576, 0.0, 0.0, 0.11872643149242951, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.16906016666728715, 0.0, 0.0, 0.0, 0.17723203633840268, 0.0, 0.23947998376993032, 0.0, 0.0, 0.22809357012693698, 0.0, 0.10923841503950266, 0.0, 0.09767914700966615, 0.0, 0.0, 0.23947998376993032, 0.11403429445165854, 0.0, 0.0, 0.0, 0.10902992704739937, 0.0, 0.0, 0.12604808675555082, 0.0, 0.0, 0.0, 0.0, 0.11566166488023905, 0.0, 0.449244419639357, 0.11403429445165854, 0.0, 0.0, 0.0, 0.0, 0.1339065123635989, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.12381831888969128, 0.0, 0.27806692321856435, 0.0, 0.10902992704739937, 0.0, 0.0, 0.0, 0.147496855746095, 0.0, 0.06946224717853161, 0.0, 0.0, 0.0, 0.11403429445165854, 0.25457934886217115, 
0.0, 0.0, 0.11872643149242951, 0.0, 0.0, 0.0, 0.23580127816933852, 0.0, 0.0, 0.5677853113004508, 0.0, 0.0, 0.0, 0.13891768398193213, 0.0, 0.0, 0.23580127816933852, 0.0, 0.126718714260138, 0.0, 0.0, 0.8998828885790248, 0.0, 0.0, 0.10776978915987107, 0.0, 0.0, 0.0, 0.0, 0.0, 0.10902992704739937, 0.0, 0.0, 0.0, 0.0, 0.14334828584240034, 0.0, 0.23947998376993032, 0.0, 0.0, 0.0, 0.11566166488023905, 0.0, 0.0, 0.0, 0.0, 0.0, 0.23576544051488432, 0.0, 0.11403429445165854, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06951488457047023, 0.0, 0.0, 0.0, 0.11070223904859561, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11566166488023905, 0.0, 0.0, 0.0, 0.0, 0.0, 0.37476971751222243, 0.09669635424820276, 0.0, 0.0, 0.5451812482868823, 0.12582841840787576, 0.0, 0.0, 0.10902992704739937, 0.0, 0.0, 0.25082003359611216, 0.11323630698025226, 0.0, 0.0, 0.0, 0.3153379072757581, 0.147496855746095, 0.10776978915987107, 0.0, 0.0, 0.4920678263100294, 1.1015395468100906, 0.0, 0.0, 0.500595094779363, 0.0, 0.13891768398193213, 0.0, 0.0711309177084611, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2599545991191497, 0.0, 0.10923841503950266, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11566166488023905, 0.3153379072757581, 0.22889797186049132, 0.0, 0.3076647049819048, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.12315999776698076, 0.0, 0.0, 0.0, 0.0, 0.23336086576947798, 0.3889500736975326, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.10902992704739937, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6094514987503107, 0.0, 0.0, 0.0, 0.39401900183696015, 1.335953715730172, 0.0, 0.0, 0.0, 0.0, 0.0997928231381453, 0.10902992704739937, 0.10189476580573963, 0.0, 0.12381831888969128, 0.0, 0.0, 0.0, 0.0, 0.0997928231381453, 0.0, 0.3076647049819048, 0.0, 0.0, 0.0, 0.28987149210073626, 0.0, 0.0, 0.10923841503950266, 0.0, 0.09669635424820276, 0.0, 0.19328972523888716, 0.0, 0.11566166488023905, 0.06024866509609497, 0.0, 0.449244419639357, 0.5625221740740997, 0.47689617730390566, 0.0, 0.0, 0.09669635424820276, 0.0, 0.10189476580573963, 0.0, 0.0, 0.0667385809410077, 0.0, 0.11888350173830986, 0.13891768398193213, 0.0, 0.06830710247159004, 0.09669635424820276, 0.09669635424820276, 0.0, 0.0, 0.0, 0.0, 0.09669635424820276, 0.19437550125786893, 0.3076647049819048, 0.0997928231381453, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1339065123635989, 0.0, 0.22001836659171833, 0.10923841503950266, 0.11566166488023905, 0.0, 0.0, 0.23366803893676172, 0.0, 0.12604808675555082, 0.10923841503950266, 0.0, 0.28987149210073626, 0.0, 0.5732327961828559, 0.3058524375494856, 0.0, 0.13678573914445363, 0.0, 0.0, 0.0, 0.11323630698025226, 0.3889500736975326, 0.20506665286822962, 0.0, 0.11566166488023905, 0.07276624113034744, 0.0, 0.3889500736975326, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5625221740740997, 0.0, 0.0, 0.4879452966798383, 0.1339065123635989, 0.0, 0.11323630698025226, 0.0, 0.09669635424820276, 0.09669635424820276, 0.0, 0.09669635424820276, 0.06830710247159004, 0.0, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0, 0.06024866509609497, 0.0, 0.0, 0.5625221740740997, 0.0, 0.10502068879655921, 0.0, 0.06830710247159004, 0.5451812482868823, 0.0, 0.0, 0.13304106014279218, 0.0, 0.0, 0.13891768398193213, 0.0, 0.0, 0.0, 1.335953715730172, 0.11888350173830986, 0.0, 0.0997928231381453, 0.0, 0.0, 0.19328972523888716, 0.23947998376993032, 0.4879452966798383, 0.0, 0.0, 0.0, 0.0, 0.07276624113034744, 0.0, 0.2599545991191497, 0.0, 0.37159331663010214, 0.12955333109085376, 0.0, 0.47689617730390566, 0.0, 0.21610151903990368, 0.12315999776698076, 0.10376263140485836, 0.0, 
0.0, 0.13678573914445363, 0.3089513901191262, 0.09669635424820276, 0.0, 0.07276624113034744, 0.2334820933926564, 0.0, 0.0, 0.13678573914445363, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1943219300216207, 0.14334828584240034, 0.0, 0.0997928231381453, 0.0, 0.0, 0.06024866509609497, 0.0, 0.5732327961828559, 0.0, 0.0, 0.0, 0.1943219300216207, 0.0, 0.10189476580573963, 0.0, 0.0, 0.5136874014941546, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1933332034601615, 0.0, 0.47689617730390566, 0.7630754736180488, 0.0, 0.0, 1.147213138697239, 0.0, 0.07276624113034744, 0.0, 0.06830710247159004, 0.11888350173830986, 0.0, 0.2025559447132624, 0.0, 0.21610151903990368, 0.5625221740740997, 0.0, 0.0, 0.0, 0.21610151903990368, 0.0, 0.147496855746095, 0.0, 0.0, 0.09669635424820276, 0.09669635424820276, 0.0, 0.0, 0.0, 0.1004035013799484, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3403809590981557, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0, 0.09669635424820276, 0.0997928231381453, 0.09669635424820276, 0.21433287234260487, 0.0, 0.0, 0.0, 0.13304106014279218, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2272139037744357, 0.0, 0.23705462586994355, 0.5732327961828559, 0.0, 0.0, 0.0, 0.22984565863242515, 0.0, 0.13304106014279218, 0.0, 0.0, 0.14334828584240034, 0.13407326492552574, 0.0, 0.0, 0.0, 0.0, 0.1339065123635989, 0.0, 0.3893972552739657, 0.08118526571645186, 0.0, 0.0, 0.0, 0.10923841503950266, 0.0, 0.2272706014319108, 0.0, 0.0, 0.3076647049819048, 0.09767914700966615, 0.0, 0.3076647049819048, 0.0, 0.06830710247159004, 0.0, 0.12955333109085376, 0.14481798777213464, 0.0, 0.3343746745815644, 0.0, 0.13140873880340503, 0.0, 0.10902992704739937, 0.0, 0.147496855746095, 0.0, 0.5986728169597112, 0.28987149210073626, 0.0, 0.0, 0.0, 0.24870272502303123, 0.0, 0.0, 0.0, 0.0, 0.09669635424820276, 0.0, 0.0, 0.47797245028643653, 0.11888350173830986, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.12604808675555082, 0.0, 0.0, 0.10435670171147929, 0.11309712254084074, 0.0, 0.0, 0.09669635424820276, 0.1359528959943698, 0.0, 0.13140873880340503, 0.0, 0.0, 0.0, 0.11872643149242951, 0.09669635424820276, 0.0, 0.2587516652276883, 0.0, 0.0, 0.47689617730390566, 0.06830710247159004, 0.2025559447132624, 0.0, 0.0, 0.0, 0.0, 0.4000563544937624, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13140873880340503, 0.1510341577231884, 0.21433287234260487, 0.24431582789581555, 0.390493191835972, 0.0, 0.0, 0.0, 0.0, 0.13891768398193213, 0.11671629897676981, 0.09669635424820276, 0.0, 0.0, 0.25178527028228226, 0.0, 0.0, 0.07276624113034744, 0.0, 0.0, 0.0997928231381453, 0.10502068879655921, 0.0, 0.07276624113034744, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11566166488023905, 0.0, 0.48547195656081804, 0.0, 0.0, 0.21610151903990368, 0.09669635424820276, 0.0, 0.0, 0.0, 0.15444766264408827, 0.0, 0.0, 0.0, 0.09669635424820276, 0.24236466481995125, 0.4383573563626179, 1.335953715730172, 0.0, 0.0, 0.09697436185983511, 0.0, 0.0, 0.0, 0.0, 0.0, 0.26518491424906715, 0.11403429445165854, 0.0, 0.11403429445165854, 0.0, 0.0, 0.0, 0.18842790601058648, 0.20378953161147925, 0.1339065123635989, 0.0, 0.0, 0.23576544051488432, 0.147496855746095, 0.0, 0.0, 0.0, 0.4831451114410391, 0.6070340184065012, 0.0, 0.12381831888969128, 0.19679589834637368, 0.2378358963303841, 0.0, 0.0, 0.2272139037744357, 0.0, 0.0, 0.25627204535099174, 0.0, 1.335953715730172, 0.0667385809410077, 0.09697436185983511, 0.0, 0.12381831888969128, 0.0, 0.0, 0.0, 0.24967937079759744, 0.0, 0.0, 0.20410835777337735, 0.0, 0.0, 0.06830710247159004, 0.0, 0.12967268553777384, 0.0, 0.07276624113034744, 0.0, 0.14228112570081766, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0667385809410077, 0.0, 0.13678573914445363, 0.4231076964356292, 0.0, 0.0, 0.0, 0.09697436185983511, 0.0, 0.0, 0.0, 0.0, 0.30621257298267807, 0.1577516565959041, 0.0, 0.0, 0.0, 0.11566166488023905, 0.0, 0.11888350173830986, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5722638687262168, 0.0, 1.335953715730172, 0.0, 0.0, 0.0, 0.0, 0.3889500736975326, 0.09767914700966615, 0.0, 0.0, 0.19679589834637368, 0.0, 0.12484515286408938, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1339065123635989, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3889500736975326, 0.14334828584240034, 0.12484515286408938, 0.0, 0.0, 0.0, 0.3076647049819048, 0.0, 0.0, 0.37476971751222243, 0.0, 0.0, 0.20022171980773026, 0.5033348432383858, 0.08118526571645186, 0.13407326492552574, 0.05339850178704812, 0.0, 0.07276624113034744, 0.23576544051488432, 0.10376263140485836, 0.12381831888969128, 0.0, 0.0, 0.5271865423768759, 0.0, 0.9914974634893812, 0.0997928231381453, 0.06830710247159004, 0.0, 0.0, 0.14986499525033511, 0.06830710247159004, 0.0, 0.0, 0.0667385809410077, 0.0, 0.0, 0.0, 0.12381831888969128, 0.11566166488023905, 0.07276624113034744, 0.0, 0.0, 0.0, 0.0, 0.20022171980773026, 0.22809357012693698, 0.0, 0.0, 0.0, 0.20814779627208527, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.18016448622712722, 0.0, 0.6681350641382429, 0.0, 0.18016448622712722, 0.0, 0.0, 0.21168392511227957, 0.0, 0.309820867000291, 0.0, 0.0, 0.0, 0.0, 0.0, 0.449244419639357, 0.0, 0.0, 0.0, 0.0, 0.0, 0.16906016666728715, 0.10923841503950266, 0.06946224717853161, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1373021849646018, 0.0, 0.0, 0.16906016666728715, 0.3076647049819048, 0.1373021849646018, 0.0, 0.0, 0.0, 0.12604808675555082, 0.16906016666728715, 0.0997928231381453, 0.2814033681096939, 0.0, 0.0, 0.0, 0.0, 0.0, 0.21433287234260487, 0.0, 0.06830710247159004, 0.0, 0.0, 0.449244419639357, 0.0, 0.0, 0.0, 0.5732327961828559, 0.0, 0.11888350173830986, 0.0, 0.11888350173830986, 0.0, 0.0, 0.0, 0.0, 0.05339850178704812, 0.47689617730390566, 0.0, 0.0, 0.09767914700966615, 0.47689617730390566, 0.0, 0.0, 0.0, 0.0, 0.16906016666728715, 0.25832116229889485, 0.0, 0.5974480479812393, 0.0, 0.0, 0.22831186222342345, 0.11070223904859561, 0.0997928231381453, 0.0, 0.12469149581514873, 0.0, 0.2529567666638356, 0.5625221740740997, 0.0, 0.0, 0.23947998376993032, 1.0962739311158658, 0.13177327313102208, 0.06830710247159004, 0.0, 0.12433093872207862, 0.0, 0.3733864961335292, 0.0, 0.0, 0.0, 0.10776978915987107, 0.0, 0.0, 0.06830710247159004, 0.0, 0.0997928231381453, 0.23947998376993032, 0.0, 0.0, 0.0, 0.0, 0.11070223904859561, 0.0, 0.0, 0.0, 0.14481798777213464, 0.9664059659036306, 0.1510341577231884, 0.0, 0.17316974251029582, 0.0, 0.27725479820599924, 0.449244419639357, 0.0, 0.07365845639545184, 0.11566166488023905, 0.0, 0.0, 0.11070223904859561, 0.12381831888969128, 0.0997928231381453, 0.0, 0.0, 0.0, 0.5625221740740997, 0.37159331663010214, 0.0, 0.0, 0.5625221740740997, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06830710247159004, 0.10923841503950266, 0.11566166488023905, 0.0, 0.06830710247159004, 0.2272139037744357, 0.0, 0.09767914700966615, 0.0, 0.0, 0.0, 0.449244419639357, 0.5732327961828559, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3076647049819048, 0.0, 0.43460153739612833, 0.4879452966798383, 0.0, 0.33092019552221663, 0.0, 0.22809357012693698, 0.11566166488023905, 0.0, 0.5625221740740997, 0.0, 0.31997207729862287, 0.0, 0.0, 0.21168392511227957, 0.0, 0.0, 0.0, 0.0, 0.2062514675172189, 0.0, 0.3584215396298812, 0.0, 0.0, 0.26767922456447896, 0.0, 0.0, 0.0, 0.23868050495019325, 0.0, 0.12955333109085376, 0.47689617730390566, 0.0, 
0.14222848830887905, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0997928231381453, 0.0, 0.6929367052247891, 0.0, 0.0, 0.0, 0.12381831888969128, 0.0, 0.0, 0.0, 0.19216786643776756, 0.0, 0.0, 0.0, 0.0, 0.06946224717853161, 0.11888350173830986, 0.0, 0.11566166488023905, 0.0, 0.4879452966798383, 0.11888350173830986, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0, 0.07276624113034744, 0.0, 0.0, 0.1268795377933062, 0.0, 0.47689617730390566, 1.2826330178632186, 0.06830710247159004, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4879452966798383, 0.0, 0.0, 0.0, 0.0, 0.0, 0.07276624113034744, 0.7238674206229356, 0.06946224717853161, 0.5625221740740997, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06830710247159004, 0.09767914700966615, 0.12604808675555082, 0.28987149210073626, 0.0, 0.0, 0.0, 0.0, 0.3076647049819048, 0.0, 0.28411283725156233, 0.0, 0.0, 0.3479623925084105, 0.0, 0.5936106048237819, 0.0, 0.0, 0.0, 0.2477261745156004, 0.26168205917168125, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09767914700966615, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0997928231381453, 0.0, 0.0, 0.0, 0.0, 0.10923841503950266, 0.0, 0.0, 0.0, 0.0, 0.3076647049819048, 0.0, 0.13678573914445363, 0.28411283725156233, 0.12381831888969128, 0.0, 0.0, 0.5591498447725406, 0.0, 0.0, 0.38637356689343083, 0.0, 0.0, 0.0, 0.0, 0.13407326492552574, 0.0, 0.0, 0.0, 0.0, 0.07276624113034744, 0.3153379072757581, 0.0, 0.0, 0.10502068879655921, 0.0, 0.13304106014279218, 0.0, 0.38784487307644805, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.126718714260138, 0.0, 0.0, 0.1269625305556334, 0.06830710247159004, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11872643149242951, 0.45242821057685295, 0.07276624113034744, 0.0, 0.0, 0.0, 0.12489632002722759, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2607331627263473, 0.09767914700966615, 0.2234314540401101, 0.0, 0.0, 0.0, 0.11872643149242951, 0.0, 0.0, 0.3841197394980834, 0.0, 0.0, 0.0, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2234314540401101, 0.0, 0.08118526571645186, 0.0, 0.10923841503950266, 0.0, 0.0, 0.0, 0.3076647049819048, 0.37751396004683696, 0.0, 0.11872643149242951, 0.0, 0.0, 0.0, 0.0, 0.4016773668046181, 0.0, 0.0, 0.0, 0.0, 0.10435670171147929, 0.4383573563626179, 0.0, 0.0, 0.4383573563626179, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4383573563626179, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11403429445165854, 0.4383573563626179, 0.4383573563626179, 0.0, 0.06830710247159004, 0.6059620088969285, 0.0, 0.45242821057685295, 0.4874875855273925, 0.0, 0.0, 0.0, 0.4383573563626179, 0.7238674206229356, 0.0, 0.0, 0.0, 1.377119022837052, 0.0, 0.25457934886217115, 0.20878332020141757, 0.3714652218640997, 0.4383573563626179, 0.0, 0.0, 0.0, 0.0, 0.26669582260342745, 0.0, 0.0, 0.12967268553777384, 0.0, 0.0, 0.0, 0.0, 0.3714652218640997, 0.0, 0.06830710247159004, 0.0, 0.1317415266082837, 0.0, 0.14481798777213464, 0.39150820390249674, 0.8622538648689022, 0.12433093872207862, 0.4383573563626179, 0.11309712254084074, 0.0, 0.0, 0.0, 0.1317415266082837, 0.0, 0.0, 0.4383573563626179, 0.4383573563626179, 0.0, 0.4383573563626179, 0.0, 0.6735185424152476, 0.0, 0.3876965424475762, 0.0, 0.0, 0.0, 0.4383573563626179, 0.0, 0.39150820390249674, 0.0, 0.4383573563626179, 0.0, 0.11566166488023905, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0, 0.0, 0.20878332020141757, 0.0, 0.0, 0.0, 0.4383573563626179, 0.8622538648689022, 0.0, 0.1317415266082837, 0.0, 0.0, 0.0, 0.0, 0.39893530865128224, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4383573563626179, 0.0, 0.7608246485559413, 0.0, 0.0, 0.39150820390249674, 0.7608246485559413, 0.0, 0.0, 0.0, 0.519329229914225, 0.0, 0.0, 0.0, 
0.0, 0.3714652218640997, 0.25457934886217115, 0.0, 0.0, 0.4383573563626179, 0.519329229914225, 0.0, 0.0, 0.0, 0.0, 0.7608246485559413, 0.0, 0.8622538648689022, 0.0, 0.0, 0.3876965424475762, 0.0, 0.0, 0.0, 0.0, 0.8622538648689022, 0.0, 0.8777779664378613, 0.6735185424152476, 0.7238674206229356, 0.0, 0.0, 0.8622538648689022, 0.0, 0.0, 0.0, 0.4383573563626179, 0.0, 0.0, 0.7066019622322458, 0.7608246485559413, 0.0, 0.0, 0.0, 0.4383573563626179, 0.0, 0.0, 0.0, 0.20878332020141757, 0.0, 0.0, 0.0, 0.0, 0.4383573563626179, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11403429445165854, 0.45242821057685295, 0.4383573563626179, 0.10603597227197427, 0.4383573563626179, 0.0, 0.4303353395599585, 0.0, 0.39150820390249674, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.39893530865128224, 1.377119022837052, 0.0, 0.0, 0.11403429445165854, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.39150820390249674, 1.0703157210436005, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0, 0.0, 0.6735185424152476, 0.0, 0.0, 0.7238674206229356, 0.6214872679888056, 0.0, 0.39150820390249674, 0.0, 0.20878332020141757, 0.0, 0.06830710247159004, 0.0, 0.4383573563626179, 0.4383573563626179, 0.10603597227197427, 0.4874875855273925, 0.0, 0.48411541920728407, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4874875855273925, 0.0, 0.0, 0.0, 0.8777779664378613, 0.0, 0.3479623925084105, 0.7156121545686172, 0.7156121545686172, 0.0, 0.8622538648689022, 0.4383573563626179, 0.0, 0.4383573563626179, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3714652218640997, 0.0, 0.0, 0.11566166488023905, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3714652218640997, 0.45242821057685295, 0.0, 0.0, 0.6214872679888056, 0.0, 0.0, 0.0, 1.377119022837052, 0.0, 0.45242821057685295, 0.4383573563626179, 0.12433093872207862, 0.0, 0.06830710247159004, 0.4383573563626179, 0.5556554651604249, 0.0, 0.4383573563626179, 0.0, 0.0, 0.0, 0.06830710247159004, 0.0, 0.39893530865128224, 0.7608246485559413, 0.4383573563626179, 0.0, 0.8777779664378613, 1.377119022837052, 0.06830710247159004, 0.0, 0.4383573563626179, 0.6735185424152476, 0.0, 0.0, 0.0, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0, 0.11403429445165854, 0.0, 0.0, 0.0, 0.0, 0.4383573563626179, 0.4383573563626179, 0.20878332020141757, 0.0, 0.0, 0.0, 0.3714652218640997, 0.8777779664378613, 0.0, 0.6875334479487859, 0.20878332020141757, 0.0, 0.1317415266082837, 0.39150820390249674, 1.377119022837052, 0.12433093872207862, 0.0, 0.0, 0.48411541920728407, 1.0703157210436005, 0.0, 0.0, 1.0703157210436005, 0.0, 0.0, 0.0, 0.11309712254084074, 1.377119022837052, 0.4383573563626179, 0.5632536763898455, 0.7156121545686172, 0.0, 0.0, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0, 0.9855494797946169, 0.0, 1.377119022837052, 0.39893530865128224, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4383573563626179, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.14481798777213464, 0.519329229914225, 0.20878332020141757, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11309712254084074, 0.14481798777213464, 0.4383573563626179, 0.0, 0.0, 0.0, 0.3479623925084105, 0.6214872679888056, 0.0, 0.0, 0.39150820390249674, 0.6214872679888056, 0.0, 0.0, 0.0, 0.4383573563626179, 0.4383573563626179, 0.0, 0.0, 0.4383573563626179, 0.6059620088969285, 0.0, 0.3876965424475762, 0.0, 0.4383573563626179, 0.20878332020141757, 0.0, 0.0, 0.12433093872207862, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06830710247159004, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.519329229914225, 0.06830710247159004, 0.0, 0.5103829058460319, 0.4383573563626179, 0.8622538648689022, 0.0, 0.0, 
0.26669582260342745, 0.8622538648689022, 0.0, 0.0, 0.0, 0.0, 0.0, 1.377119022837052, 0.7608246485559413, 0.3876965424475762, 0.3876965424475762, 0.0, 0.0, 0.3714652218640997, 1.0318207265379338, 0.1510341577231884, 0.0, 0.26608212028558437, 0.8328973476703351, 0.2407714536556831, 0.0, 0.0, 0.0, 0.2712086973282007, 0.852654671898255, 0.11309712254084074, 0.0, 0.0, 0.0, 0.0, 0.4303353395599585, 0.0, 0.7544117451287233, 0.7544117451287233, 0.45281853343431766, 0.13678573914445363, 0.0, 0.11566166488023905, 0.11888350173830986, 0.0, 0.0, 0.13678573914445363, 0.37476971751222243, 0.41382877563054743, 0.8767147127252358, 0.671848807311975, 0.3955396360502793, 0.45465741083025935, 0.11888350173830986, 0.0, 0.36560761467195474, 0.2463321120120012, 0.7614126259890887, 1.2180815001519827, 0.27806692321856435, 0.2712086973282007, 0.8904172331381303, 0.0, 0.13678573914445363, 0.29972999050067023, 0.5269380773346012, 0.0, 0.0, 0.0, 0.2463321120120012, 0.9855494797946169, 1.0971561791661841, 0.8395290855031747, 0.0, 0.14334828584240034, 0.12967268553777384, 0.3643643899032703, 0.0, 0.0, 0.11309712254084074, 0.0, 0.0, 0.874901578346124, 0.0, 0.0, 0.10902992704739937, 0.41382877563054743, 0.0, 0.0, 0.0, 0.11265019734318439, 0.5103829058460319, 0.0, 0.0, 1.7383035655778187, 0.0, 0.0, 0.4303353395599585, 0.2463321120120012, 0.0, 0.37476971751222243, 0.0, 0.7238674206229356, 0.5621756752523092, 1.7383035655778187, 0.0, 0.0, 0.37476971751222243, 0.8395290855031747, 0.29972999050067023, 0.0, 0.0, 0.0, 1.2180815001519827, 0.619206467600396, 0.1510341577231884, 0.6315288291241113, 0.2407714536556831, 0.11265019734318439, 0.840765252781029, 0.11566166488023905, 0.8767147127252358, 0.10902992704739937, 0.36560761467195474, 0.3714652218640997, 0.7544117451287233, 0.0, 0.0, 0.9705241339137921, 0.8904172331381303, 0.0, 0.13304106014279218, 1.0971561791661841, 0.852654671898255, 0.11566166488023905, 0.0, 0.25789158381521704, 0.0, 0.12967268553777384, 0.0, 0.0, 1.2180815001519827, 0.0, 0.40029019254904524, 0.0, 0.390413059007663, 0.29972999050067023, 0.0, 0.9676604081375094, 0.0, 1.7383035655778187, 0.11265019734318439, 0.0, 0.0, 0.11265019734318439, 0.5505350146012811, 0.0, 0.0, 0.0, 0.7614126259890887, 0.0, 0.0, 1.7383035655778187, 0.0, 0.0, 0.4303353395599585, 0.11265019734318439, 0.36560761467195474, 0.3643643899032703, 1.2180815001519827, 0.0, 0.0, 1.2180815001519827, 1.2725104025923288, 0.3714652218640997, 0.38511113352500476, 0.9855494797946169, 0.36560761467195474, 0.11265019734318439, 0.5103829058460319, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13678573914445363, 0.0, 0.14334828584240034, 0.0, 0.2495630056382372, 0.11265019734318439, 0.0, 0.9705241339137921, 0.0, 0.10902992704739937, 0.6210548330241671, 0.3643643899032703, 0.2495630056382372, 0.0, 0.11309712254084074, 0.0, 0.26608212028558437, 0.0, 0.2463321120120012, 0.3714652218640997, 0.0, 0.45465741083025935, 0.38511113352500476, 0.8767147127252358, 0.0, 0.0, 0.0, 0.5269380773346012, 0.0, 0.0, 0.5505350146012811, 0.0, 1.2180815001519827, 0.0, 1.2725104025923288, 0.6315288291241113, 0.0, 0.0, 0.2563050588306326, 0.36560761467195474, 0.0, 0.5103829058460319, 0.8328973476703351, 0.25165000534974613, 0.5103829058460319, 0.0, 0.0, 0.0, 0.0, 0.10376263140485836, 0.0, 0.0, 0.0, 1.5567647682932706, 0.0, 0.25165000534974613, 0.14334828584240034, 1.0971561791661841, 0.0, 1.2725104025923288, 0.0, 0.0, 0.5356073511764408, 0.1746036392278396, 0.5103829058460319, 0.26608212028558437, 0.3955396360502793, 0.0, 0.40029019254904524, 0.1510341577231884, 0.0, 1.0971561791661841, 
0.13678573914445363, 0.0, 0.8328973476703351, 0.0, 0.0, 0.40029019254904524, 0.11309712254084074, 0.0, 0.0, 0.11309712254084074, 0.0, 0.0, 1.417805487017792, 0.11566166488023905, 0.4303353395599585, 0.0, 0.671848807311975, 0.11566166488023905, 0.5556554651604249, 0.0, 0.11566166488023905, 0.13678573914445363, 0.0, 0.0, 0.0, 0.0, 0.13678573914445363, 0.7614126259890887, 0.0, 0.0, 1.0836212564546328, 0.0, 0.0, 1.0318207265379338, 0.11309712254084074, 0.11309712254084074, 0.0, 0.0, 0.5103829058460319, 0.0, 1.2725104025923288, 0.7614126259890887, 0.20757860482598728, 0.0, 0.26608212028558437, 0.1510341577231884, 0.41382877563054743, 0.8904172331381303, 0.0, 0.390413059007663, 0.9342806422796387, 0.619206467600396, 0.0, 0.0, 0.7066019622322458, 1.2180815001519827, 0.0, 1.0971561791661841, 0.5269380773346012, 0.40884702721467386, 0.0, 0.0, 0.9676604081375094, 0.9705241339137921, 0.0, 0.5356073511764408, 0.5269380773346012, 0.0, 0.37476971751222243, 0.0, 1.4477348412458713, 0.0, 0.0, 0.2407714536556831, 0.1510341577231884, 1.0836212564546328, 0.0, 0.0, 0.8534207517137894, 0.0, 0.0, 0.2463321120120012, 0.0, 0.5556554651604249, 0.5505350146012811, 0.40884702721467386, 0.0, 0.0, 0.0, 0.11265019734318439, 0.0, 0.25165000534974613, 0.37476971751222243, 0.852654671898255, 0.9705241339137921, 0.13678573914445363, 0.11265019734318439, 0.0, 0.0, 0.0, 0.0, 0.1746036392278396, 0.0, 0.0, 0.11566166488023905, 0.0, 0.0, 0.29972999050067023, 0.0, 0.0, 0.3714652218640997, 0.0, 0.5621756752523092, 0.0, 0.0, 0.5269380773346012, 0.0, 0.0, 0.0, 0.0, 0.0, 0.10376263140485836, 0.7614126259890887, 0.0, 0.2407714536556831, 0.0, 0.0, 0.2712086973282007, 0.0, 0.0, 0.0, 0.0, 0.8395290855031747, 0.840765252781029, 0.13678573914445363, 0.25165000534974613, 0.0, 0.0, 0.10902992704739937, 0.0, 0.0, 0.14334828584240034, 0.0, 0.2712086973282007, 0.6315288291241113, 0.0, 0.13304106014279218, 0.0, 0.2563050588306326, 0.0, 0.0, 0.0, 0.9342806422796387, 0.390413059007663, 0.26608212028558437, 0.2463321120120012, 0.0, 0.13678573914445363, 0.0, 0.619206467600396, 0.10376263140485836, 0.0, 0.0, 0.0, 0.0, 0.5621756752523092, 0.45281853343431766, 0.41382877563054743, 0.1510341577231884, 0.0, 0.36560761467195474, 0.11309712254084074, 1.417805487017792, 0.0, 0.0, 0.0, 0.9571833831186478, 0.11265019734318439, 0.3714652218640997, 0.0, 0.1746036392278396, 0.5556554651604249, 0.0, 0.25165000534974613, 1.0971561791661841, 0.0, 0.8328973476703351, 0.840765252781029, 0.0, 0.0, 0.0, 0.0, 1.0836212564546328, 1.2725104025923288, 0.0, 0.2463321120120012, 0.2463321120120012, 0.0, 0.0, 0.5505350146012811, 0.0, 1.0971561791661841, 0.0, 0.5505350146012811, 0.13678573914445363, 0.37476971751222243, 0.0, 0.0, 0.8039831556093234, 0.45281853343431766, 0.0, 0.11265019734318439, 0.13678573914445363, 1.2725104025923288, 0.0, 0.11566166488023905, 0.2407714536556831, 0.8328973476703351, 0.8395290855031747, 0.6315288291241113, 0.0, 0.0, 0.10376263140485836, 0.9342806422796387, 0.0, 0.5269380773346012, 0.0, 0.14334828584240034, 0.1510341577231884, 0.0, 0.0, 0.0, 0.1746036392278396, 0.41382877563054743, 0.14334828584240034, 0.8039831556093234, 0.0, 0.0, 0.0, 0.0, 0.0, 0.11309712254084074, 0.0, 0.0, 0.11309712254084074, 0.40029019254904524, 0.5356073511764408, 0.36560761467195474, 0.9676604081375094, 0.0, 0.2463321120120012, 0.852654671898255, 0.0, 0.0, 0.13304106014279218, 0.24956817724383795, 0.0, 0.13678573914445363, 0.9571833831186478, 0.0, 1.4477348412458713, 0.0, 0.13304106014279218, 0.41382877563054743, 0.0, 0.0, 0.11888350173830986, 0.11566166488023905, 0.0, 
0.0, 0.3714652218640997, 0.840765252781029, 0.2712086973282007, 1.4477348412458713, 0.0, 0.11265019734318439, 0.874901578346124, 0.3955396360502793, 0.11309712254084074]
len(ns)
ls
print(len(nlp_score))
import nltk
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import argparse
import gensim
from gensim import models, utils
from gensim.test.utils import datapath
from gensim.models import KeyedVectors
import logging
import sys
from scipy import stats
from itertools import product
nltk.download('wordnet')
from nltk.corpus import wordnet as wn
wn.get_version()
!wget https://dl.fbaipublicfiles.com/fasttext/vectors-english/crawl-300d-2M.vec.zip
!unzip crawl-300d-2M.vec.zip
from gensim import models, utils
from gensim.test.utils import datapath
from gensim.models import KeyedVectors
wv = KeyedVectors.load_word2vec_format("/content/crawl-300d-2M.vec",binary=False)
cosine_similarity(wv["man"].reshape(1,-1),wv["male"].reshape(1,-1))
import pandas as pd   # needed below; added here in case it was not imported earlier in the notebook
dddffff = pd.read_csv('nlp_score_3279.csv')
dddffff
dddffff.describe()
# Load the raw data and inspect two of its columns
banana = pd.read_csv('data.csv')
banana[['width', 'height']]
# Build a 1-based mapping from column index to column name and save it
pineapple = banana.columns.tolist()
d = dict()
for i in range(1, 1559):
    d[i] = pineapple[i - 1]
print(pineapple[0])
pd.DataFrame.from_dict(d, orient='index').to_csv('column_names.csv')
# Rebuild the modelling dataframe: keep the first 1428 feature columns,
# then append the NLP score and the original output column
df = pd.read_csv('complete_data.csv')
o1 = df[df.columns.values[1558]]   # original output column
o1
df = df.drop(df.columns.values[1428:1559], axis=1)
df2 = df
df2['nlp'] = dddffff['nlp'].to_list()
df2['output'] = o1.to_list()
df2.describe()
df2.to_csv("nlpGendata_3279.csv", index=False)
###Output
_____no_output_____ |
notebooks/1.4-jf-data-etl-testing.ipynb | ###Markdown
Data ETL - Function Testing. This notebook implements the functions developed for Twitter data ETL. See notebooks 1.2 and 1.3, as well as the documentation, to understand the steps developed/implemented in these functions.
###Code
import os
import sys
from os.path import join
import pandas as pd
import numpy as np
project_dir = join(os.getcwd(), os.pardir)
raw_dir = join(project_dir, 'data', 'raw')
raw_fname = 'data_pull_sample.json'
sys.path.append(project_dir)
from src.data import transform, load_es
###Output
_____no_output_____
###Markdown
Initialize the Elasticsearch and Kibana Docker containers:
###Code
os.chdir(project_dir)
!make database
os.chdir(join(project_dir, 'notebooks'))
df = transform(join(raw_dir, raw_fname))
load_es(df, verbose=True)
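# Quick sanity check (sketch): count how many documents landed in Elasticsearch.
# Assumes the official `elasticsearch` Python client pointed at localhost:9200 and
# the 'twitter' index that load_es writes to (both visible in the bulk-load log below).
from elasticsearch import Elasticsearch
es = Elasticsearch('http://localhost:9200')
print(es.count(index='twitter'))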
###Output
2020-08-03 18:48:18,893 - src.data._load_es - INFO - Loading data into es
2020-08-03 18:48:23,261 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:4.012s]
2020-08-03 18:48:23,803 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.376s]
2020-08-03 18:48:24,450 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.464s]
2020-08-03 18:48:25,183 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.426s]
2020-08-03 18:48:25,888 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.489s]
2020-08-03 18:48:26,592 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.488s]
2020-08-03 18:48:27,252 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.470s]
2020-08-03 18:48:27,794 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.343s]
2020-08-03 18:48:28,335 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.380s]
2020-08-03 18:48:29,121 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.637s]
2020-08-03 18:48:32,326 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:3.040s]
2020-08-03 18:48:35,425 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:2.075s]
2020-08-03 18:48:37,632 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.466s]
2020-08-03 18:48:38,423 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.490s]
2020-08-03 18:48:39,085 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.347s]
2020-08-03 18:48:39,588 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.272s]
2020-08-03 18:48:40,119 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.321s]
2020-08-03 18:48:40,591 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.244s]
2020-08-03 18:48:41,044 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.280s]
2020-08-03 18:48:41,556 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.308s]
2020-08-03 18:48:42,356 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.601s]
2020-08-03 18:48:43,054 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.522s]
2020-08-03 18:48:43,428 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.193s]
2020-08-03 18:48:43,924 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.268s]
2020-08-03 18:48:46,137 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:2.016s]
2020-08-03 18:48:48,134 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.565s]
2020-08-03 18:48:48,789 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.356s]
2020-08-03 18:48:49,460 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.436s]
2020-08-03 18:48:50,157 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.454s]
2020-08-03 18:48:50,802 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.343s]
2020-08-03 18:48:51,449 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.372s]
2020-08-03 18:48:52,003 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.322s]
2020-08-03 18:48:52,482 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.270s]
2020-08-03 18:48:53,147 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.443s]
2020-08-03 18:48:53,611 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.257s]
2020-08-03 18:48:54,072 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.241s]
2020-08-03 18:48:54,524 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.250s]
2020-08-03 18:48:55,108 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.361s]
2020-08-03 18:48:55,687 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.353s]
2020-08-03 18:48:56,234 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.284s]
2020-08-03 18:48:56,992 - elasticsearch - INFO - POST http://localhost:9200/twitter/_bulk [status:200 request:0.359s]
|
Cap05_IntegracionNumerica.ipynb | ###Markdown
Métodos NuméricosCapítulo 5: Diferenciación e integración numérica2021/02MEDELLÍN - COLOMBIA Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license.(c) Carlos Alberto Alvarez Henao *** ***Docente:*** Carlos Alberto Álvarez Henao, I.C. D.Sc.***e-mail:*** [email protected]***skype:*** carlos.alberto.alvarez.henao***Linkedin:*** https://www.linkedin.com/in/carlosalvarez5/***github:*** https://github.com/carlosalvarezh/Metodos_Numericos***Herramienta:*** [Jupyter](http://jupyter.org/)***Kernel:*** Python 3.8*** Tabla de Contenidos1 Diferenciación Numérica1.1 Introducción1.2 Series de Taylor1.3 Esquemas de diferencias finitas para la primera derivada1.3.1 Esquema de primer orden hacia adelante (forward)1.3.2 Esquema de primer orden hacia atrás (backward)1.3.3 Esquema de segundo orden (central)1.3.4 Resumen esquemas diferencias finitas para la primera derivada1.4 Esquemas de diferencias finitas para la segunda derivada1.5 Implementación computacional de algunos esquemas de diferencias finitas2 Integración Numérica2.1 Introducción2.2 Fórmulas de integración de Newton - Cotes2.3 Regla trapezoidal2.3.1 Regla trapezoidal de aplicación simple2.3.2 Regla trapezoidal de aplicación múltiple2.3.3 Implementación computacional2.3.4 Error en la aplicación de la regla trapezoidal2.4 Reglas de Simpson2.4.1 Regla de Simpson1/3 de aplicación simple2.4.2 Error en la regla de Simpson 1/3 de aplicación simple2.4.3 Regla de simpson1/3 de aplicación múltiple2.4.4 Implementación computacional regla de Simpson1/3 de aplicación múltiple2.4.5 Regla de Simpson 3/8 de aplicación simple2.4.6 Regla de Simpson3/8 de aplicación múltiple2.4.7 Implementación computacional de la regla de Simpson3/8 de aplicación múltiple2.5 Cuadratura de Gauss2.5.1 Introducción2.5.2 Determinación de los coeficientes2.5.3 Cambios de los límites de integración2.5.4 Fórmulas de punto superior2.5.5 Ejemplo Cuadratura de Gauss Diferenciación Numérica Introducción La [diferenciación numérica](https://en.wikipedia.org/wiki/Numerical_differentiation) se emplea para determinar (estimar) el valor de la derivada de una función en un punto específico. No confundir con la derivada de una función, pues lo que se obtendrá es un valor puntual y no una función. En este capítulo nos centraremos únicamente en ecuiaciones unidimensionales. [Volver a la Tabla de Contenido](TOC) Series de Taylor De la [serie de Taylor](https://en.wikipedia.org/wiki/Taylor_series) \begin{equation*}f(x_{i \pm 1}) = f(x_i) \pm f'(x_i)h + \frac{f''(x_i)h^2}{2!} \pm \frac{f'''(x_i)h^3}{3!} + \ldots\label{eq:Ec5_1} \tag{5.1}\end{equation*}con $h=\Delta x = x_{i+1}-x_i$ siendo el tamaño de paso.Dada que la serie contiene infinitos términos, partir de la ecuación ($5.1$) se pueden obtener infinitos esquemas numéricos para determinar cada una de las infinitas derivadas de dicho polinomio. En este curso usaremos la técnica de [Diferencias Finitas](https://en.wikipedia.org/wiki/Finite_difference) para desarrollarlas. 
[Volver a la Tabla de Contenido](TOC) Esquemas de diferencias finitas para la primera derivada Esquema de primer orden hacia adelante (forward) De la ecuación [(5.1)](Ec5_1) tomando los valores positivos, que involucran únicamente términos hacia adelante, se trunca la serie hasta la primera derivada y se realiza un despeje algebraico para llegar a:\begin{equation*}f'(x_i) = \frac{f(x_{i+1})-f(x_i)}{h} + \mathcal{O}(h)\label{eq:Ec5_2} \tag{5.2}\end{equation*}se puede observar que el término $\mathcal{O}(h)$ indica que el error es de orden lineal, es decir, si se reduce el tamaño de paso, $h$, a la mitad, el error se reducirá a la mitad. Si se reduc el tamaño de paso a una cuarta parte, el error se reducirá, linealmente, una cuarta parte. [Volver a la Tabla de Contenido](TOC) Esquema de primer orden hacia atrás (backward) De la ecuación [(5.1)](Ec5_1) tomando los valores negativos, que involucran únicamente términos hacia atrás (backward), se trunca la serie hasta la primera derivada y se realiza un despeje algebraico para llegar a:\begin{equation*}f'(x_i) = \frac{f(x_{i})-f(x_{i-1})}{h} + \mathcal{O}(h)\label{eq:Ec5_3} \tag{5.3}\end{equation*}se observa que se llega a una expresión similar a la de la ecuación [(5.2)](Ec5_2), pero de esta vez, se tiene en cuenta es el valor anterior al punto $x_i$. También se observa que el error es de orden lineal, por lo que se mantiene un esquema de primer orden. [Volver a la Tabla de Contenido](TOC) Esquema de segundo orden (central) Una forma de aumentar el orden de estos esquemas, es realizar el truncamiento de la *serie de Taylor* hasta la segunda derivada, hacia adelante y hacia atras, y realizar su resta aritmética.\begin{equation*}\begin{split}f(x_{i+1}) & = f(x_i) + f'(x_i)h + \frac{f''(x_i)h^2}{2!} \\- \\f(x_{i-1}) & = f(x_i) - f'(x_i)h + \frac{f''(x_i)h^2}{2!} \\\hline \\f(x_{i+1}) - f(x_{i-1}) & = 2 f'(x_i)h\end{split}\label{eq:Ec5_4} \tag{5.4}\end{equation*} de la anterior ecuación, despejando el término que corresponde a la primera derivada queda:\begin{equation*}\begin{split}f'(x_i) = \frac{f(x_{i+1}) - f(x_{i-1})}{2h} + \mathcal{O}(h^2)\end{split}\label{eq:Ec5_5} \tag{5.5}\end{equation*}se llega al esquema de diferencias finitas central para la primera derivada, que es de orden dos, es decir, si se disminuye el tamaño de paso, $h$, a la mitad, el error se disminuye una cuarta partes. En principio, esta es una mejor aproximación que los dos esquemas anteriores. La selección del esquema dependerá de la disponibilidad de puntos y del fenómeno físico a tratar. [Volver a la Tabla de Contenido](TOC) Resumen esquemas diferencias finitas para la primera derivada Como la serie de Taylor es infinita, se podrían determinar infinitos esquemas de diferentes ordenes para la primera derivada. En la siguiente tabla se presentan algunos esquemas de diferencias finitas para la primera derivada de diferentes órdenes. 
Se deja al estudiante la consulta de otros esquemas.|***Esquema***|***Función***|***Error***||:-----:|:-----:|:---:||***Forward***|$$f´(x_0)=\frac{f(x_0+h)-f(x_0)}{h}$$|$$\mathcal{O}(h)$$|| |$$f´(x_0)=\frac{-3f(x_0)+4f(x_0+h)-f(x_0+2h)}{2h}$$|$$\mathcal{O}(h^2)$$||***Central***|$$f´(x_0)=\frac{f(x_0+h)-f(x_0-h)}{2h}$$|$$\mathcal{O}(h^2)$$|| |$$f´(x_0)=\frac{f(x_0-2h)-8f(x_0-h)+8f(x_0+h)-f(x_0+2h)}{12h}$$|$$\mathcal{O}(h^4)$$||***Backward***|$$f´(x_0)=\frac{f(x_0)-f(x_0-h)}{h}$$|$$\mathcal{O}(h)$$|| |$$f´(x_0)=\frac{f(x_0-2h)-4f(x_0-h)+3f(x_0)}{2h}$$|$$\mathcal{O}(h^2)$$| [Volver a la Tabla de Contenido](TOC) Esquemas de diferencias finitas para la segunda derivada Siguiendo con la misma forma de abordar el problema para la primera derivada, si se amplian los términos en la serie de Taylor hasta la tercera derivada tanto hacia adelante como hacia atrás, y se suman, se llega a:\begin{equation*}\begin{split}f(x_{i+1}) & = f(x_i) + f'(x_i)h + \frac{f''(x_i)h^2}{2!} + \frac{f'''(x_i)h^3}{3!}\\+ \\f(x_{i-1}) & = f(x_i) - f'(x_i)h + \frac{f''(x_i)h^2}{2!} - \frac{f'''(x_i)h^3}{3!}\\\hline \\f(x_{i+1}) + f(x_{i-1}) & = 2 f(x_i) + 2f''(x_i)\frac{h^2}{2!} + \mathcal{O}(h^3)\end{split}\label{eq:Ec5_6} \tag{5.6}\end{equation*} Despejando para el término de la segunda derivada, se llega a:\begin{equation*}\begin{split}f''(x_i) = \frac{f(x_{i+1}) - 2f(x_i) + f(x_{i-1})}{h^2} + \mathcal{O}(h^3)\end{split}\label{eq:Ec5_7} \tag{5.7}\end{equation*}Que corresponde a un esquema de diferencias finitas de segundo orden para la segunda derivada. A este esquema también se le llama "*molécula de tres puntos*"Igual que para la primera derivada, se pueden determinar infinitos esquemas de diferentes órdenes para la segunda derivada, y derivadas superiores. A continuación se muestra un cuadro resumen de algunos esquemas de diferencias finitas para la segunda derivada. Se deja al estudiante la revisión de esquemas de mayor orden para la segunda derivada y derivadas superiores.|***Esquema***|***Función***|***Error***||:-----:|:-----:|:---:||***Forward***|$$f''(x_0)=\frac{f(x_0)-2f(x_0+h)+f(x_0+2h)}{h^2}$$|$$\mathcal{O}(h)$$|| |$$f''(x_0)=\frac{2f(x_0)-5f(x_0+h)+4f(x_0+2h)-f(x_0+3h)}{h^2}$$|$$\mathcal{O}(h^2)$$||***Central***|$$f''(x_0)=\frac{f(x_0-h)-2f(x_0)+f(x_0+h)}{h^2}$$|$$\mathcal{O}(h^2)$$|| |$$f''(x_0)=\frac{-f(x_0-2h)+16f(x_0-h)-30f(x_0)+16f(x_0+h)-f(x_0+2h)}{12h^2}$$|$$\mathcal{O}(h^4)$$||***Backward***|$$f''(x_0)=\frac{f(x_0-2h)-2f(x_0-h)+f(x_0)}{h}$$|$$\mathcal{O}(h^2)$$|| |$$f''(x_0)=\frac{-f(x_0-3h)+4f(x_0-2h)-5f(x_0-h)+2f(x_0)}{h^2}$$|$$\mathcal{O}(h^2)$$| [Volver a la Tabla de Contenido](TOC) Implementación computacional de algunos esquemas de diferencias finitas A manera de ejemplo, se implementarán algunos esquemas simples de diferencias finitas para la primera derivada. Se deja como actividad a los estudiantes la implementación de otros esquemas para las diferentes derivadas.
###Code
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
sym.init_printing()
#Esquemas de diferencias finitas para la primera derivada
def df1df(x0, h):
# Esquema de diferencias finitas para la primera derivada hacia adelante (forward)
return (f(x0 + h) - f(x0)) / h
def df1db(x0, h):
# Esquema de diferencias finitas para la primera derivada hacia atrás (backward)
return (f(x0) - f(x0 - h) ) / h
def df1dc(x0,h):
# Esquema de diferencias finitas para la primera derivada central (central)
return (f(x0 + h) - f(x0 - h) ) / (2 * h)
#funcion a determinar el valor de la derivada
def f(x):
return 2*x**3 - 3*x**2 + 5*x+0.8
#cálculo y evaluación de la primera derivada empleando cálculo simbólico
def df1de(x0):
x = sym.Symbol('x')
df = sym.diff(f(x), x)
#print(df)
df1 = df.evalf(subs={x:x0})
return df1
h = 0.1
x0 = 0.8
print("1st derivative \t Value \t\t Error(%)")
print('---------------------------------------')
pde = df1de(x0)
pdf = df1df(x0, h)
epdf = abs((pde - pdf) / pde * 100)
print("forward \t {0:6.4f} \t {1:6.2f}".format(pdf,epdf))
pdb = df1db(x0, h)
epdb = abs((pde - pdb) / pde * 100)
print("backward \t {0:6.4f} \t {1:6.2f}".format(pdb,epdb))
pdc = df1dc(x0,h)
epdc = abs((pde - pdc) / pde * 100)
print("central \t {0:6.4f} \t {1:6.2f}".format(pdc, epdc))
print("exacta \t\t {0:6.4f} \t {1}".format(pde, ' -'))
###Output
_____no_output_____
###Markdown
[Volver a la Tabla de Contenido](TOC) Integración Numérica Introducción La [integración numérica](https://en.wikipedia.org/wiki/Numerical_integration) aborda una amplia gama de algoritmos para determinar el valor numérico (aproximado) de una integral definida. En este curso nos centraremos principalmente en los métodos de cuadratura, tanto de interpolación como [gaussiana](https://en.wikipedia.org/wiki/Gaussian_quadrature), como dos ejemplos de dichos algoritmos. El problema a tratar en este capítulo es la solución aproximada de la función\begin{equation*}\begin{split}I = \int_a^b f(x) dx\end{split}\label{eq:Ec5_8} \tag{5.8}\end{equation*} [Volver a la Tabla de Contenido](TOC) Fórmulas de integración de *Newton - Cotes* La idea básica en la integración numérica es cambiar una función difícil de integrar, $f(x)$, dada por la ecuación [(5.8)](Ec5_8), por una función más simple, $p_n(x)$,\begin{equation*}\begin{split}\widetilde{I} \approx \int_{a=x_0}^{b=x_n} p_{n}(x) dx\end{split}\label{eq:Ec5_9} \tag{5.9}\end{equation*}Cabe resaltar que en integración numérica no se conocerá la función a integrar, solo se dispondrá de una serie de $n+1$ puntos $(x_i, y_i), i = 0, 1, 2, \ldots, n$, y a partir de ellos se construye un polinomio interpolante de grado $n$, $p_n$, entre los valores de los límites de integración $a = x_0$ y $b=x_n$. $p_n(x)$ es un polinomio de interpolación de la forma\begin{equation*}\begin{split}p_n(x)=a_0+a_1x+a_2x^2+\ldots+a_{n-1}x^{n-1}+a_nx^n\end{split}\label{eq:Ec5_10} \tag{5.10}\end{equation*}Las fórmulas de integración de [*Newton - Cotes*](https://en.wikipedia.org/wiki/Newton%E2%80%93Cotes_formulas), también llamadas de [cuadratura](https://en.wikipedia.org/wiki/Quadrature_(mathematics)), son un grupo de fórmulas de integración numérica de tipo interpolación, evaluando la función en puntos equidistantes, para determinar un valor aproximado de la integral. 
Si no se tienen puntos espaciados, otros métodos deben ser usados, como por ejemplo cuadratura gaussiana, que se verá al final del capítulo.La forma general de las fórmulas de Newton - Cotes está dada por la función:\begin{equation*}\begin{split}p_n(x)=\sum \limits_{i=0}^n f(x_i)L_{in}(x)\end{split}\label{eq:Ec5_11} \tag{5.11}\end{equation*}donde\begin{equation*}\begin{split}L_{in}(x)=\frac{(x-x_0)\ldots(x-x_{i-1})(x-x_{i+1})\ldots(x-x_n)}{(x_i-x_0)\ldots(x_i-x_{i-1})(x_i-x_{i+1})\ldots(x_i-x_n)}\end{split}\label{eq:Ec5_12} \tag{5.12}\end{equation*}es el polinomio de Lagrange, de donde se deduce que:\begin{equation*}\begin{split}\int_a^b p(x)dx=(b-a)\sum \limits_{i=0}^n f(x_i) \frac{1}{(b-a)} \int_a^b L_{in}(x)dx\end{split}\label{eq:Ec5_13} \tag{5.13}\end{equation*}entonces,\begin{equation*}\begin{split}\int_a^b f(x)dx \approx \int_a^b p(x)dx=(b-a)\sum \limits_{i=0}^n w_if(x_i) \end{split}\label{eq:Ec5_14} \tag{5.14}\end{equation*}donde los pesos, $w_i$ de la función son representados por\begin{equation*}\begin{split}w_i=\frac{1}{(b-a)} \int_a^b L_{in}(x)dx\end{split}\label{eq:Ec5_15} \tag{5.15}\end{equation*}A partir de esta idea se obtienen los diferentes esquemas de integración numérica de *Newton - Cotes* [Volver a la Tabla de Contenido](TOC) Regla trapezoidal Regla trapezoidal de aplicación simple La [regla trapezoidal](https://en.wikipedia.org/wiki/Trapezoidal_rule) emplea una aproximación de la función mediante una línea recta Fuente: wikipedia.com y corresponde al caso en el que el polinomio en la ecuación [(5.11)](Ec5_11) es de primer orden\begin{equation*}\begin{split}I=\int_{a}^{b}f(x)dx \approx \int_a^b \left[ f(a) + \frac{f(b)-f(a)}{b-a}(x-a)\right]dx= (b-a)\frac{f(a)+f(b)}{2}\end{split}\label{eq:Ec5_16} \tag{5.16}\end{equation*}Geométricamente, es equivalente a aproximar el área del trapezoide bajo la línea recta que conecta $f(a)$ y $f(b)$. La integral se representa como:$$I ≈ \text{ancho} \times \text{altura promedio}$$El error en la regla trapezoidal simple se puede determinar como:\begin{equation*}\begin{split}E_t=-\frac{1}{12}f''(\xi)(b-a)^3\end{split}\label{eq:Ec5_17} \tag{5.17}\end{equation*} [Volver a la Tabla de Contenido](TOC) Regla trapezoidal de aplicación múltiple Una manera de mejorar la exactitud de la regla trapezoidal es dividir el intervalo de integración de $a$ a $b$ en un número $n$ de segmentos y aplicar el método a cada uno de ellos. Las ecuaciones resultantes son llamadas fórmulas de integración de múltiple aplicación o compuestas. Fuente: wikipedia.com Hay $n+1$ puntos base igualmente espaciados $(x_0, x_1, x_2, \ldots, x_n)$. En consecuencia hay $n$ segmentos de igual anchura: $h = (b–a) / n$. 
Si $a$ y $b$ son designados como $x_0$ y $x_n$ respectivamente, la integral total se representará como:\begin{equation*}\begin{split}I=\int_{x_0}^{x_1}f(x)dx+\int_{x_1}^{x_2}f(x)dx+\int_{x_2}^{x_3}f(x)dx+\ldots+\int_{x_{n-2}}^{x_{n-1}}f(x)dx+\int_{x_{n-1}}^{x_n}f(x)dx\end{split}\label{eq:Ec5_18} \tag{5.18}\end{equation*}Al sustituir la regla trapezoidal simple en cada integrando, se tiene\begin{equation*}\begin{split}I\approx \left(f(x_0)+f(x_1)\right)\frac{h}{2}+\left(f(x_1)+f(x_2)\right)\frac{h}{2}+\left(f(x_2)+f(x_3)\right)\frac{h}{2}+\ldots\left(f(x_{n-2})+f(x_{n-1})\right)\frac{h}{2}+\left(f(x_{n-1})+f(x_n)\right)\frac{h}{2}\end{split}\label{eq:Ec5_19} \tag{5.19}\end{equation*}ahora, agrupando términos\begin{equation*}\begin{split}I\approx \frac{h}{2}\left[ f(x_0) + 2\sum_{i=1}^{n-1}f(x_i)+f(x_n) \right]\end{split}\label{eq:Ec5_20} \tag{5.20}\end{equation*}donde $h=(b-a)/n$ [Volver a la Tabla de Contenido](TOC) Implementación computacional
###Code
import numpy as np
import matplotlib.pyplot as plt
def trapezoidal(x):
    # Regla trapezoidal compuesta (ecuación 5.20) sobre puntos igualmente espaciados
    n = len(x) - 1                 # número de segmentos
    h = (x[-1] - x[0]) / n         # tamaño de paso
    suma = 0
    for i in range(1, n):          # puntos interiores x_1, ..., x_{n-1}
        suma += funcion(x[i])
    return h * (funcion(x[0]) + 2 * suma + funcion(x[-1])) / 2
def funcion(x):
return 4 / (1 + x**2)
a = 0
b = 1
n = 2
x = np.linspace(a, b, n+1)
I = trapezoidal(x)
I
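# Verificación rápida (boceto): numpy.trapz aplica la misma regla trapezoidal compuesta
# sobre los puntos dados, por lo que ambos valores deberían coincidir.
print('trapezoidal propia:', I)
print('numpy.trapz       :', np.trapz(funcion(x), x))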
###Output
_____no_output_____
###Markdown
[Volver a la Tabla de Contenido](TOC) Error en la aplicación de la regla trapezoidal Recordando que estos esquemas provienen de la serie truncada de Taylor, el error se puede obtener determinando el primer término truncado en el esquema, que para la regla trapezoidal de aplicación simple corresponde a:\begin{equation*}\begin{split}E_t=-\frac{1}{12}f''(\xi)(b-a)^3\end{split}\label{eq:Ec5_21} \tag{5.21}\end{equation*}donde $f''(\xi)$ es la segunda derivada en el punto $\xi$ en el intervalo $[a,b]$, y $\xi$ es un valor que maximiza la evaluación de esta segunda derivada. Generalizando este concepto a la aplicación múltiple de la regla trapezoidal, se pueden sumar cada uno de los errores en cada segmento para dar:\begin{equation*}\begin{split}E_t=-\frac{(b-a)^3}{12n^3}\sum\limits_{i=1}^n f''(\xi_i)\end{split}\label{eq:Ec5_22} \tag{5.22}\end{equation*}el anterior resultado se puede simplificar estimando la media, o valor promedio, de la segunda derivada para todo el intervalo\begin{equation*}\begin{split}\bar{f''} \approx \frac{\sum \limits_{i=1}^n f''(\xi_i)}{n}\end{split}\label{eq:Ec5_23} \tag{5.23}\end{equation*}de esta ecuación se tiene que $\sum f''(\xi_i)\approx nf''$, y reemplazando en la ecuación [(5.23)](Ec5_23)\begin{equation*}\begin{split}E_t \approx \frac{(b-a)^3}{12n^2}\bar{f''}\end{split}\label{eq:Ec5_24} \tag{5.24}\end{equation*}De este resultado se observa que si se duplica el número de segmentos, el error de truncamiento se disminuirá a una cuarta parte. [Volver a la Tabla de Contenido](TOC) Reglas de Simpson Las [reglas de Simpson](https://en.wikipedia.org/wiki/Simpson%27s_rule) son esquemas de integración numérica en honor al matemático [*Thomas Simpson*](https://en.wikipedia.org/wiki/Thomas_Simpson), utilizado para obtener la aproximación de la integral empleando interpolación polinomial sustituyendo a $f(x)$. [Volver a la Tabla de Contenido](TOC) Regla de Simpson1/3 de aplicación simple La primera regla corresponde a una interpolación polinomial de segundo orden sustituida en la ecuación [(5.8)](Ec5_8) Fuente: wikipedia.com \begin{equation*}\begin{split}I=\int_a^b f(x)dx \approx \int_a^b p_2(x)dx\end{split}\label{eq:Ec5_25} \tag{5.25}\end{equation*}del esquema de interpolación de Lagrange para un polinomio de segundo grado, visto en el capitulo anterior, y remplazando en la integral arriba, se llega a \begin{equation*}\begin{split}I\approx\int_{x0}^{x2} \left[\frac{(x-x_1)(x-x_2)}{(x_0-x_1)(x_0-x_2)}f(x_0)+\frac{(x-x_0)(x-x_2)}{(x_1-x_0)(x_1-x_2)}f(x_1)+\frac{(x-x_0)(x-x_1)}{(x_2-x_0)(x_2-x_1)}f(x_2)\right]dx\end{split}\label{eq:Ec5_26} \tag{5.26}\end{equation*}realizando la integración de forma analítica y un manejo algebraico, resulta\begin{equation*}\begin{split}I\approx\frac{h}{3} \left[ f(x_0)+4f(x_1)+f(x_2)\right]\end{split}\label{eq:Ec5_27} \tag{5.27}\end{equation*}donde $h=(b-a)/2$ y los $x_{i+1} = x_i + h$ A continuación, vamos a comparar graficamente las funciones "exacta" (con muchos puntos) y una aproximada empleando alguna técnica de interpolación para $n=3$ puntos (Polinomio interpolante de orden $2$).
###Code
from scipy.interpolate import barycentric_interpolate
# usaremos uno de los tantos métodos de interpolación dispobibles en las bibliotecas de Python
n = 3 # puntos a interpolar para un polinomio de grado 2
xp = np.linspace(a,b,n) # generación de n puntos igualmente espaciados para la interpolación
fp = funcion(xp) # evaluación de la función en los n puntos generados
x = np.linspace(a, b, 100) # generación de 100 puntos igualmente espaciados
y = barycentric_interpolate(xp, fp, x) # interpolación numérica empleando el método del Baricentro
fig = plt.figure(figsize=(9, 6), dpi= 80, facecolor='w', edgecolor='k')
ax = fig.add_subplot(111)
l, = plt.plot(x, y)
plt.plot(x, funcion(x), '-', c='red')
plt.plot(xp, fp, 'o', c=l.get_color())
plt.annotate('Función "Real"', xy=(.63, 1.5), xytext=(0.8, 1.25),arrowprops=dict(facecolor='black', shrink=0.05),)
plt.annotate('Función interpolada', xy=(.72, 1.75), xytext=(0.4, 2),arrowprops=dict(facecolor='black', shrink=0.05),)
plt.grid(True) # muestra la malla de fondo
plt.show() # muestra la gráfica
###Output
_____no_output_____
###Markdown
Se observa que hay una gran diferencia entre las áreas que se estarían abarcando en la función llamada "*real*" (que se emplearon $100$ puntos para su generación) y la función *interpolada* (con únicamente $3$ puntos para su generación) que será la empleada en la integración numérica (aproximada) mediante la regla de *Simpson $1/3$*.Conscientes de esto, procederemos entonces a realizar el cálculo del área bajo la curva del $p_3(x)$ empleando el método de *Simpson $1/3$* Creemos un programa en *Python* para que nos sirva para cualquier función $f(x)$ que queramos integrar en cualquier intervalo $[a,b]$ empleando la regla de integración de *Simpson $1/3$*:
###Code
# se ingresan los valores del intervalo [a,b]
a = float(input('Ingrese el valor del límite inferior: '))
b = float(input('Ingrese el valor del límite superior: '))
# cuerpo del programa por la regla de Simpson 1/3
h = (b-a)/2 # cálculo del valor de h
x0 = a # valor del primer punto para la fórmula de S1/3
x1 = x0 + h # Valor del punto intermedio en la fórmula de S1/3
x2 = b # valor del tercer punto para la fórmula de S1/3
fx0 = funcion(x0) # evaluación de la función en el punto x0
fx1 = funcion(x1) # evaluación de la función en el punto x1
fx2 = funcion(x2) # evaluación de la función en el punto x2
int_S13 = h / 3 * (fx0 + 4*fx1 + fx2)
#erel = np.abs(exacta - int_S13) / exacta * 100
print('el valor aproximado de la integral por la regla de Simpson1/3 es: ', int_S13, '\n')
#print('el error relativo entre el valor real y el calculado es: ', erel,'%')
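# Verificación (boceto): se compara con scipy.integrate.quad como referencia numérica.
from scipy.integrate import quad
referencia, _ = quad(funcion, a, b)
print('referencia con scipy.integrate.quad:', referencia)
print('error relativo aproximado (%):', abs(referencia - int_S13) / referencia * 100)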
###Output
_____no_output_____
###Markdown
[Volver a la Tabla de Contenido](TOC) Error en la regla de Simpson 1/3 de aplicación simple El problema de calcular el error de esta forma es que realmente no conocemos el valor exacto. Para poder calcular el error al usar la regla de *Simpson 1/3*:\begin{equation*}\begin{split}-\frac{h^5}{90}f^{(4)}(\xi)\end{split}\label{eq:Ec5_28} \tag{5.28}\end{equation*}será necesario derivar cuatro veces la función original: $f(x)=\frac{4}{1+x^2}$. Para esto, vamos a usar nuevamente el cálculo simbólico (siempre deben verificar que la respuesta obtenida es la correcta!!!):
###Code
from sympy import *
x = symbols('x')
###Output
_____no_output_____
###Markdown
Derivamos cuatro veces la función $f(x)$ con respecto a $x$:
###Code
deriv4 = diff(4 / (1 + x**2),x,4)
deriv4
###Output
_____no_output_____
###Markdown
y evaluamos esta función de la cuarta derivada en un punto $0 \leq \xi \leq 1$. Como la función $f{^{(4)}}(x)$ es creciente en el intervalo $[0,1]$ (compruébelo gráficamente y/o por las técnicas vistas en cálculo diferencial), entonces, el valor que hace máxima la cuarta derivada en el intervalo dado es:
###Code
x0 = 1.0
evald4 = deriv4.evalf(subs={x: x0})
print('El valor de la cuarta derivada de f en x0={0:6.2f} es {1:6.4f}: '.format(x0, evald4))
###Output
_____no_output_____
###Markdown
Calculamos el error en la regla de *Simpson$1/3$*
###Code
errorS13 = abs(h**5*evald4/90)
print('El error al usar la regla de Simpson 1/3 es: {0:6.6f}'.format(errorS13))
###Output
_____no_output_____
###Markdown
Entonces, podemos expresar el valor de la integral de la función $f(x)=e^{x^2}$ en el intervalo $[0,1]$ usando la *Regla de Simpson $1/3$* como:$$\color{blue}{\int_0^1 \frac{4}{1 + x^2}dx} = \color{green}{3,133333} \color{red}{+ 0.004167}$$ Si lo fuéramos a hacer "a mano" $\ldots$ aplicando la fórmula directamente, con los siguientes datos:$h = \frac{(1.0 - 0.0)}{2.0} = 0.5$$x_0 = 0.0$$x_1 = 0.5$$x_2 = 1.0$$f(x) = \frac{4}{1 + x^2}$sustituyendo estos valores en la fórmula dada:$\int_0^1\frac{4}{1 + x^2}dx \approx \frac{0.5}{3} \left[f(0)+4f(0.5)+f(1)\right]$$\int_0^1\frac{4}{1 + x^2}dx \approx \frac{0.5}{3} \left[ \frac{4}{1 + 0^2} + 4\frac{4}{1 + 0.5^2} + \frac{4}{1 + 1^2} \right] \approx 3.133333$ [Volver a la Tabla de Contenido](TOC) Regla de simpson1/3 de aplicación múltiple Al igual que en la regla Trapezoidal, las reglas de Simpson también cuentan con un esquema de aplicación múltiple (llamada también compuesta). Supongamos que se divide el intervalo $[a,b]$ se divide en $n$ sub intervalos, con $n$ par, quedando la integral\begin{equation*}\begin{split}I=\int_{x_0}^{x_2}f(x)dx+\int_{x_2}^{x_4}f(x)dx+\ldots+\int_{x_{n-2}}^{x_n}f(x)dx\end{split}\label{eq:Ec5_29} \tag{5.29}\end{equation*}y sustituyendo en cada una de ellas la regla de Simpson1/3, se llega a\begin{equation*}\begin{split}I \approx 2h\frac{f(x_0)+4f(x_1)+f(x_2)}{6}+2h\frac{f(x_2)+4f(x_3)+f(x_4)}{6}+\ldots+2h\frac{f(x_{n-2})+4f(x_{n-1})+f(x_n)}{6}\end{split}\label{eq:Ec5_30} \tag{5.30}\end{equation*}entonces la regla de Simpson compuesta (o de aplicación múltiple) se escribe como:\begin{equation*}\begin{split}I=\int_a^bf(x)dx\approx \frac{h}{3}\left[f(x_0) + 2 \sum \limits_{j=1}^{n/2-1} f(x_{2j}) + 4 \sum \limits_{j=1}^{n/2} f(x_{2j-1})+f(x_n)\right]\end{split}\label{eq:Ec5_31} \tag{5.31}\end{equation*}donde $x_j=a+jh$ para $j=0,1,2, \ldots, n-1, n$ con $h=(b-a)/n$, $x_0=a$ y $x_n=b$. [Volver a la Tabla de Contenido](TOC) Implementación computacional regla de Simpson1/3 de aplicación múltiple [Volver a la Tabla de Contenido](TOC) Regla de Simpson 3/8 de aplicación simple Resulta cuando se sustituye la función $f(x)$ por una interpolación de tercer orden:\begin{equation*}\begin{split}I=\int_{a}^{b}f(x)dx = \frac{3h}{8}\left[ f(x_0)+3f(x_1)+3f(x_2)+f(x_3) \right]\end{split}\label{eq:Ec5_32} \tag{5.32}\end{equation*} Realizando un procedimiento similar al usado para la regla de *Simpson $1/3$*, pero esta vez empleando $n=4$ puntos:
###Code
# usaremos uno de los tantos métodos de interpolación dispobibles en las bibliotecas de Python
n = 4 # puntos a interpolar para un polinomio de grado 3
xp = np.linspace(0,1,n) # generación de n puntos igualmente espaciados para la interpolación
fp = funcion(xp) # evaluación de la función en los n puntos generados
x = np.linspace(0, 1, 100) # generación de 100 puntos igualmente espaciados
y = barycentric_interpolate(xp, fp, x) # interpolación numérica empleando el método del Baricentro
fig = plt.figure(figsize=(9, 6), dpi= 80, facecolor='w', edgecolor='k')
ax = fig.add_subplot(111)
l, = plt.plot(x, y)
plt.plot(x, funcion(x), '-', c='red')
plt.plot(xp, fp, 'o', c=l.get_color())
plt.annotate('"Real"', xy=(.63, 1.5), xytext=(0.8, 1.25),arrowprops=dict(facecolor='black', shrink=0.05),)
plt.annotate('Interpolación', xy=(.72, 1.75), xytext=(0.4, 2),arrowprops=dict(facecolor='black', shrink=0.05),)
plt.grid(True) # muestra la malla de fondo
plt.show() # muestra la gráfica
# cuerpo del programa por la regla de Simpson 3/8
h = (b - a) / 3 # cálculo del valor de h
int_S38 = 3 * h / 8 * (funcion(a) + 3*funcion(a + h) + 3*funcion(a + 2*h) + funcion(a + 3*h))
erel = np.abs(np.pi - int_S38) / np.pi * 100
print('el valor aproximado de la integral utilizando la regla de Simpson 3/8 es: ', int_S38, '\n')
print('el error relativo entre el valor real y el calculado es: ', erel,'%')
###Output
_____no_output_____
###Markdown
Para poder calcular el error al usar la regla de *Simpson 3/8*:$$\color{red}{-\frac{3h^5}{80}f^{(4)}(\xi)}$$será necesario derivar cuatro veces la función original. Para esto, vamos a usar nuevamente el cálculo simbólico (siempre deben verificar que la respuesta obtenida es la correcta!!!):
###Code
errorS38 = 3*h**5*evald4/80
print('El error al usar la regla de Simpson 3/8 es: ',errorS38)
###Output
_____no_output_____
###Markdown
Entonces, podemos expresar el valor de la integral de la función $f(x)=e^{x^2}$ en el intervalo $[0,1]$ usando la *Regla de Simpson $3/8$* como:$$\color{blue}{\int_0^1\frac{4}{1 + x^2}dx} = \color{green}{3.138462} \color{red}{- 0.001852}$$ Aplicando la fórmula directamente, con los siguientes datos:$h = \frac{(1.0 - 0.0)}{3.0} = 0.33$$x_0 = 0.0$, $x_1 = 0.33$, $x_2 = 0.66$, $x_3 = 1.00$$f(x) = \frac{4}{1 + x^2}$sustituyendo estos valores en la fórmula dada:$\int_0^1\frac{4}{1 + x^2}dx \approx \frac{3\times0.3333}{8} \left[ \frac{4}{1 + 0^2} + 3\frac{4}{1 + 0.3333^2} +3\frac{4}{1 + 0.6666^2} + \frac{4}{1 + 1^2} \right] \approx 3.138462$Esta sería la respuesta si solo nos conformamos con lo que podemos hacer usando word... [Volver a la Tabla de Contenido](TOC) Regla de Simpson3/8 de aplicación múltiple Dividiendo el intervalo $[a,b]$ en $n$ sub intervalos de longitud $h=(b-a)/n$, con $n$ múltiplo de 3, quedando la integral\begin{equation*}\begin{split}I=\int_{x_0}^{x_3}f(x)dx+\int_{x_3}^{x_6}f(x)dx+\ldots+\int_{x_{n-3}}^{x_n}f(x)dx\end{split}\label{eq:Ec5_33} \tag{5.33}\end{equation*}sustituyendo en cada una de ellas la regla de Simpson3/8, se llega a\begin{equation*}\begin{split}I=\int_a^bf(x)dx\approx \frac{3h}{8}\left[f(x_0) + 3 \sum \limits_{i=0}^{n/3-1} f(x_{3i+1}) + 3 \sum \limits_{i=0}^{n/3-1}f(x_{3i+2})+2 \sum \limits_{i=0}^{n/3-2} f(x_{3i+3})+f(x_n)\right]\end{split}\label{eq:Ec5_34} \tag{5.34}\end{equation*}donde en cada sumatoria se deben tomar los valores de $i$ cumpliendo que $i=i+3$. [Volver a la Tabla de Contenido](TOC) Implementación computacional de la regla de Simpson3/8 de aplicación múltiple
###Code
#
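# Boceto ilustrativo (no forma parte del texto original): regla de Simpson 3/8 compuesta,
# ecuación (5.34). Se asume la misma función "funcion(x)" definida arriba y n múltiplo de 3;
# la regla de Simpson 1/3 compuesta (5.31) se implementa con el mismo patrón de pesos.
def simpson38_compuesta(f, a, b, n):
    if n % 3 != 0:
        raise ValueError('n debe ser múltiplo de 3')
    h = (b - a) / n
    x = [a + i * h for i in range(n + 1)]
    suma = f(x[0]) + f(x[-1])
    for i in range(1, n):
        # los puntos interiores con índice múltiplo de 3 pesan 2, el resto pesa 3
        suma += (2 if i % 3 == 0 else 3) * f(x[i])
    return 3 * h / 8 * suma

print(simpson38_compuesta(funcion, 0, 1, 6))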
###Output
_____no_output_____
###Markdown
[Volver a la Tabla de Contenido](TOC) Cuadratura de Gauss Introducción Retomando la idea inicial de los esquemas de [cuadratura](Quadrature), el valor de la integral definida se estima de la siguiente manera:\begin{equation*}\begin{split}I=\int_a^b f(x)dx \approx \sum \limits_{i=0}^n c_if(x_i)\end{split}\label{eq:Ec5_35} \tag{5.35}\end{equation*}Hasta ahora hemos visto los métodos de la regla trapezoidal y las reglas de Simpson más empleadas. En estos esquemas, la idea central es la distribución uniforme de los puntos que siguen la regla $x_i=x_0+ih$, con $i=0,1,2, \ldots, n$ y la evaluación de la función en estos puntos.Supongamos ahora que la restricción de la uniformidad en el espaciamiento de esos puntos fijos no es más considerada y se tiene la libertad de evaluar el área bajo una recta que conecte a dos puntos cualesquiera sobre la curva. Al ubicar estos puntos en forma “inteligente”, se puede definir una línea recta que equilibre los errores negativos y positivos Fuente: Chapra, S., Canale, R. Métodos Numéricos para ingenieros, 5a Ed. Mc. Graw Hill. 2007 De la figura de la derecha, se disponen de los puntos $x_0$ y $x_1$ para evaluar la función $f(x)$. Expresando la integral bajo la curva de forma aproximada dada en la la ecuación ([5.35](Ec5_35)), y empleando los límites de integración en el intervalo $[-1,1]$ por simplicidad (después se generalizará el concepto a un intervalo $[a,b]$), se tiene\begin{equation*}\begin{split}I=\int_{-1}^1 f(x)dx \approx c_0f(x_0)+c_1f(x_1)\end{split}\label{eq:Ec5_36} \tag{5.36}\end{equation*} [Volver a la Tabla de Contenido](TOC) Determinación de los coeficientes se tiene una ecuación con cuatro incógnitas ($c_0, c_1, x_0$ y $x_1$) que se deben determinar. Para ello, supongamos que disponemos de un polinomio de hasta grado 3, $f_3(x)$, de donde podemos construir cuatro ecuaciones con cuatro incógnitas de la siguiente manera:- $f_3(x)=1$:\begin{equation*}\begin{split}\int_{-1}^1 1dx = c_0 \times 1 + c_1 \times 1 = c_0 + c_1 = 2\end{split}\label{eq:Ec5_37} \tag{5.37}\end{equation*}- $f_3(x)=x$:\begin{equation*}\begin{split}\int_{-1}^1 xdx = c_0x_0 + c_1x_1 = 0\end{split}\label{eq:Ec5_38} \tag{5.38}\end{equation*}- $f_3(x)=x^2$:\begin{equation*}\begin{split}\int_{-1}^1 x^2dx = c_0x^2_0 + c_1x^2_1 = \frac{2}{3}\end{split}\label{eq:Ec5_39} \tag{5.39}\end{equation*}y por último- $f_3(x)=x^3$:\begin{equation*}\begin{split}\int_{-1}^1 x^3dx = c_0x^3_0 + c_1x^3_1 = 0\end{split}\label{eq:Ec5_40} \tag{5.40}\end{equation*}resolviendo simultáneamente las dos primeras ecuaciones para $c_0$ y $c_1$ en térm,inos de $x_0$ y $x_1$, se llega a\begin{equation*}\begin{split}c_0=\frac{2x_1}{x_1-x_0}, \quad c_1=-\frac{2x_0}{x_1-x_0}\end{split}\label{eq:Ec5_41} \tag{5.41}\end{equation*}reemplazamos estos dos valores en las siguientes dos ecuaciones\begin{equation*}\begin{split}\frac{2}{3}=\frac{2x_0^2x_1}{x_1-x_0}-\frac{2x_0x_1^2}{x_1-x_0}\end{split}\label{eq:Ec5_42} \tag{5.42}\end{equation*}\begin{equation*}\begin{split}0=\frac{2x_0^3x_1}{x_1-x_0}-\frac{2x_0x_1^3}{x_1-x_0}\end{split}\label{eq:Ec5_43} \tag{5.43}\end{equation*}de la ecuación ([5.43](Ec5_43)) se tiene\begin{equation*}\begin{split}x_0^3x_1&=x_0x_1^3 \\x_0^2 &= x_1^2\end{split}\label{eq:Ec5_44} \tag{5.44}\end{equation*}de aquí se tiene que $|x_0|=|x_1|$ (para considerar las raíces negativas recuerde que $\sqrt{a^2}= \pm a = |a|$), y como se asumió que $x_00$ (trabajando en el intervalo $[-1,1]$), llegándose finalmente a que $x_0=-x_1$. 
Reemplazando este resultado en la ecuación ([5.42](Ec5_42))\begin{equation*}\begin{split}\frac{2}{3}=2\frac{x_1^3+x_1^3}{2x_1}\end{split}\label{eq:Ec5_45} \tag{5.45}\end{equation*}despejando, $x_1^2=1/3$, y por último se llega a que\begin{equation*}\begin{split}x_0=-\frac{\sqrt{3}}{3}, \quad x_1=\frac{\sqrt{3}}{3}\end{split}\label{eq:Ec5_46} \tag{5.46}\end{equation*}reemplazando estos resultados en la ecuación ([5.41](Ec5_41)) y de la ecuación ([5.37](Ec5_37)), se tiene que $c_0=c_1=1$. Reescribiendo la ecuación ([5.36](Ec5_36)) con los valores encontrados se llega por último a:\begin{equation*}\begin{split}I=\int_{-1}^1 f(x)dx &\approx c_0f(x_0)+c_1f(x_1) \\&= f \left( \frac{-\sqrt{3}}{3}\right)+f \left( \frac{\sqrt{3}}{3}\right)\end{split}\label{eq:Ec5_47} \tag{5.47}\end{equation*}Esta aproximación realizada es "exacta" para polinomios de grado menor o igual a tres ($3$). La aproximación trapezoidal es exacta solo para polinomios de grado uno ($1$).***Ejemplo:*** Calcule la integral de la función $f(x)=x^3+2x^2+1$ en el intervalo $[-1,1]$ empleando tanto las técnicas analíticas como la cuadratura de Gauss vista.- ***Solución analítica (exacta)***$$\int_{-1}^1 (x^3+2x^2+1)dx=\left.\frac{x^4}{4}+\frac{2x^3}{3}+x \right |_{-1}^1=\frac{10}{3}$$- ***Aproximación numérica por Cuadratura de Gauss***\begin{equation*}\begin{split}\int_{-1}^1 (x^3+2x^2+1)dx &\approx1f\left(-\frac{\sqrt{3}}{3} \right)+1f\left(\frac{\sqrt{3}}{3} \right) \\&=-\frac{3\sqrt{3}}{27}+\frac{2\times 3}{9}+1+\frac{3\sqrt{3}}{27}+\frac{2\times 3}{9}+1 \\&=2+\frac{4}{3} \\&= \frac{10}{3}\end{split}\end{equation*} [Volver a la Tabla de Contenido](TOC) Cambios de los límites de integración Obsérvese que los límites de integración de la ecuación ([5.47](Ec5_47)) son de $-1$ a $1$. Esto se hizo para simplificar las matemáticas y para hacer la formulación tan general como fuera posible. Asumamos ahora que se desea determinar el valor de la integral entre dos límites cualesquiera $a$ y $b$. Supongamos también, que una nueva variable $x_d$ se relaciona con la variable original $x$ de forma lineal,\begin{equation*}\begin{split}x=a_0+a_1x_d\end{split}\label{eq:Ec5_48} \tag{5.48}\end{equation*}si el límite inferior, $x=a$, corresponde a $x_d=-1$, estos valores podrán sustituirse en la ecuación ([5.48](Ec5_48)) para obtener\begin{equation*}\begin{split}a=a_0+a_1(-1)\end{split}\label{eq:Ec5_49} \tag{5.49}\end{equation*}de manera similar, el límite superior, $x=b$, corresponde a $x_d=1$, para dar\begin{equation*}\begin{split}b=a_0+a_1(1)\end{split}\label{eq:Ec5_50} \tag{5.50}\end{equation*}resolviendo estas ecuaciones simultáneamente,\begin{equation*}\begin{split}a_0=(b+a)/2, \quad a_1=(b-a)/2\end{split}\label{eq:Ec5_51} \tag{5.51}\end{equation*}sustituyendo en la ecuación ([5.48](Ec5_48))\begin{equation*}\begin{split}x=\frac{(b+a)+(b-a)x_d}{2}\end{split}\label{eq:Ec5_52} \tag{5.52}\end{equation*}derivando la ecuación ([5.52](Ec5_52)),\begin{equation*}\begin{split}dx=\frac{b-a}{2}dx_d\end{split}\label{eq:Ec5_53} \tag{5.53}\end{equation*}Las ecuacio es ([5.51](Ec5_51)) y ([5.52](Ec5_52)) se pueden sustituir para $x$ y $dx$, respectivamente, en la evaluación de la integral. Estas sustituciones transforman el intervalo de integración sin cambiar el valor de la integral. 
En este caso\begin{equation*}\begin{split}\int_a^b f(x)dx = \frac{b-a}{2} \int_{-1}^1 f \left( \frac{(b+a)+(b-a)x_d}{2}\right)dx_d\end{split}\label{eq:Ec5_54} \tag{5.54}\end{equation*}Esta integral se puede aproximar como,\begin{equation*}\begin{split}\int_a^b f(x)dx \approx \frac{b-a}{2} \left[f\left( \frac{(b+a)+(b-a)x_0}{2}\right)+f\left( \frac{(b+a)+(b-a)x_1}{2}\right) \right]\end{split}\label{eq:Ec5_55} \tag{5.55}\end{equation*} [Volver a la Tabla de Contenido](TOC) Fórmulas de punto superior La fórmula anterior para la cuadratura de Gauss era de dos puntos. Se pueden desarrollar versiones de punto superior en la forma general:\begin{equation*}\begin{split}I \approx c_0f(x_0) + c_1f(x_1) + c_2f(x_2) +\ldots+ c_{n-1}f(x_{n-1})\end{split}\label{eq:Ec5_56} \tag{5.56}\end{equation*}con $n$, el número de puntos.Debido a que la cuadratura de Gauss requiere evaluaciones de la función en puntos espaciados uniformemente dentro del intervalo de integración, no es apropiada para casos donde se desconoce la función. Si se conoce la función, su ventaja es decisiva.En la siguiente tabla se presentan los valores de los parámertros para $1, 2, 3, 4$ y $5$ puntos. |$$n$$ | $$c_i$$ | $$x_i$$ ||:----:|:----------:|:-------------:||$$1$$ |$$2.000000$$| $$0.000000$$ ||$$2$$ |$$1.000000$$|$$\pm0.577350$$||$$3$$ |$$0.555556$$|$$\pm0.774597$$|| |$$0.888889$$| $$0.000000$$ ||$$4$$ |$$0.347855$$|$$\pm0.861136$$|| |$$0.652145$$|$$\pm0.339981$$||$$5$$ |$$0.236927$$|$$\pm0.906180$$|| |$$0.478629$$|$$\pm0.538469$$|| |$$0.568889$$| $$0.000000$$ |
###Code
import numpy as np
import pandas as pd
GaussTable = [[[0], [2]], [[-1/np.sqrt(3), 1/np.sqrt(3)], [1, 1]], [[-np.sqrt(3/5), 0, np.sqrt(3/5)], [5/9, 8/9, 5/9]], [[-0.861136, -0.339981, 0.339981, 0.861136], [0.347855, 0.652145, 0.652145, 0.347855]], [[-0.90618, -0.538469, 0, 0.538469, 0.90618], [0.236927, 0.478629, 0.568889, 0.478629, 0.236927]], [[-0.93247, -0.661209, -0.238619, 0.238619, 0.661209, 0.93247], [0.171324, 0.360762, 0.467914, 0.467914, 0.360762, 0.171324]]]
display(pd.DataFrame(GaussTable, columns=["Integration Points", "Corresponding Weights"]))
def IG(f, n):
n = int(n)
return sum([GaussTable[n - 1][1][i]*f(GaussTable[n - 1][0][i]) for i in range(n)])
def f(x): return x**9 + x**8
IG(f, 5.0)
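# Comprobación (boceto): el valor exacto de la integral de x^9 + x^8 en [-1, 1] es 2/9,
# y numpy.polynomial.legendre.leggauss genera puntos y pesos de Gauss-Legendre equivalentes.
print('IG(f, 5)          :', IG(f, 5))
print('valor exacto (2/9):', 2/9)
xg, wg = np.polynomial.legendre.leggauss(5)
print('con leggauss(5)   :', sum(wg * f(xg)))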
###Output
_____no_output_____
###Markdown
[Volver a la Tabla de Contenido](TOC) Ejemplo Cuadratura de Gauss Determine el valor aproximado de:$$\int_0^1 \frac{4}{1+x^2}dx$$empleando cuadratura gaussiana de dos puntos.Reemplazando los parámetros requeridos en la ecuación ([5.55](Ec5_55)), donde $a=0$, $b=1$, $x_0=-\sqrt{3}/3$ y $x_1=\sqrt{3}/3$\begin{equation*}\begin{split}\int_0^1 f(x)dx &\approx \frac{1-0}{2} \left[f\left( \frac{(1+0)+(1-0)\left(-\frac{\sqrt{3}}{3}\right)}{2}\right)+f\left( \frac{(1+0)+(1-0)\left(\frac{\sqrt{3}}{3}\right)}{2}\right) \right]\\&= \frac{1}{2} \left[f\left( \frac{1-\frac{\sqrt{3}}{3}}{2}\right)+f\left( \frac{1+\frac{\sqrt{3}}{3}}{2}\right) \right]\\&= \frac{1}{2} \left[ \frac{4}{1 + \left( \frac{1-\frac{\sqrt{3}}{3}}{2} \right)^2}+\frac{4}{1 + \left( \frac{1+\frac{\sqrt{3}}{3}}{2} \right)^2} \right]\\&=3.147541\end{split}\end{equation*}Ahora veamos una breve implementación computacional
###Code
import numpy as np
def fxG(a, b, x):
xG = ((b + a) + (b - a) * x) / 2
return funcion(xG)
def GQ2(a,b):
c0 = 1.0
c1 = 1.0
x0 = -1.0 / np.sqrt(3)
x1 = 1.0 / np.sqrt(3)
return (b - a) / 2 * (c0 * fxG(a,b,x0) + c1 * fxG(a,b,x1))
print(GQ2(0,1))
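# Generalización (boceto): cuadratura de Gauss-Legendre de n puntos en [a, b],
# usando numpy.polynomial.legendre.leggauss para obtener puntos y pesos en [-1, 1].
def GQn(a, b, n):
    xg, wg = np.polynomial.legendre.leggauss(n)
    xab = ((b + a) + (b - a) * xg) / 2      # cambio de variable, ecuación (5.52)
    return (b - a) / 2 * sum(wg * funcion(xab))
print(GQn(0, 1, 5))   # debe aproximarse a pi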
###Output
_____no_output_____
###Markdown
[Volver a la Tabla de Contenido](TOC)
###Code
from IPython.core.display import HTML
def css_styling():
styles = open('./nb_style.css', 'r').read()
return HTML(styles)
css_styling()
###Output
_____no_output_____
###Markdown
ST0256 - Análisis NuméricoCapítulo 5: Diferenciación e integración numérica2021/01MEDELLÍN - COLOMBIA Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license.(c) Carlos Alberto Alvarez Henao *** ***Docente:*** Carlos Alberto Álvarez Henao, I.C. D.Sc.***e-mail:*** [email protected]***skype:*** carlos.alberto.alvarez.henao***Herramienta:*** [Jupyter notebook](http://jupyter.org/)***Kernel:*** Python 3.8*** Tabla de Contenidos1 Diferenciación Numérica1.1 Introducción1.2 Series de Taylor1.3 Esquemas de diferencias finitas para la primera derivada1.3.1 Esquema de primer orden hacia adelante (forward)1.3.2 Esquema de primer orden hacia atrás (backward)1.3.3 Esquema de segundo orden (central)1.3.4 Resumen esquemas diferencias finitas para la primera derivada1.4 Esquemas de diferencias finitas para la segunda derivada1.5 Implementación computacional de algunos esquemas de diferencias finitas2 Integración Numérica2.1 Introducción2.2 Fórmulas de integración de Newton - Cotes2.3 Regla trapezoidal2.3.1 Regla trapezoidal de aplicación simple2.3.2 Regla trapezoidal de aplicación múltiple2.3.3 Implementación computacional2.3.4 Error en la aplicación de la regla trapezoidal2.4 Reglas de Simpson2.4.1 Regla de Simpson1/3 de aplicación simple2.4.2 Error en la regla de Simpson 1/3 de aplicación simple2.4.3 Regla de simpson1/3 de aplicación múltiple2.4.4 Implementación computacional regla de Simpson1/3 de aplicación múltiple2.4.5 Regla de Simpson 3/8 de aplicación simple2.4.6 Regla de Simpson3/8 de aplicación múltiple2.4.7 Implementación computacional de la regla de Simpson3/8 de aplicación múltiple2.5 Cuadratura de Gauss2.5.1 Introducción2.5.2 Determinación de los coeficientes2.5.3 Cambios de los límites de integración2.5.4 Fórmulas de punto superior2.5.5 Ejemplo Cuadratura de Gauss Diferenciación Numérica Introducción La [diferenciación numérica](https://en.wikipedia.org/wiki/Numerical_differentiation) se emplea para determinar (estimar) el valor de la derivada de una función en un punto específico. No confundir con la derivada de una función, pues lo que se obtendrá es un valor puntual y no una función. En este capítulo nos centraremos únicamente en ecuiaciones unidimensionales. [Volver a la Tabla de Contenido](TOC) Series de Taylor De la [serie de Taylor](https://en.wikipedia.org/wiki/Taylor_series) \begin{equation*}f(x_{i \pm 1}) = f(x_i) \pm f'(x_i)h + \frac{f''(x_i)h^2}{2!} \pm \frac{f'''(x_i)h^3}{3!} + \ldots\label{eq:Ec5_1} \tag{5.1}\end{equation*}con $h=\Delta x = x_{i+1}-x_i$ siendo el tamaño de paso.Dada que la serie contiene infinitos términos, partir de la ecuación ($5.1$) se pueden obtener infinitos esquemas numéricos para determinar cada una de las infinitas derivadas de dicho polinomio. En este curso usaremos la técnica de [Diferencias Finitas](https://en.wikipedia.org/wiki/Finite_difference) para desarrollarlas. 
[Volver a la Tabla de Contenido](TOC) Esquemas de diferencias finitas para la primera derivada Esquema de primer orden hacia adelante (forward) De la ecuación [(5.1)](Ec5_1) tomando los valores positivos, que involucran únicamente términos hacia adelante, se trunca la serie hasta la primera derivada y se realiza un despeje algebraico para llegar a:\begin{equation*}f'(x_i) = \frac{f(x_{i+1})-f(x_i)}{h} + \mathcal{O}(h)\label{eq:Ec5_2} \tag{5.2}\end{equation*}se puede observar que el término $\mathcal{O}(h)$ indica que el error es de orden lineal, es decir, si se reduce el tamaño de paso, $h$, a la mitad, el error se reducirá a la mitad. Si se reduc el tamaño de paso a una cuarta parte, el error se reducirá, linealmente, una cuarta parte. [Volver a la Tabla de Contenido](TOC) Esquema de primer orden hacia atrás (backward) De la ecuación [(5.1)](Ec5_1) tomando los valores negativos, que involucran únicamente términos hacia atrás (backward), se trunca la serie hasta la primera derivada y se realiza un despeje algebraico para llegar a:\begin{equation*}f'(x_i) = \frac{f(x_{i})-f(x_{i-1})}{h} + \mathcal{O}(h)\label{eq:Ec5_3} \tag{5.3}\end{equation*}se observa que se llega a una expresión similar a la de la ecuación [(5.2)](Ec5_2), pero de esta vez, se tiene en cuenta es el valor anterior al punto $x_i$. También se observa que el error es de orden lineal, por lo que se mantiene un esquema de primer orden. [Volver a la Tabla de Contenido](TOC) Esquema de segundo orden (central) Una forma de aumentar el orden de estos esquemas, es realizar el truncamiento de la *serie de Taylor* hasta la segunda derivada, hacia adelante y hacia atras, y realizar su resta aritmética.\begin{equation*}\begin{split}f(x_{i+1}) & = f(x_i) + f'(x_i)h + \frac{f''(x_i)h^2}{2!} \\- \\f(x_{i-1}) & = f(x_i) - f'(x_i)h + \frac{f''(x_i)h^2}{2!} \\\hline \\f(x_{i+1}) - f(x_{i-1}) & = 2 f'(x_i)h\end{split}\label{eq:Ec5_4} \tag{5.4}\end{equation*} de la anterior ecuación, despejando el término que corresponde a la primera derivada queda:\begin{equation*}\begin{split}f'(x_i) = \frac{f(x_{i+1}) - f(x_{i-1})}{2h} + \mathcal{O}(h^2)\end{split}\label{eq:Ec5_5} \tag{5.5}\end{equation*}se llega al esquema de diferencias finitas central para la primera derivada, que es de orden dos, es decir, si se disminuye el tamaño de paso, $h$, a la mitad, el error se disminuye una cuarta partes. En principio, esta es una mejor aproximación que los dos esquemas anteriores. La selección del esquema dependerá de la disponibilidad de puntos y del fenómeno físico a tratar. [Volver a la Tabla de Contenido](TOC) Resumen esquemas diferencias finitas para la primera derivada Como la serie de Taylor es infinita, se podrían determinar infinitos esquemas de diferentes ordenes para la primera derivada. En la siguiente tabla se presentan algunos esquemas de diferencias finitas para la primera derivada de diferentes órdenes. 
Se deja al estudiante la consulta de otros esquemas.|***Esquema***|***Función***|***Error***||:-----:|:-----:|:---:||***Forward***|$$f´(x_0)=\frac{f(x_0+h)-f(x_0)}{h}$$|$$\mathcal{O}(h)$$|| |$$f´(x_0)=\frac{-3f(x_0)+4f(x_0+h)-f(x_0+2h)}{2h}$$|$$\mathcal{O}(h^2)$$||***Central***|$$f´(x_0)=\frac{f(x_0+h)-f(x_0-h)}{2h}$$|$$\mathcal{O}(h^2)$$|| |$$f´(x_0)=\frac{f(x_0-2h)-8f(x_0-h)+8f(x_0+h)-f(x_0+2h)}{12h}$$|$$\mathcal{O}(h^4)$$||***Backward***|$$f´(x_0)=\frac{f(x_0)-f(x_0-h)}{h}$$|$$\mathcal{O}(h)$$|| |$$f´(x_0)=\frac{f(x_0-2h)-4f(x_0-h)+3f(x_0)}{2h}$$|$$\mathcal{O}(h^2)$$| [Volver a la Tabla de Contenido](TOC) Esquemas de diferencias finitas para la segunda derivada Siguiendo con la misma forma de abordar el problema para la primera derivada, si se amplian los términos en la serie de Taylor hasta la tercera derivada tanto hacia adelante como hacia atrás, y se suman, se llega a:\begin{equation*}\begin{split}f(x_{i+1}) & = f(x_i) + f'(x_i)h + \frac{f''(x_i)h^2}{2!} + \frac{f'''(x_i)h^3}{3!}\\+ \\f(x_{i-1}) & = f(x_i) - f'(x_i)h + \frac{f''(x_i)h^2}{2!} - \frac{f'''(x_i)h^3}{3!}\\\hline \\f(x_{i+1}) + f(x_{i-1}) & = 2 f(x_i) + 2f''(x_i)\frac{h^2}{2!} + \mathcal{O}(h^3)\end{split}\label{eq:Ec5_6} \tag{5.6}\end{equation*} Despejando para el término de la segunda derivada, se llega a:\begin{equation*}\begin{split}f''(x_i) = \frac{f(x_{i+1}) - 2f(x_i) + f(x_{i-1})}{h^2} + \mathcal{O}(h^3)\end{split}\label{eq:Ec5_7} \tag{5.7}\end{equation*}Que corresponde a un esquema de diferencias finitas de segundo orden para la segunda derivada. A este esquema también se le llama "*molécula de tres puntos*"Igual que para la primera derivada, se pueden determinar infinitos esquemas de diferentes órdenes para la segunda derivada, y derivadas superiores. A continuación se muestra un cuadro resumen de algunos esquemas de diferencias finitas para la segunda derivada. Se deja al estudiante la revisión de esquemas de mayor orden para la segunda derivada y derivadas superiores.|***Esquema***|***Función***|***Error***||:-----:|:-----:|:---:||***Forward***|$$f''(x_0)=\frac{f(x_0)-2f(x_0+h)+f(x_0+2h)}{h^2}$$|$$\mathcal{O}(h)$$|| |$$f''(x_0)=\frac{2f(x_0)-5f(x_0+h)+4f(x_0+2h)-f(x_0+3h)}{h^2}$$|$$\mathcal{O}(h^2)$$||***Central***|$$f''(x_0)=\frac{f(x_0-h)-2f(x_0)+f(x_0+h)}{h^2}$$|$$\mathcal{O}(h^2)$$|| |$$f''(x_0)=\frac{-f(x_0-2h)+16f(x_0-h)-30f(x_0)+16f(x_0+h)-f(x_0+2h)}{12h^2}$$|$$\mathcal{O}(h^4)$$||***Backward***|$$f''(x_0)=\frac{f(x_0-2h)-2f(x_0-h)+f(x_0)}{h}$$|$$\mathcal{O}(h^2)$$|| |$$f''(x_0)=\frac{-f(x_0-3h)+4f(x_0-2h)-5f(x_0-h)+2f(x_0)}{h^2}$$|$$\mathcal{O}(h^2)$$| [Volver a la Tabla de Contenido](TOC) Implementación computacional de algunos esquemas de diferencias finitas A manera de ejemplo, se implementarán algunos esquemas simples de diferencias finitas para la primera derivada. Se deja como actividad a los estudiantes la implementación de otros esquemas para las diferentes derivadas.
###Code
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
sym.init_printing()
#Esquemas de diferencias finitas para la primera derivada
def df1df(x0, h):
# Esquema de diferencias finitas para la primera derivada hacia adelante (forward)
return (f(x0 + h) - f(x0)) / h
def df1db(x0, h):
# Esquema de diferencias finitas para la primera derivada hacia atrás (backward)
return (f(x0) - f(x0 - h) ) / h
def df1dc(x0,h):
# Esquema de diferencias finitas para la primera derivada central (central)
return (f(x0 + h) - f(x0 - h) ) / (2 * h)
#funcion a determinar el valor de la derivada
def f(x):
return 2*x**3 - 3*x**2 + 5*x+0.8
#cálculo y evaluación de la primera derivada empleando cálculo simbólico
def df1de(x0):
x = sym.Symbol('x')
df = sym.diff(f(x), x)
#print(df)
df1 = df.evalf(subs={x:x0})
return df1
h = 0.1
x0 = 0.8
print("1st derivative \t Value \t\t Error(%)")
print('---------------------------------------')
pde = df1de(x0)
pdf = df1df(x0, h)
epdf = abs((pde - pdf) / pde * 100)
print("forward \t {0:6.4f} \t {1:6.2f}".format(pdf,epdf))
pdb = df1db(x0, h)
epdb = abs((pde - pdb) / pde * 100)
print("backward \t {0:6.4f} \t {1:6.2f}".format(pdb,epdb))
pdc = df1dc(x0,h)
epdc = abs((pde - pdc) / pde * 100)
print("central \t {0:6.4f} \t {1:6.2f}".format(pdc, epdc))
print("exacta \t\t {0:6.4f} \t {1}".format(pde, ' -'))
###Output
_____no_output_____
###Markdown
[Volver a la Tabla de Contenido](TOC) Integración Numérica Introducción La [integración numérica](https://en.wikipedia.org/wiki/Numerical_integration) aborda una amplia gama de algoritmos para determinar el valor numérico (aproximado) de una integral definida. En este curso nos centraremos principalmente en los métodos de cuadratura, tanto de interpolación como [gaussiana](https://en.wikipedia.org/wiki/Gaussian_quadrature), como dos ejemplos de dichos algoritmos. El problema a tratar en este capítulo es la solución aproximada de la función\begin{equation*}\begin{split}I = \int_a^b f(x) dx\end{split}\label{eq:Ec5_8} \tag{5.8}\end{equation*} [Volver a la Tabla de Contenido](TOC) Fórmulas de integración de *Newton - Cotes* La idea básica en la integración numérica es cambiar una función difícil de integrar, $f(x)$, dada por la ecuación [(5.8)](Ec5_8), por una función más simple, $p_n(x)$,\begin{equation*}\begin{split}\widetilde{I} \approx \int_{a=x_0}^{b=x_n} p_{n}(x) dx\end{split}\label{eq:Ec5_9} \tag{5.9}\end{equation*}Cabe resaltar que en integración numérica no se conocerá la función a integrar, solo se dispondrá de una serie de $n+1$ puntos $(x_i, y_i), i = 0, 1, 2, \ldots, n$, y a partir de ellos se construye un polinomio interpolante de grado $n$, $p_n$, entre los valores de los límites de integración $a = x_0$ y $b=x_n$. $p_n(x)$ es un polinomio de interpolación de la forma\begin{equation*}\begin{split}p_n(x)=a_0+a_1x+a_2x^2+\ldots+a_{n-1}x^{n-1}+a_nx^n\end{split}\label{eq:Ec5_10} \tag{5.10}\end{equation*}Las fórmulas de integración de [*Newton - Cotes*](https://en.wikipedia.org/wiki/Newton%E2%80%93Cotes_formulas), también llamadas de [cuadratura](https://en.wikipedia.org/wiki/Quadrature_(mathematics)), son un grupo de fórmulas de integración numérica de tipo interpolación, evaluando la función en puntos equidistantes, para determinar un valor aproximado de la integral. 
Si no se tienen puntos espaciados, otros métodos deben ser usados, como por ejemplo cuadratura gaussiana, que se verá al final del capítulo.La forma general de las fórmulas de Newton - Cotes está dada por la función:\begin{equation*}\begin{split}p_n(x)=\sum \limits_{i=0}^n f(x_i)L_{in}(x)\end{split}\label{eq:Ec5_11} \tag{5.11}\end{equation*}donde\begin{equation*}\begin{split}L_{in}(x)=\frac{(x-x_0)\ldots(x-x_{i-1})(x-x_{i+1})\ldots(x-x_n)}{(x_i-x_0)\ldots(x_i-x_{i-1})(x_i-x_{i+1})\ldots(x_i-x_n)}\end{split}\label{eq:Ec5_12} \tag{5.12}\end{equation*}es el polinomio de Lagrange, de donde se deduce que:\begin{equation*}\begin{split}\int_a^b p(x)dx=(b-a)\sum \limits_{i=0}^n f(x_i) \frac{1}{(b-a)} \int_a^b L_{in}(x)dx\end{split}\label{eq:Ec5_13} \tag{5.13}\end{equation*}entonces,\begin{equation*}\begin{split}\int_a^b f(x)dx \approx \int_a^b p(x)dx=(b-a)\sum \limits_{i=0}^n w_if(x_i) \end{split}\label{eq:Ec5_14} \tag{5.14}\end{equation*}donde los pesos, $w_i$ de la función son representados por\begin{equation*}\begin{split}w_i=\frac{1}{(b-a)} \int_a^b L_{in}(x)dx\end{split}\label{eq:Ec5_15} \tag{5.15}\end{equation*}A partir de esta idea se obtienen los diferentes esquemas de integración numérica de *Newton - Cotes* [Volver a la Tabla de Contenido](TOC) Regla trapezoidal Regla trapezoidal de aplicación simple La [regla trapezoidal](https://en.wikipedia.org/wiki/Trapezoidal_rule) emplea una aproximación de la función mediante una línea recta Fuente: wikipedia.com y corresponde al caso en el que el polinomio en la ecuación [(5.11)](Ec5_11) es de primer orden\begin{equation*}\begin{split}I=\int_{a}^{b}f(x)dx \approx \int_a^b \left[ f(a) + \frac{f(b)-f(a)}{b-a}(x-a)\right]dx= (b-a)\frac{f(a)+f(b)}{2}\end{split}\label{eq:Ec5_16} \tag{5.16}\end{equation*}Geométricamente, es equivalente a aproximar el área del trapezoide bajo la línea recta que conecta $f(a)$ y $f(b)$. La integral se representa como:$$I ≈ \text{ancho} \times \text{altura promedio}$$El error en la regla trapezoidal simple se puede determinar como:\begin{equation*}\begin{split}E_t=-\frac{1}{12}f''(\xi)(b-a)^3\end{split}\label{eq:Ec5_17} \tag{5.17}\end{equation*} [Volver a la Tabla de Contenido](TOC) Regla trapezoidal de aplicación múltiple Una manera de mejorar la exactitud de la regla trapezoidal es dividir el intervalo de integración de $a$ a $b$ en un número $n$ de segmentos y aplicar el método a cada uno de ellos. Las ecuaciones resultantes son llamadas fórmulas de integración de múltiple aplicación o compuestas. Fuente: wikipedia.com Hay $n+1$ puntos base igualmente espaciados $(x_0, x_1, x_2, \ldots, x_n)$. En consecuencia hay $n$ segmentos de igual anchura: $h = (b–a) / n$. 
Si $a$ y $b$ son designados como $x_0$ y $x_n$ respectivamente, la integral total se representará como:\begin{equation*}\begin{split}I=\int_{x_0}^{x_1}f(x)dx+\int_{x_1}^{x_2}f(x)dx+\int_{x_2}^{x_3}f(x)dx+\ldots+\int_{x_{n-2}}^{x_{n-1}}f(x)dx+\int_{x_{n-1}}^{x_n}f(x)dx\end{split}\label{eq:Ec5_18} \tag{5.18}\end{equation*}Al sustituir la regla trapezoidal simple en cada integrando, se tiene\begin{equation*}\begin{split}I\approx \left(f(x_0)+f(x_1)\right)\frac{h}{2}+\left(f(x_1)+f(x_2)\right)\frac{h}{2}+\left(f(x_2)+f(x_3)\right)\frac{h}{2}+\ldots\left(f(x_{n-2})+f(x_{n-1})\right)\frac{h}{2}+\left(f(x_{n-1})+f(x_n)\right)\frac{h}{2}\end{split}\label{eq:Ec5_19} \tag{5.19}\end{equation*}ahora, agrupando términos\begin{equation*}\begin{split}I\approx \frac{h}{2}\left[ f(x_0) + 2\sum_{i=1}^{n-1}f(x_i)+f(x_n) \right]\end{split}\label{eq:Ec5_20} \tag{5.20}\end{equation*}donde $h=(b-a)/n$ [Volver a la Tabla de Contenido](TOC) Implementación computacional
###Code
import numpy as np
import matplotlib.pyplot as plt
def trapezoidal(x):
n = len(x)
    h = (x[-1] - x[0]) / (n - 1)  # n points define n - 1 subintervals
suma = 0
for i in range(1, n-1):
suma += funcion(x[i])
return h * (funcion(x[0]) + 2 * suma + funcion(x[-1])) / 2
def funcion(x):
return 4 / (1 + x**2)
a = 0
b = 1
n = 1000
x = np.linspace(a, b, n+1)
I = trapezoidal(x)
I
###Output
_____no_output_____
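###Markdown
As a quick sanity check, and as a preview of the error analysis in the next section, the composite trapezoidal rule is second order: doubling the number of segments should roughly quarter the error. A minimal sketch of that check, reusing `trapezoidal`, `funcion`, `a` and `b` from the cell above and comparing against the exact value $\pi$:
###Code
# error of the composite trapezoidal rule as the number of segments is doubled
for n_seg in (10, 20, 40, 80):
    xs = np.linspace(a, b, n_seg + 1)   # n_seg segments -> n_seg + 1 points
    print(n_seg, abs(np.pi - trapezoidal(xs)))
###Output
_____no_output_____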
###Markdown
[Volver a la Tabla de Contenido](TOC) Error en la aplicación de la regla trapezoidal Recordando que estos esquemas provienen de la serie truncada de Taylor, el error se puede obtener determinando el primer término truncado en el esquema, que para la regla trapezoidal de aplicación simple corresponde a:\begin{equation*}\begin{split}E_t=-\frac{1}{12}f''(\xi)(b-a)^3\end{split}\label{eq:Ec5_21} \tag{5.21}\end{equation*}donde $f''(\xi)$ es la segunda derivada en el punto $\xi$ en el intervalo $[a,b]$, y $\xi$ es un valor que maximiza la evaluación de esta segunda derivada. Generalizando este concepto a la aplicación múltiple de la regla trapezoidal, se pueden sumar cada uno de los errores en cada segmento para dar:\begin{equation*}\begin{split}E_t=-\frac{(b-a)^3}{12n^3}\sum\limits_{i=1}^n f''(\xi_i)\end{split}\label{eq:Ec5_22} \tag{5.22}\end{equation*}el anterior resultado se puede simplificar estimando la media, o valor promedio, de la segunda derivada para todo el intervalo\begin{equation*}\begin{split}\bar{f''} \approx \frac{\sum \limits_{i=1}^n f''(\xi_i)}{n}\end{split}\label{eq:Ec5_23} \tag{5.23}\end{equation*}de esta ecuación se tiene que $\sum f''(\xi_i)\approx nf''$, y reemplazando en la ecuación [(5.23)](Ec5_23)\begin{equation*}\begin{split}E_t \approx \frac{(b-a)^3}{12n^2}\bar{f''}\end{split}\label{eq:Ec5_24} \tag{5.24}\end{equation*}De este resultado se observa que si se duplica el número de segmentos, el error de truncamiento se disminuirá a una cuarta parte. [Volver a la Tabla de Contenido](TOC) Reglas de Simpson Las [reglas de Simpson](https://en.wikipedia.org/wiki/Simpson%27s_rule) son esquemas de integración numérica en honor al matemático [*Thomas Simpson*](https://en.wikipedia.org/wiki/Thomas_Simpson), utilizado para obtener la aproximación de la integral empleando interpolación polinomial sustituyendo a $f(x)$. [Volver a la Tabla de Contenido](TOC) Regla de Simpson1/3 de aplicación simple La primera regla corresponde a una interpolación polinomial de segundo orden sustituida en la ecuación [(5.8)](Ec5_8) Fuente: wikipedia.com \begin{equation*}\begin{split}I=\int_a^b f(x)dx \approx \int_a^b p_2(x)dx\end{split}\label{eq:Ec5_25} \tag{5.25}\end{equation*}del esquema de interpolación de Lagrange para un polinomio de segundo grado, visto en el capitulo anterior, y remplazando en la integral arriba, se llega a \begin{equation*}\begin{split}I\approx\int_{x0}^{x2} \left[\frac{(x-x_1)(x-x_2)}{(x_0-x_1)(x_0-x_2)}f(x_0)+\frac{(x-x_0)(x-x_2)}{(x_1-x_0)(x_1-x_2)}f(x_1)+\frac{(x-x_0)(x-x_1)}{(x_2-x_0)(x_2-x_1)}f(x_2)\right]dx\end{split}\label{eq:Ec5_26} \tag{5.26}\end{equation*}realizando la integración de forma analítica y un manejo algebraico, resulta\begin{equation*}\begin{split}I\approx\frac{h}{3} \left[ f(x_0)+4f(x_1)+f(x_2)\right]\end{split}\label{eq:Ec5_27} \tag{5.27}\end{equation*}donde $h=(b-a)/2$ y los $x_{i+1} = x_i + h$ A continuación, vamos a comparar graficamente las funciones "exacta" (con muchos puntos) y una aproximada empleando alguna técnica de interpolación para $n=3$ puntos (Polinomio interpolante de orden $2$).
###Code
from scipy.interpolate import barycentric_interpolate
# usaremos uno de los tantos métodos de interpolación dispobibles en las bibliotecas de Python
n = 3 # puntos a interpolar para un polinomio de grado 2
xp = np.linspace(a,b,n) # generación de n puntos igualmente espaciados para la interpolación
fp = funcion(xp) # evaluación de la función en los n puntos generados
x = np.linspace(a, b, 100) # generación de 100 puntos igualmente espaciados
y = barycentric_interpolate(xp, fp, x) # interpolación numérica empleando el método del Baricentro
fig = plt.figure(figsize=(9, 6), dpi= 80, facecolor='w', edgecolor='k')
ax = fig.add_subplot(111)
l, = plt.plot(x, y)
plt.plot(x, funcion(x), '-', c='red')
plt.plot(xp, fp, 'o', c=l.get_color())
plt.annotate('Función "Real"', xy=(.63, 1.5), xytext=(0.8, 1.25),arrowprops=dict(facecolor='black', shrink=0.05),)
plt.annotate('Función interpolada', xy=(.72, 1.75), xytext=(0.4, 2),arrowprops=dict(facecolor='black', shrink=0.05),)
plt.grid(True) # muestra la malla de fondo
plt.show() # muestra la gráfica
###Output
_____no_output_____
###Markdown
Se observa que hay una gran diferencia entre las áreas que se estarían abarcando en la función llamada "*real*" (que se emplearon $100$ puntos para su generación) y la función *interpolada* (con únicamente $3$ puntos para su generación) que será la empleada en la integración numérica (aproximada) mediante la regla de *Simpson $1/3$*.Conscientes de esto, procederemos entonces a realizar el cálculo del área bajo la curva del $p_3(x)$ empleando el método de *Simpson $1/3$* Creemos un programa en *Python* para que nos sirva para cualquier función $f(x)$ que queramos integrar en cualquier intervalo $[a,b]$ empleando la regla de integración de *Simpson $1/3$*:
###Code
# se ingresan los valores del intervalo [a,b]
a = float(input('Ingrese el valor del límite inferior: '))
b = float(input('Ingrese el valor del límite superior: '))
# cuerpo del programa por la regla de Simpson 1/3
h = (b-a)/2 # cálculo del valor de h
x0 = a # valor del primer punto para la fórmula de S1/3
x1 = x0 + h # Valor del punto intermedio en la fórmula de S1/3
x2 = b # valor del tercer punto para la fórmula de S1/3
fx0 = funcion(x0) # evaluación de la función en el punto x0
fx1 = funcion(x1) # evaluación de la función en el punto x1
fx2 = funcion(x2) # evaluación de la función en el punto x2
int_S13 = h / 3 * (fx0 + 4*fx1 + fx2)
#erel = np.abs(exacta - int_S13) / exacta * 100
print('el valor aproximado de la integral por la regla de Simpson1/3 es: ', int_S13, '\n')
#print('el error relativo entre el valor real y el calculado es: ', erel,'%')
###Output
_____no_output_____
###Markdown
[Volver a la Tabla de Contenido](TOC) Error in the simple-application Simpson 1/3 rule The problem with computing the error this way is that we do not actually know the exact value of the integral. To estimate the error made when using the *Simpson 1/3* rule:\begin{equation*}\begin{split}-\frac{h^5}{90}f^{(4)}(\xi)\end{split}\label{eq:Ec5_28} \tag{5.28}\end{equation*}we need to differentiate the original function four times: $f(x)=\frac{4}{1+x^2}$. For this we will again use symbolic computation (always verify that the answer obtained is correct!):
###Code
from sympy import *
x = symbols('x')
###Output
_____no_output_____
###Markdown
We differentiate the function $f(x)$ four times with respect to $x$:
###Code
deriv4 = diff(4 / (1 + x**2),x,4)
deriv4
###Output
_____no_output_____
###Markdown
and we evaluate this fourth derivative at a point $0 \leq \xi \leq 1$. Strictly speaking, to bound the error we should use the value of $\xi$ that maximizes $\left|f^{(4)}(x)\right|$ on $[0,1]$ (check the behavior of $f^{(4)}$ graphically and/or with the techniques from differential calculus; for this integrand its magnitude is largest at $x=0$). Here we simply evaluate the fourth derivative at the right end of the interval, which gives a rough estimate of the error term:
###Code
x0 = 1.0
evald4 = deriv4.evalf(subs={x: x0})
print('El valor de la cuarta derivada de f en x0={0:6.2f} es {1:6.4f}: '.format(x0, evald4))
###Output
_____no_output_____
###Markdown
We compute the error estimate for the *Simpson 1/3* rule:
###Code
errorS13 = abs(h**5*evald4/90)
print('El error al usar la regla de Simpson 1/3 es: {0:6.6f}'.format(errorS13))
###Output
_____no_output_____
###Markdown
We can then express the value of the integral of the function $f(x)=\frac{4}{1+x^2}$ on the interval $[0,1]$ using the *Simpson $1/3$ rule* as:$$\color{blue}{\int_0^1 \frac{4}{1 + x^2}dx} = \color{green}{3.133333} \color{red}{+ 0.004167}$$ If we were to do it "by hand" $\ldots$ applying the formula directly, with the following data: $h = \frac{(1.0 - 0.0)}{2.0} = 0.5$, $x_0 = 0.0$, $x_1 = 0.5$, $x_2 = 1.0$, $f(x) = \frac{4}{1 + x^2}$, and substituting these values into the given formula:$\int_0^1\frac{4}{1 + x^2}dx \approx \frac{0.5}{3} \left[f(0)+4f(0.5)+f(1)\right]$$\int_0^1\frac{4}{1 + x^2}dx \approx \frac{0.5}{3} \left[ \frac{4}{1 + 0^2} + 4\frac{4}{1 + 0.5^2} + \frac{4}{1 + 1^2} \right] \approx 3.133333$ [Volver a la Tabla de Contenido](TOC) Composite (multiple-application) Simpson 1/3 rule Just as with the trapezoidal rule, the Simpson rules also have a multiple-application (also called composite) scheme. Suppose the interval $[a,b]$ is divided into $n$ subintervals, with $n$ even, so that the integral becomes\begin{equation*}\begin{split}I=\int_{x_0}^{x_2}f(x)dx+\int_{x_2}^{x_4}f(x)dx+\ldots+\int_{x_{n-2}}^{x_n}f(x)dx\end{split}\label{eq:Ec5_29} \tag{5.29}\end{equation*}and substituting the Simpson 1/3 rule into each of them gives\begin{equation*}\begin{split}I \approx 2h\frac{f(x_0)+4f(x_1)+f(x_2)}{6}+2h\frac{f(x_2)+4f(x_3)+f(x_4)}{6}+\ldots+2h\frac{f(x_{n-2})+4f(x_{n-1})+f(x_n)}{6}\end{split}\label{eq:Ec5_30} \tag{5.30}\end{equation*}so the composite (multiple-application) Simpson rule is written as:\begin{equation*}\begin{split}I=\int_a^bf(x)dx\approx \frac{h}{3}\left[f(x_0) + 2 \sum \limits_{j=1}^{n/2-1} f(x_{2j}) + 4 \sum \limits_{j=1}^{n/2} f(x_{2j-1})+f(x_n)\right]\end{split}\label{eq:Ec5_31} \tag{5.31}\end{equation*}where $x_j=a+jh$ for $j=0,1,2, \ldots, n-1, n$ with $h=(b-a)/n$, $x_0=a$ and $x_n=b$. [Volver a la Tabla de Contenido](TOC) Computational implementation of the composite Simpson 1/3 rule [Volver a la Tabla de Contenido](TOC) Simple-application Simpson 3/8 rule This rule results from replacing the function $f(x)$ with a third-order interpolation:\begin{equation*}\begin{split}I=\int_{a}^{b}f(x)dx \approx \frac{3h}{8}\left[ f(x_0)+3f(x_1)+3f(x_2)+f(x_3) \right]\end{split}\label{eq:Ec5_32} \tag{5.32}\end{equation*} Following a procedure similar to the one used for the *Simpson $1/3$* rule, but this time using $n=4$ points:
###Code
# usaremos uno de los tantos métodos de interpolación dispobibles en las bibliotecas de Python
n = 4 # puntos a interpolar para un polinomio de grado 2
xp = np.linspace(0,1,n) # generación de n puntos igualmente espaciados para la interpolación
fp = funcion(xp) # evaluación de la función en los n puntos generados
x = np.linspace(0, 1, 100) # generación de 100 puntos igualmente espaciados
y = barycentric_interpolate(xp, fp, x) # interpolación numérica empleando el método del Baricentro
fig = plt.figure(figsize=(9, 6), dpi= 80, facecolor='w', edgecolor='k')
ax = fig.add_subplot(111)
l, = plt.plot(x, y)
plt.plot(x, funcion(x), '-', c='red')
plt.plot(xp, fp, 'o', c=l.get_color())
plt.annotate('"Real"', xy=(.63, 1.5), xytext=(0.8, 1.25),arrowprops=dict(facecolor='black', shrink=0.05),)
plt.annotate('Interpolación', xy=(.72, 1.75), xytext=(0.4, 2),arrowprops=dict(facecolor='black', shrink=0.05),)
plt.grid(True) # muestra la malla de fondo
plt.show() # muestra la gráfica
# cuerpo del programa por la regla de Simpson 3/8
h = (b - a) / 3 # cálculo del valor de h
int_S38 = 3 * h / 8 * (funcion(a) + 3*funcion(a + h) + 3*funcion(a + 2*h) + funcion(a + 3*h))
erel = np.abs(np.pi - int_S38) / np.pi * 100
print('el valor aproximado de la integral utilizando la regla de Simpson 3/8 es: ', int_S38, '\n')
print('el error relativo entre el valor real y el calculado es: ', erel,'%')
###Output
_____no_output_____
###Markdown
To estimate the error of the *Simpson 3/8* rule:$$\color{red}{-\frac{3h^5}{80}f^{(4)}(\xi)}$$we again need the fourth derivative of the integrand, which we already obtained above using symbolic computation (always verify that the answer obtained is correct!):
###Code
errorS38 = 3*h**5*evald4/80
print('El error al usar la regla de Simpson 3/8 es: ',errorS38)
###Output
_____no_output_____
###Markdown
We can then express the value of the integral of the function $f(x)=\frac{4}{1+x^2}$ on the interval $[0,1]$ using the *Simpson $3/8$ rule* as:$$\color{blue}{\int_0^1\frac{4}{1 + x^2}dx} = \color{green}{3.138462} \color{red}{- 0.001852}$$ Applying the formula directly, with the following data: $h = \frac{(1.0 - 0.0)}{3.0} = 0.3333$, $x_0 = 0.0$, $x_1 = 0.3333$, $x_2 = 0.6666$, $x_3 = 1.00$, $f(x) = \frac{4}{1 + x^2}$, and substituting these values into the given formula:$\int_0^1\frac{4}{1 + x^2}dx \approx \frac{3\times0.3333}{8} \left[ \frac{4}{1 + 0^2} + 3\frac{4}{1 + 0.3333^2} +3\frac{4}{1 + 0.6666^2} + \frac{4}{1 + 1^2} \right] \approx 3.138462$ That would be the answer if we settled for what can be done by hand. [Volver a la Tabla de Contenido](TOC) Composite (multiple-application) Simpson 3/8 rule Dividing the interval $[a,b]$ into $n$ subintervals of length $h=(b-a)/n$, with $n$ a multiple of 3, the integral becomes\begin{equation*}\begin{split}I=\int_{x_0}^{x_3}f(x)dx+\int_{x_3}^{x_6}f(x)dx+\ldots+\int_{x_{n-3}}^{x_n}f(x)dx\end{split}\label{eq:Ec5_33} \tag{5.33}\end{equation*}and substituting the Simpson 3/8 rule into each of them gives\begin{equation*}\begin{split}I=\int_a^bf(x)dx\approx \frac{3h}{8}\left[f(x_0) + 3 \sum \limits_{i=0}^{n/3-1} f(x_{3i+1}) + 3 \sum \limits_{i=0}^{n/3-1}f(x_{3i+2})+2 \sum \limits_{i=0}^{n/3-2} f(x_{3i+3})+f(x_n)\right]\end{split}\label{eq:Ec5_34} \tag{5.34}\end{equation*}where each sum runs over the indices shown, i.e., the sample points advance three at a time. [Volver a la Tabla de Contenido](TOC) Computational implementation of the composite Simpson 3/8 rule (a sketch is provided in the next cell)
###Code
#
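# One possible implementation (a sketch only) of the composite Simpson 3/8 rule of Eq. (5.34).
# It assumes `funcion` defined earlier in the notebook and requires n to be a multiple of 3.
def simpson38_compuesta(f, a, b, n):
    if n % 3 != 0:
        raise ValueError('n must be a multiple of 3')
    h = (b - a) / n
    x = [a + j * h for j in range(n + 1)]
    s1 = sum(f(x[3 * i + 1]) for i in range(n // 3))       # points x_{3i+1}
    s2 = sum(f(x[3 * i + 2]) for i in range(n // 3))       # points x_{3i+2}
    s3 = sum(f(x[3 * i + 3]) for i in range(n // 3 - 1))   # interior points x_{3i+3}
    return 3 * h / 8 * (f(x[0]) + 3 * s1 + 3 * s2 + 2 * s3 + f(x[-1]))
print(simpson38_compuesta(funcion, 0, 1, 9))   # should be close to pi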
###Output
_____no_output_____
###Markdown
[Volver a la Tabla de Contenido](TOC) Cuadratura de Gauss Introducción Retomando la idea inicial de los esquemas de [cuadratura](Quadrature), el valor de la integral definida se estima de la siguiente manera:\begin{equation*}\begin{split}I=\int_a^b f(x)dx \approx \sum \limits_{i=0}^n c_if(x_i)\end{split}\label{eq:Ec5_35} \tag{5.35}\end{equation*}Hasta ahora hemos visto los métodos de la regla trapezoidal y las reglas de Simpson más empleadas. En estos esquemas, la idea central es la distribución uniforme de los puntos que siguen la regla $x_i=x_0+ih$, con $i=0,1,2, \ldots, n$ y la evaluación de la función en estos puntos.Supongamos ahora que la restricción de la uniformidad en el espaciamiento de esos puntos fijos no es más considerada y se tiene la libertad de evaluar el área bajo una recta que conecte a dos puntos cualesquiera sobre la curva. Al ubicar estos puntos en forma “inteligente”, se puede definir una línea recta que equilibre los errores negativos y positivos Fuente: Chapra, S., Canale, R. Métodos Numéricos para ingenieros, 5a Ed. Mc. Graw Hill. 2007 De la figura de la derecha, se disponen de los puntos $x_0$ y $x_1$ para evaluar la función $f(x)$. Expresando la integral bajo la curva de forma aproximada dada en la la ecuación ([5.35](Ec5_35)), y empleando los límites de integración en el intervalo $[-1,1]$ por simplicidad (después se generalizará el concepto a un intervalo $[a,b]$), se tiene\begin{equation*}\begin{split}I=\int_{-1}^1 f(x)dx \approx c_0f(x_0)+c_1f(x_1)\end{split}\label{eq:Ec5_36} \tag{5.36}\end{equation*} [Volver a la Tabla de Contenido](TOC) Determinación de los coeficientes se tiene una ecuación con cuatro incógnitas ($c_0, c_1, x_0$ y $x_1$) que se deben determinar. Para ello, supongamos que disponemos de un polinomio de hasta grado 3, $f_3(x)$, de donde podemos construir cuatro ecuaciones con cuatro incógnitas de la siguiente manera:- $f_3(x)=1$:\begin{equation*}\begin{split}\int_{-1}^1 1dx = c_0 \times 1 + c_1 \times 1 = c_0 + c_1 = 2\end{split}\label{eq:Ec5_37} \tag{5.37}\end{equation*}- $f_3(x)=x$:\begin{equation*}\begin{split}\int_{-1}^1 xdx = c_0x_0 + c_1x_1 = 0\end{split}\label{eq:Ec5_38} \tag{5.38}\end{equation*}- $f_3(x)=x^2$:\begin{equation*}\begin{split}\int_{-1}^1 x^2dx = c_0x^2_0 + c_1x^2_1 = \frac{2}{3}\end{split}\label{eq:Ec5_39} \tag{5.39}\end{equation*}y por último- $f_3(x)=x^3$:\begin{equation*}\begin{split}\int_{-1}^1 x^3dx = c_0x^3_0 + c_1x^3_1 = 0\end{split}\label{eq:Ec5_40} \tag{5.40}\end{equation*}resolviendo simultáneamente las dos primeras ecuaciones para $c_0$ y $c_1$ en térm,inos de $x_0$ y $x_1$, se llega a\begin{equation*}\begin{split}c_0=\frac{2x_1}{x_1-x_0}, \quad c_1=-\frac{2x_0}{x_1-x_0}\end{split}\label{eq:Ec5_41} \tag{5.41}\end{equation*}reemplazamos estos dos valores en las siguientes dos ecuaciones\begin{equation*}\begin{split}\frac{2}{3}=\frac{2x_0^2x_1}{x_1-x_0}-\frac{2x_0x_1^2}{x_1-x_0}\end{split}\label{eq:Ec5_42} \tag{5.42}\end{equation*}\begin{equation*}\begin{split}0=\frac{2x_0^3x_1}{x_1-x_0}-\frac{2x_0x_1^3}{x_1-x_0}\end{split}\label{eq:Ec5_43} \tag{5.43}\end{equation*}de la ecuación ([5.43](Ec5_43)) se tiene\begin{equation*}\begin{split}x_0^3x_1&=x_0x_1^3 \\x_0^2 &= x_1^2\end{split}\label{eq:Ec5_44} \tag{5.44}\end{equation*}de aquí se tiene que $|x_0|=|x_1|$ (para considerar las raíces negativas recuerde que $\sqrt{a^2}= \pm a = |a|$), y como se asumió que $x_00$ (trabajando en el intervalo $[-1,1]$), llegándose finalmente a que $x_0=-x_1$. 
Reemplazando este resultado en la ecuación ([5.42](Ec5_42))\begin{equation*}\begin{split}\frac{2}{3}=2\frac{x_1^3+x_1^3}{2x_1}\end{split}\label{eq:Ec5_45} \tag{5.45}\end{equation*}despejando, $x_1^2=1/3$, y por último se llega a que\begin{equation*}\begin{split}x_0=-\frac{\sqrt{3}}{3}, \quad x_1=\frac{\sqrt{3}}{3}\end{split}\label{eq:Ec5_46} \tag{5.46}\end{equation*}reemplazando estos resultados en la ecuación ([5.41](Ec5_41)) y de la ecuación ([5.37](Ec5_37)), se tiene que $c_0=c_1=1$. Reescribiendo la ecuación ([5.36](Ec5_36)) con los valores encontrados se llega por último a:\begin{equation*}\begin{split}I=\int_{-1}^1 f(x)dx &\approx c_0f(x_0)+c_1f(x_1) \\&= f \left( \frac{-\sqrt{3}}{3}\right)+f \left( \frac{\sqrt{3}}{3}\right)\end{split}\label{eq:Ec5_47} \tag{5.47}\end{equation*}Esta aproximación realizada es "exacta" para polinomios de grado menor o igual a tres ($3$). La aproximación trapezoidal es exacta solo para polinomios de grado uno ($1$).***Ejemplo:*** Calcule la integral de la función $f(x)=x^3+2x^2+1$ en el intervalo $[-1,1]$ empleando tanto las técnicas analíticas como la cuadratura de Gauss vista.- ***Solución analítica (exacta)***$$\int_{-1}^1 (x^3+2x^2+1)dx=\left.\frac{x^4}{4}+\frac{2x^3}{3}+x \right |_{-1}^1=\frac{10}{3}$$- ***Aproximación numérica por Cuadratura de Gauss***\begin{equation*}\begin{split}\int_{-1}^1 (x^3+2x^2+1)dx &\approx1f\left(-\frac{\sqrt{3}}{3} \right)+1f\left(\frac{\sqrt{3}}{3} \right) \\&=-\frac{3\sqrt{3}}{27}+\frac{2\times 3}{9}+1+\frac{3\sqrt{3}}{27}+\frac{2\times 3}{9}+1 \\&=2+\frac{4}{3} \\&= \frac{10}{3}\end{split}\end{equation*} [Volver a la Tabla de Contenido](TOC) Cambios de los límites de integración Obsérvese que los límites de integración de la ecuación ([5.47](Ec5_47)) son de $-1$ a $1$. Esto se hizo para simplificar las matemáticas y para hacer la formulación tan general como fuera posible. Asumamos ahora que se desea determinar el valor de la integral entre dos límites cualesquiera $a$ y $b$. Supongamos también, que una nueva variable $x_d$ se relaciona con la variable original $x$ de forma lineal,\begin{equation*}\begin{split}x=a_0+a_1x_d\end{split}\label{eq:Ec5_48} \tag{5.48}\end{equation*}si el límite inferior, $x=a$, corresponde a $x_d=-1$, estos valores podrán sustituirse en la ecuación ([5.48](Ec5_48)) para obtener\begin{equation*}\begin{split}a=a_0+a_1(-1)\end{split}\label{eq:Ec5_49} \tag{5.49}\end{equation*}de manera similar, el límite superior, $x=b$, corresponde a $x_d=1$, para dar\begin{equation*}\begin{split}b=a_0+a_1(1)\end{split}\label{eq:Ec5_50} \tag{5.50}\end{equation*}resolviendo estas ecuaciones simultáneamente,\begin{equation*}\begin{split}a_0=(b+a)/2, \quad a_1=(b-a)/2\end{split}\label{eq:Ec5_51} \tag{5.51}\end{equation*}sustituyendo en la ecuación ([5.48](Ec5_48))\begin{equation*}\begin{split}x=\frac{(b+a)+(b-a)x_d}{2}\end{split}\label{eq:Ec5_52} \tag{5.52}\end{equation*}derivando la ecuación ([5.52](Ec5_52)),\begin{equation*}\begin{split}dx=\frac{b-a}{2}dx_d\end{split}\label{eq:Ec5_53} \tag{5.53}\end{equation*}Las ecuacio es ([5.51](Ec5_51)) y ([5.52](Ec5_52)) se pueden sustituir para $x$ y $dx$, respectivamente, en la evaluación de la integral. Estas sustituciones transforman el intervalo de integración sin cambiar el valor de la integral. 
En este caso\begin{equation*}\begin{split}\int_a^b f(x)dx = \frac{b-a}{2} \int_{-1}^1 f \left( \frac{(b+a)+(b-a)x_d}{2}\right)dx_d\end{split}\label{eq:Ec5_54} \tag{5.54}\end{equation*}Esta integral se puede aproximar como,\begin{equation*}\begin{split}\int_a^b f(x)dx \approx \frac{b-a}{2} \left[f\left( \frac{(b+a)+(b-a)x_0}{2}\right)+f\left( \frac{(b+a)+(b-a)x_1}{2}\right) \right]\end{split}\label{eq:Ec5_55} \tag{5.55}\end{equation*} [Volver a la Tabla de Contenido](TOC) Fórmulas de punto superior La fórmula anterior para la cuadratura de Gauss era de dos puntos. Se pueden desarrollar versiones de punto superior en la forma general:\begin{equation*}\begin{split}I \approx c_0f(x_0) + c_1f(x_1) + c_2f(x_2) +\ldots+ c_{n-1}f(x_{n-1})\end{split}\label{eq:Ec5_56} \tag{5.56}\end{equation*}con $n$, el número de puntos.Debido a que la cuadratura de Gauss requiere evaluaciones de la función en puntos espaciados uniformemente dentro del intervalo de integración, no es apropiada para casos donde se desconoce la función. Si se conoce la función, su ventaja es decisiva.En la siguiente tabla se presentan los valores de los parámertros para $1, 2, 3, 4$ y $5$ puntos. |$$n$$ | $$c_i$$ | $$x_i$$ ||:----:|:----------:|:-------------:||$$1$$ |$$2.000000$$| $$0.000000$$ ||$$2$$ |$$1.000000$$|$$\pm0.577350$$||$$3$$ |$$0.555556$$|$$\pm0.774597$$|| |$$0.888889$$| $$0.000000$$ ||$$4$$ |$$0.347855$$|$$\pm0.861136$$|| |$$0.652145$$|$$\pm0.339981$$||$$5$$ |$$0.236927$$|$$\pm0.906180$$|| |$$0.478629$$|$$\pm0.538469$$|| |$$0.568889$$| $$0.000000$$ |
###Code
import numpy as np
import pandas as pd
GaussTable = [[[0], [2]], [[-1/np.sqrt(3), 1/np.sqrt(3)], [1, 1]], [[-np.sqrt(3/5), 0, np.sqrt(3/5)], [5/9, 8/9, 5/9]], [[-0.861136, -0.339981, 0.339981, 0.861136], [0.347855, 0.652145, 0.652145, 0.347855]], [[-0.90618, -0.538469, 0, 0.538469, 0.90618], [0.236927, 0.478629, 0.568889, 0.478629, 0.236927]], [[-0.93247, -0.661209, -0.238619, 0.238619, 0.661209, 0.93247], [0.171324, 0.360762, 0.467914, 0.467914, 0.360762, 0.171324]]]
display(pd.DataFrame(GaussTable, columns=["Integration Points", "Corresponding Weights"]))
def IG(f, n):
n = int(n)
return sum([GaussTable[n - 1][1][i]*f(GaussTable[n - 1][0][i]) for i in range(n)])
def f(x): return x**9 + x**8
IG(f, 5.0)
###Output
_____no_output_____
###Markdown
[Volver a la Tabla de Contenido](TOC) Ejemplo Cuadratura de Gauss Determine el valor aproximado de:$$\int_0^1 \frac{4}{1+x^2}dx$$empleando cuadratura gaussiana de dos puntos.Reemplazando los parámetros requeridos en la ecuación ([5.55](Ec5_55)), donde $a=0$, $b=1$, $x_0=-\sqrt{3}/3$ y $x_1=\sqrt{3}/3$\begin{equation*}\begin{split}\int_0^1 f(x)dx &\approx \frac{1-0}{2} \left[f\left( \frac{(1+0)+(1-0)\left(-\frac{\sqrt{3}}{3}\right)}{2}\right)+f\left( \frac{(1+0)+(1-0)\left(\frac{\sqrt{3}}{3}\right)}{2}\right) \right]\\&= \frac{1}{2} \left[f\left( \frac{1-\frac{\sqrt{3}}{3}}{2}\right)+f\left( \frac{1+\frac{\sqrt{3}}{3}}{2}\right) \right]\\&= \frac{1}{2} \left[ \frac{4}{1 + \left( \frac{1-\frac{\sqrt{3}}{3}}{2} \right)^2}+\frac{4}{1 + \left( \frac{1+\frac{\sqrt{3}}{3}}{2} \right)^2} \right]\\&=3.147541\end{split}\end{equation*}Ahora veamos una breve implementación computacional
###Code
import numpy as np
def fxG(a, b, x):
xG = ((b + a) + (b - a) * x) / 2
return funcion(xG)
def GQ2(a,b):
c0 = 1.0
c1 = 1.0
x0 = -1.0 / np.sqrt(3)
x1 = 1.0 / np.sqrt(3)
return (b - a) / 2 * (c0 * fxG(a,b,x0) + c1 * fxG(a,b,x1))
print(GQ2(0, 1))  # a = 0, b = 1 for this example
###Output
_____no_output_____
###Markdown
[Volver a la Tabla de Contenido](TOC)
###Code
from IPython.core.display import HTML
def css_styling():
styles = open('./nb_style.css', 'r').read()
return HTML(styles)
css_styling()
###Output
_____no_output_____ |
HighLevelFunctionDemo.ipynb | ###Markdown
It is important to note that the total atmospheric attenuation is not a simple sum of each of the attenuation components:
###Code
#The Total Attenuation
At
#The Gaseous Attenuation Component
Ag
#The Cloud Attenuation Component
Ac
#The Rain Attenuation Component
Ar
#The Scintillation Attenuation Component
As
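# Sketch of why the total is not a plain sum: the library used in this demo appears to follow
# ITU-R P.618, which combines the components as
#   At = Ag + sqrt((Ar + Ac)**2 + As**2)
# Assuming At, Ag, Ac, Ar, As above hold the corresponding attenuation values (in dB),
# the recombined total should closely match At:
import numpy as np
Ag + np.sqrt((Ar + Ac)**2 + As**2)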
###Output
_____no_output_____ |
03_Feature_Selection.ipynb | ###Markdown
How-To Guide into Feature Selection IntroductionThis is the third post in my series on transforming data into alpha. If you haven't yet see the [framework overview]() or [feature engineering guide](), please take a minute to read that first... This post is going to delve into the mechanics of _feature selection_, in other words choosing between the many variations of features you've created in the feature engineering stage. By design, many of the features you've created will be very similar to each other (aka "collinear") because you've derived them from the same underlying dataset. MotivationThe previous step of the process, feature engineering, is intended to be a creative, loose process akin to a brainstorming session. The result should be tens (or hundreds) of variations of features to evaluate. However, most models will _generalize_ better (i.e., work well on data they haven't seen) with fewer features. They will also be much more interpretable. Therefore, we need a systematic approach to deciding which of the many posible features to use. That's where the _feature selection_ process comes in. PhilosophyIn feature selection, we strive to meet two goals:1. __Strength__: Choose the features with the strongest, most persistent relationships to the target outcome variable. The reasons for this are obvious.2. __Orthogonality__: Minimize the amount of overlap or collinearity in your selected features. The importance of orthogonality (non-overlap) of features is much greater than you might guess. I am biased towards making feature selection a relatively mechanical process. The "art" should mainly be encapsulated within the prior step (feature engineering) and the subsequent step (modeling). Feature selection should, in my view, follow a heuristic and can be encoded into an algorithm if desired. For purposes of this tutorial, I'll keep things relatively manual. Getting StartedLet's dive in. I will begin by loading the feature set created in the prior step. I'm also going to create the _outcomes_ `DataFrame` as done in the Framework Overview post. Please refer to those if you haven't already.
###Code
import numpy as np
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like # remove once updated pandas-datareader issue is fixed
# https://github.com/pydata/pandas-datareader/issues/534
import pandas_datareader.data as web
%matplotlib inline
def get_symbols(symbols,data_source, begin_date=None,end_date=None):
out = pd.DataFrame()
for symbol in symbols:
df = web.DataReader(symbol, data_source,begin_date, end_date)[['AdjOpen','AdjHigh','AdjLow','AdjClose','AdjVolume']].reset_index()
df.columns = ['date','open','high','low','close','volume'] #my convention: always lowercase
df['symbol'] = symbol # add a new column which contains the symbol so we can keep multiple symbols in the same dataframe
df = df.set_index(['date','symbol'])
out = pd.concat([out,df],axis=0) #stacks on top of previously collected data
return out.sort_index()
prices = get_symbols(['AAPL','CSCO','AMZN','YHOO','MSFT'],data_source='quandl',begin_date='2012-01-01',end_date=None)
prices.sort_index().tail()
outcomes = pd.DataFrame(index=prices.index)
# next day's opening change
outcomes['close_1'] = prices.groupby(level='symbol').close.pct_change(-1) # next day's returns
outcomes['close_5'] = prices.groupby(level='symbol').close.pct_change(-5) # next week's returns
outcomes['close_10'] = prices.groupby(level='symbol').close.pct_change(-10) # next two weeks' returns
outcomes['close_20'] = prices.groupby(level='symbol').close.pct_change(-20) # next month's (approx) returns
outcomes.tail()
###Output
_____no_output_____
###Markdown
For purposes of illustration, we'll engineer some features to contain some signal buried within the noise. Clearly, this is not something we'd do in real usage but will help to demonstrate the concept more clearly. Assume we have a target variable called `outcome` which can be (partially) predicted with three factors, `factor_1`, `factor_2` and `factor_3`. There's also an unpredictable noise component. We will "cheat" and create the overall target variable from these factors. All data will follow the same index as the market data we pulled from quandl.
###Code
num_obs = prices.close.count()
factor_1 = pd.Series(np.random.randn(num_obs),index=prices.index)
factor_2 = pd.Series(np.random.randn(num_obs),index=prices.index)
factor_3 = pd.Series(np.random.randn(num_obs),index=prices.index)
outcome = 1.*factor_1 + 2.*factor_2 + 3.*factor_3 + 5.*np.random.randn(num_obs)
outcome.name = 'outcome'
outcome.tail()
###Output
_____no_output_____
###Markdown
Now, we will engineer several variations on features which each contain some information about the three factors, plus a few which contain some interaction effects, and some which do not contain any useful data. Note that we are, again, "cheating" here for illustration purposes.
###Code
features = pd.DataFrame(index=outcome.index)
features['f11'] = 0.2*factor_1 + 0.8*np.random.randn(num_obs)
features['f12'] = 0.4*factor_1 + 0.6*np.random.randn(num_obs)
features['f13'] = 0.6*factor_1 + 0.4*np.random.randn(num_obs)
features['f21'] = 0.2*factor_2 + 0.8*np.random.randn(num_obs)
features['f22'] = 0.4*factor_2 + 0.8*np.random.randn(num_obs)
features['f23'] = 0.6*factor_2 + 0.4*np.random.randn(num_obs)
features['f31'] = 0.2*factor_3 + 0.8*np.random.randn(num_obs)
features['f32'] = 0.4*factor_3 + 0.6*np.random.randn(num_obs)
features['f33'] = 0.6*factor_3 + 0.4*np.random.randn(num_obs)
features['f41'] = 0.2*factor_1+0.2*factor_2 + 0.6*np.random.randn(num_obs)
features['f42'] = 0.2*factor_2+0.2*factor_3 + 0.6*np.random.randn(num_obs)
features['f43'] = 0.2*factor_3+0.2*factor_1 + 0.6*np.random.randn(num_obs)
features['f51'] = np.random.randn(num_obs)
features['f52'] = np.random.randn(num_obs)
features['f53'] = np.random.randn(num_obs)
features.tail()
###Output
_____no_output_____
###Markdown
Next, we'll import the required packages and modules for the feature selection:
###Code
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import display
from scipy.cluster import hierarchy
from scipy.spatial import distance
from sklearn.preprocessing import StandardScaler,Normalizer
###Output
_____no_output_____
###Markdown
Before evaluating the features for predictive strength and orthogonality, we'll do a quick data preparation stage. It is sometimes vital to "standardize" or "normalize" data so that we get fair comparisons between features of differing scale. Strictly speaking, since all of the doctored outcome and feature data is already drawn from a normal distribution (using the numpy function `random.randn()`) we don't really need this step, but it's good practice to include. Here, I'll use the scikit-learn `StandardScaler()` class and some pandas magic to transform the data.
###Code
#f = features.dropna() #optional - to compare apples to apples
# standardize or normalize data
std_scaler = StandardScaler()
features_scaled = std_scaler.fit_transform(features.dropna())
print (features_scaled.shape)
df = pd.DataFrame(features_scaled,index=features.dropna().index)
df.columns = features.dropna().columns
df.tail()
# standardize outcome as well
outcome_df = outcome.to_frame()
outcome_scaled = std_scaler.fit_transform(outcome_df.dropna())
outcome_scaled = pd.DataFrame(outcome_scaled,index=outcome_df.dropna().index)
outcome_scaled.columns = outcome_df.columns
outcome_scaled.tail()
corr = df.corrwith(outcome)
corr.sort_values().plot.barh(color = 'blue',title = 'Strength of Correlation')
###Output
_____no_output_____
###Markdown
Pretend for a minute that we don't know which features are going to be stronger and weaker, and which are going to tend to cluster together. We've got an idea that there are some quite strong features, some weaker, and some useless. Next, we'll take advantage of a very handy seaborn chart type called a "clustermap" which plots a heatmap representation of a correlation matrix and runs a clustering algorithm to group together the most closely related features. Of course, the diagonal of dark green represents each feature being perfectly correlated with itself.
###Code
corr_matrix = df.corr()
correlations_array = np.asarray(corr_matrix)
linkage = hierarchy.linkage(distance.pdist(correlations_array), method='average')
g = sns.clustermap(corr_matrix,row_linkage=linkage,col_linkage=linkage,row_cluster=True,\
col_cluster=True,figsize=(10,10),cmap='Greens')
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.show()
label_order = corr_matrix.iloc[:,g.dendrogram_row.reordered_ind].columns
###Output
_____no_output_____
###Markdown
The algorithm has done a good job of finding the groupings of features. The cluster in the upper left captures `factor_1` (including some of the interaction effects). `factor_3` is fairly well isolated in the lower right corner, and in the middle we can see `factor_2` as well as some of the noise features. Let's next focus in only on those features with correlations of greater than 0.1 to exclude the noise and weak features.
###Code
correlated_features = corr[corr>0.1].index.tolist()
corr_matrix = df[correlated_features].corr()
correlations_array = np.asarray(corr_matrix)
linkage = hierarchy.linkage(distance.pdist(correlations_array), method='average')
g = sns.clustermap(corr_matrix,row_linkage=linkage,col_linkage=linkage,row_cluster=True,\
col_cluster=True,figsize=(6,6),cmap='Greens')
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.show()
label_order = corr_matrix.iloc[:,g.dendrogram_row.reordered_ind].columns
print("Correlation Strength:")
print(corr[corr>0.1].sort_values(ascending=False))
###Output
_____no_output_____
###Markdown
Ah, now the clusters look a bit sharper. We'll follow a simple heuristic to manually select the features. Those wishing to take this to the next level can decide how to encapsulate it into an algorithm (a rough algorithmic version is sketched just after the selection below). 1. Take the most strongly correlated feature (f33) and add it to our list of selected features. 2. Take the next most strongly correlated feature (f23) and check whether it's closely correlated (neighboring in the clustermap) to any feature already chosen. If no, add it to the list. If yes, discard it. 3. Repeat this process until either (1) we've reached the target feature count, or (2) we've run out of strongly correlated features. Following that heuristic, I get:
###Code
selected_features = ['f33','f23','f42','f41','f31']
###Output
_____no_output_____
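###Markdown
The heuristic above can also be made mechanical. Below is a rough sketch of one way to encode it (the 0.1 strength floor, the 0.4 redundancy cap and the five-feature limit are arbitrary choices, not part of the original heuristic): walk the features from strongest to weakest correlation with the outcome, and keep a feature only if its absolute correlation with every feature already selected stays below the cap.
###Code
def select_features(features_df, outcome_corr, min_strength=0.1, max_overlap=0.4, max_features=5):
    # candidate features, strongest correlation with the outcome first
    candidates = outcome_corr[outcome_corr > min_strength].sort_values(ascending=False).index
    feature_corr = features_df.corr().abs()
    selected = []
    for col in candidates:
        # keep the feature only if it is not too collinear with anything already kept
        if all(feature_corr.loc[col, kept] < max_overlap for kept in selected):
            selected.append(col)
        if len(selected) >= max_features:
            break
    return selected

select_features(df, corr)
###Output
_____no_output_____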
###Markdown
Note that this list of features is not simply the highest correlated features. Let's run the clustermap one more time to see if we've missed any major clusters.
###Code
corr_matrix = df[selected_features].corr()
correlations_array = np.asarray(corr_matrix)
linkage = hierarchy.linkage(distance.pdist(correlations_array), method='average')
g = sns.clustermap(corr_matrix,row_linkage=linkage,col_linkage=linkage,row_cluster=True,\
col_cluster=True,figsize=(6,6),cmap='Greens')
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.show()
label_order = corr_matrix.iloc[:,g.dendrogram_row.reordered_ind].columns
###Output
_____no_output_____
###Markdown
Looks generally pretty good. This can be a bit subjective to determine what's "too close" and what's "too weak", but that's the basic idea. Thus far, we've only taken a simple correlation statistic to be representative of predictive power. In my opinion, that's a good place to start but because financial time series data suffers from [non-stationarity]() and [regime change](), we'll plot the rolling correlation of these selected features to see if any is either (1) less correlated now than in times past or (2) very "hot-and-cold".
###Code
tmp = df[selected_features].join(outcome_scaled).reset_index().set_index('date')
tmp.dropna().resample('Q').apply(lambda x: x.corr()).iloc[:,-1].unstack().iloc[:,:-1].plot()
# shows time stability
###Output
_____no_output_____
###Markdown
As expected, since the data wasn't modeled with any non-stationarity, our features all appear to be robust over time. Z-ScoresA very popular/useful transformation for financial time series data is the [z-score](http://stattrek.com/statistics/dictionary.aspx?definition=z-score). We can easily define a generalized lambda function for this, which we can use whenever needed. Importantly, it allows us to mix together very different symbols (some high-beta, some low-beta) in a way that considers the statistical significance of any movement.
###Code
zscore_fxn = lambda x: (x - x.mean()) / x.std()
features['f09'] =prices.groupby(level='symbol').close.apply(zscore_fxn)
features.f09.unstack().plot.kde(title='Z-Scores (not quite accurate)')
###Output
_____no_output_____
###Markdown
However, the above example has a subtle but important bug. It uses the mean _of the whole time frame_ and the standard deviation _of the whole time frame_ to calculate each datapoint. This means we are peeking ahead into the future and the feature is potentially very danger-prone (it'll work famously well in sample and fail to work out of sample...).Fixing this is cumbersome, but necessary.
###Code
zscore_fun_improved = lambda x: (x - x.rolling(window=200, min_periods=20).mean())/ x.rolling(window=200, min_periods=20).std()
features['f10'] =prices.groupby(level='symbol').close.apply(zscore_fun_improved)
features.f10.unstack().plot.kde(title='Z-Scores (Correct)')
###Output
_____no_output_____
###Markdown
PercentileLess commonly used - but equally useful - is the percentile transformation. Getting this done properly in pandas (with groupby and rolling) is possible but tricky. The below example returns the percentile rank (from 0.00 to 1.00) of traded volume for each value as compared to a trailing 200 day period. Note that we need to use _a lambda within a lambda_ to make this work properly. We're on the bleeding edge.
###Code
rollrank_fxn = lambda x: x.rolling(200, min_periods=20).apply(lambda w: pd.Series(w).rank(pct=True).iloc[-1], raw=True)  # percentile rank of the most recent value within its trailing window
features['f11'] = prices.groupby(level='symbol').volume.apply(rollrank_fxn)
###Output
_____no_output_____
###Markdown
Another interesting application of this same pattern is to rank each stock _cross-sectionally_ rather than _longitudinally_ as above. In other words, where does this stock rank within all of the stocks on that day, not for all prior days of that stock. The below example isn't very meaningful with only two stocks, but quite useful when using a realistic universe. In this example, we're also making use of an earlier feature (relative volume) to compare which symbol is most heavily traded _for that stock's normal range_ in a given day. Also note that we need to `dropna()` prior to ranking because `rank` doesn't handle nulls very gracefully.
###Code
features['f12'] = features['f07'].dropna().groupby(level='date').rank(pct=True)
###Output
_____no_output_____
###Markdown
Technical AnalysisThose with a taste for technical analysis may find it difficult to let go of your favored TA techniques. While this is not _my_ favored approach, you'll have no problem engineering features using these methods. From my cursory googling, it looked as though the `ta` package would be a good place to start. Very new and only one contributor but it looks fairly complete and well documented. If you find that it's missing your favorite indicators, consider contributing to the package. If you know of better such packages, please post in the comments below... You may consider mean-centering a technical indicator so that machine learning methods can make better use of the data (or make sure to include that in the pre-processing pipeline when you start modeling).
###Code
import ta # technical analysis library: https://technical-analysis-library-in-python.readthedocs.io/en/latest/
# money flow index (14 day)
features['f13'] = ta.momentum.money_flow_index(prices.high, prices.low, prices.close, prices.volume, n=14, fillna=False)
# mean-centered money flow index
features['f14'] = features['f13'] - features['f13'].rolling(200,min_periods=20).mean()
###Output
_____no_output_____
###Markdown
Alternative RepresentationsA bit different than transforms are "representations", i.e., other ways to represent continuous values. All of the transforms above returned continuous values rather than "labels", and that's often a good place to start - especally for early prototypes.However, you may want to represent the data in different ways, especially if using classification-based approaches or worried about the [curse of dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality) due to large numbers of features. BinningWe can easily convert a continous variable to discrete "bins" (like 1 to 10). This loses information, of course, but sometimes loss of information is a good thing if you are removing more noise than signal. The below example shows volumes converted into ten equally sized buckets. In other words, we've converted a continuous variable into a discrete one. NOTE: this example is not applied in a rolling fashion, so it __does suffer from some data peeking__, a cardinal sin. At the moment, I'm failing in my efforts to implement it in a rolling way. I'd be grateful for code snippets if anyone knows how to do this offhand.
###Code
n_bins = 10
bin_fxn = lambda y: pd.qcut(y,q=n_bins,labels = range(1,n_bins+1))
features['f15'] = prices.volume.groupby(level='symbol').apply(bin_fxn)
###Output
_____no_output_____
###Markdown
SignVery simply, you may wish to convert continuous variables into positive or negative (1 or -1) values, depending on input. For instance, was volume increasing or decreasing today?
###Code
features['f16'] = features['f05'].apply(np.sign)
###Output
_____no_output_____
###Markdown
Plus-MinusYou may be interested in how many days in a row a value has increased (or decreased). Below is a simple pattern to do just that - it calculates the number of up-days minus the number of down days.
###Code
plus_minus_fxn = lambda x: x.rolling(20).sum()
features['f17'] = features['f16'].groupby(level='symbol').apply(plus_minus_fxn)
###Output
_____no_output_____
###Markdown
One-Hot EncodingPossibly the most frequently used alternative representation is "one-hot encoding" where a categorical variable is represented as a binary. For instance, month_of_year would be represented as twelve different columns, each of which was either 0 or 1. January would be [1,0,0,0,...0] etc... This is absolutely crucial in a few circumstances. The first is where there is false meaning in the "ordinality" of values. If we were looking to test the "santa claus effect" hypothesis, it wouldn't be helpful to use a month_of_year feature where January was "the least" and December was "the most". The second is in cases where we are representing events or "states". Does the word "lawsuit" appear within the 10-Q footnotes? Is the company in the blackout period for share buybacks? Finally, the particular machine learning algorithm (tree-based, neural networks) may find it easier to use binary representations than continuous or discrete ones. The below example creates twelve one-hot features, one for each month, and names them automatically
###Code
month_of_year = prices.index.get_level_values(level='date').month
one_hot_frame = pd.DataFrame(pd.get_dummies(month_of_year))
one_hot_frame.index = prices.index # Careful! This is forcing index values without usual pandas alignments!
# create column names
begin_num = int(features.columns[-1][-2:]) + 1 #first available feature
feat_names = ['f'+str(num) for num in list(range(begin_num,begin_num+12,1))]
# rename columns and merge
one_hot_frame.columns = feat_names
features = features.join(one_hot_frame)
###Output
_____no_output_____
###Markdown
Data CleansingOK, I've put this off long enough. It's time to cover the least interesting and possibly most critical aspect of feature engineering... data cleansing! Many will include data cleansing as part of the raw data collection pipeline rather than the feature engineering step - and I can't argue with cleansing data as early in the process as possible. However, your data can never be too clean so I take the "belt and suspenders" approach. Clean your data on collection, clean on usage. Clean, clean, clean! The main tools we'll lean on below are: * to_datetime, to_numeric, astype() (int, string, float...) * fillna (ffill, 0, mean) Data TypingIf you've spent any time with data work in Python, you're already familiar with the sometimes annoying data typing issues of a "duck typed" language. Pandas does an admirable job of inferring types from your data but you'll sometimes want to exercise more control to make sure your data is perfect. The first data typing issue I face is representation of dates and times, which can be represented in several different formats. I prefer to standardize all datetimes using the pandas pd.to_datetime() method which yields two main benefits: (1) you will be able to align and join multiple datetime values together and (2) you'll be able to take advantage of the many pandas date/time functions.Example:
###Code
## code of casting to datetime, selecting weekday etc...
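# A hedged illustration (added); the original cell was left as a placeholder, so the small
# frame below is a made-up example rather than the author's data.
df_dates = pd.DataFrame({'date_str': ['2018-01-02', '2018-01-03', '2018-01-04'],
                         'close': [100.5, 101.2, 99.8]})
df_dates['date'] = pd.to_datetime(df_dates['date_str'])  # cast string -> datetime64
df_dates['weekday'] = df_dates['date'].dt.weekday        # date/time accessors now work
df_dates = df_dates.astype({'close': 'float64'})         # astype() for explicit numeric typing
print(df_dates.dtypes)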
###Output
_____no_output_____
###Markdown
If you fail to control your datetime typing, you'll inevitably end up with difficulty in aligning and joining data on date, like this:
###Code
# example of a str and a datetime repr which are joined on axis=1 and result in an awkward dataframe
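# A hedged illustration (added): the "same" dates stored as strings vs. datetimes do not
# align, so joining them on axis=1 yields an awkward, half-empty dataframe.
s_str = pd.Series([1, 2], index=['2018-01-02', '2018-01-03'])                 # string index
s_dt = pd.Series([3, 4], index=pd.to_datetime(['2018-01-02', '2018-01-03']))  # datetime index
print(pd.concat([s_str, s_dt], axis=1))  # four half-NaN rows instead of two aligned rows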
###Output
_____no_output_____
###Markdown
Among the pandas date/time functions is a very useful resampling method, which allows you to aggregate from a higher frequency (e.g., hourly) to a lower frequency (e.g., daily, weekly, or monthly). Depending on the timeframe of your strategy, you may seek to resample everything to a lower frequency:
###Code
## example of resampling
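# A hedged illustration (added): resample a synthetic hourly series down to daily values.
# With real price/volume data you would typically use .last()/.ohlc() for prices and
# .sum() for volume.
hourly = pd.Series(range(48), index=pd.date_range('2018-01-01', periods=48, freq='H'))
print(hourly.resample('D').last())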
###Output
_____no_output_____
###Markdown
The other main typing issue I find is with numeric types. Number values are commonly represented as integers, floats, and strings which look like integers or floats. Pandas attempts to guess the right type for data when it's loaded (via `read_csv` or `read_sql` etc.). Problems arise when there are some values within a column which don't follow the inferred type. The below example illustrates how a single stray value can break numeric operations, and how `pd.to_numeric()` with `errors='coerce'` fixes it:
###Code
df = pd.DataFrame({'symbol':['a','b','c','d','e'],'price':[1,2,3,4,'None']})
print(df)
print()
print('Average: ',df.mean()) # no results
print()
print('######################')
# retype to numeric
print()
df['price'] = pd.to_numeric(df.price,errors='coerce')
print(df)
print()
print('Average: ',df.mean()) # works
###Output
symbol price
0 a 1
1 b 2
2 c 3
3 d 4
4 e None
Average: Series([], dtype: float64)
######################
symbol price
0 a 1.0
1 b 2.0
2 c 3.0
3 d 4.0
4 e NaN
Average: price 2.5
dtype: float64
###Markdown
Handling Missing DataIncomplete data is a reality for us all. Whether it's because some input sources are of a lower frequency, shorter history (i.e., don't go back as far in time) or have unexplained unavailable data points at times, we need a thoughtful approach for addressing missing data.Most machine learning algorithms require a valid value for each feature at each observation point (or they will fail to run...). If we don't apply some sensible workarounds, we'll end up dropping lots of _valid_ data points because of a single missing feature. Before outlining the tactics and code patterns we can apply, my core principles for data cleansing are:1. Always try to reflect the data you might have applied _at the time_ of the missing data point. In other words, don't peek into the future if at all possible. 2. Drop valid data only as a last resort (and as late in the process as possible). 3. Questionable data (i.e., extreme outliers) should be treated like missing data.
###Code
### Formatting
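# A hedged sketch (added) consistent with the principles above: forward-fill each symbol's
# features using only past observations (assumes rows are sorted by date), then drop rows
# that are still entirely empty because the series simply had not started yet.
filled = features.groupby(level='symbol').ffill()
filled = filled.dropna(how='all')
print('missing values before/after:', features.isnull().sum().sum(), '/', filled.isnull().sum().sum())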
###Output
_____no_output_____
###Markdown
Whew! That was (much) longer than intended. Feature engineering is a broad subject of which I've only scratched the surface. Hopefully this will provide you with a framework and starting point to get your own process up and running so that you can focus on applying your creativity and your expertise to the subject matter of your choice.In the next post of this series, I will outline a process for [feature selection]() - the next logical step following feature engineering. Questions, comments, or suggestions are welcomed below.
###Code
import numpy as np
arrays = [np.array([1,2,3,4,1,2,3,4]),np.array(['bar', 'bar', 'bar', 'bar', 'foo', 'foo', 'foo', 'foo'])]
s = pd.Series(np.array([100,101,102,103,200,201,202,203]), index=arrays)
s.name='values'
df = pd.DataFrame(s, index=arrays).sort_index()
df.index.names =['day','symbol']
print(df)
print(df.groupby(level='symbol').values.diff())
print(df.groupby(level='symbol').values.pct_change())
my_func = lambda x: x.pct_change()
print(df.groupby(level='symbol').values.apply(my_func))
print(df.groupby(level='symbol').values.diff() / df.groupby(level='symbol').values.shift(1))
###Output
values
day symbol
1 bar 100
foo 200
2 bar 101
foo 201
3 bar 102
foo 202
4 bar 103
foo 203
day symbol
1 bar NaN
foo NaN
2 bar 1.0
foo 1.0
3 bar 1.0
foo 1.0
4 bar 1.0
foo 1.0
Name: values, dtype: float64
day symbol
1 bar NaN
foo 1.000000
2 bar -0.495000
foo 0.990099
3 bar -0.492537
foo 0.980392
4 bar -0.490099
foo 0.970874
Name: values, dtype: float64
day symbol
1 bar NaN
foo NaN
2 bar 0.010000
foo 0.005000
3 bar 0.009901
foo 0.004975
4 bar 0.009804
foo 0.004950
Name: values, dtype: float64
day symbol
1 bar NaN
foo NaN
2 bar 0.010000
foo 0.005000
3 bar 0.009901
foo 0.004975
4 bar 0.009804
foo 0.004950
Name: values, dtype: float64
|
tests/TestAsync.ipynb | ###Markdown
When running in a separate thread, no error is raised
###Code
ipytest.config(run_in_thread=True)
ipytest.run()
###Output
_____no_output_____
###Markdown
When running in a separate thread, no error is raised
###Code
ipytest.config(run_in_thread=True)
ipytest.run()
###Output
_____no_output_____ |
JWH_assignment_DS_222.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 2, Module 2*--- Random Forests Assignment- [ ] Read [“Adopting a Hypothesis-Driven Workflow”](https://outline.com/5S5tsB), a blog post by a Lambda DS student about the Tanzania Waterpumps challenge.- [ ] Continue to participate in our Kaggle challenge.- [ ] Define a function to wrangle train, validate, and test sets in the same way. Clean outliers and engineer features.- [ ] Try Ordinal Encoding.- [ ] Try a Random Forest Classifier.- [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)- [ ] Commit your notebook to your fork of the GitHub repo. Stretch Goals Doing- [ ] Add your own stretch goal(s) !- [ ] Do more exploratory data analysis, data cleaning, feature engineering, and feature selection.- [ ] Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/).- [ ] Get and plot your feature importances.- [ ] Make visualizations and share on Slack. ReadingTop recommendations in _**bold italic:**_ Decision Trees- A Visual Introduction to Machine Learning, [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/), and _**[Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/)**_- [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.htmladvantages-2)- [How a Russian mathematician constructed a decision tree — by hand — to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/)- [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html)- [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU) Random Forests- [_An Introduction to Statistical Learning_](http://www-bcf.usc.edu/~gareth/ISL/), Chapter 8: Tree-Based Methods- [Coloring with Random Forests](http://structuringtheunstructured.blogspot.com/2017/11/coloring-with-random-forests.html)- _**[Random Forests for Complete Beginners: The definitive guide to Random Forests and Decision Trees](https://victorzhou.com/blog/intro-to-random-forests/)**_ Categorical encoding for trees- [Are categorical variables getting lost in your random forests?](https://roamanalytics.com/2016/10/28/are-categorical-variables-getting-lost-in-your-random-forests/)- [Beyond One-Hot: An Exploration of Categorical Variables](http://www.willmcginnis.com/2015/11/29/beyond-one-hot-an-exploration-of-categorical-variables/)- _**[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)**_- _**[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)**_- [Mean (likelihood) encodings: a comprehensive study](https://www.kaggle.com/vprokopev/mean-likelihood-encodings-a-comprehensive-study)- [The Mechanics of Machine Learning, Chapter 6: Categorically Speaking](https://mlbook.explained.ai/catvars.html) Imposter Syndrome- [Effort Shock and Reward Shock (How The Karate Kid Ruined The Modern World)](http://www.tempobook.com/2014/07/09/effort-shock-and-reward-shock/)- [How to manage impostor syndrome in data 
science](https://towardsdatascience.com/how-to-manage-impostor-syndrome-in-data-science-ad814809f068)- ["I am not a real data scientist"](https://brohrer.github.io/imposter_syndrome.html)- _**[Imposter Syndrome in Data Science](https://caitlinhudon.com/2018/01/19/imposter-syndrome-in-data-science/)**_ More Categorical Encodings**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:- **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.- **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).**2.** The short video **[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.Category Encoders has multiple implementations of this general concept:- [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)- [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)- [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)- [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)- [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)- [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)Category Encoder's mean encoding implementations work for regression problems or binary classification problems. For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:```pythonencoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) Both parameters > 1 to avoid overfittingX_train_encoded = encoder.fit_transform(X_train, y_train=='functional')X_val_encoded = encoder.transform(X_train, y_val=='functional')```For this reason, mean encoding won't work well within pipelines for multi-class classification problems.**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.```python dirty_cat.TargetEncoder(clf_type='multiclass-clf')```It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. And you may need to use it with one column at a time, instead of with your whole dataframe.**4. 
[Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals._**I hope it’s not too frustrating or confusing that there’s not one “canonical” way to encode categoricals. It’s an active area of research and experimentation! Maybe you can make your own contributions!**_ SetupYou can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab (run the code cell below).
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set_style('darkgrid')
import category_encoders as ce
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
#import pandas as pd
#from sklearn.model_selection import train_test_split
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
train.shape, test.shape
features = ['basin', 'public_meeting', 'scheme_management', 'permit', 'extraction_type',
'management', 'payment', 'water_quality', 'quantity', 'source', 'waterpoint_type']
target = 'status_group'
def artpipe(train, test):
X_train, X_valid, y_train, y_valid = train_test_split(train[features], train[target])
X_test = test[features]
encoder = ce.OrdinalEncoder()
X_train = encoder.fit_transform(X_train)
X_valid = encoder.transform(X_valid)
X_test = encoder.transform(X_test)
model = RandomForestClassifier(n_estimators = 144, max_depth = 12, n_jobs = -1)
model.fit(X_train, y_train)
yv_pred = model.predict(X_valid)
yt_pred = model.predict(X_test)
return yt_pred
submission = sample_submission.copy()
submission['status_group'] = artpipe(train, test)
submission.to_csv('submission.csv', index=False)
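# A hedged sketch (added): the stretch goals mention plotting feature importances, but
# artpipe() does not return its fitted model, so this refits a small forest outside the
# function purely for illustration.
X_imp = ce.OrdinalEncoder().fit_transform(train[features])
rf_imp = RandomForestClassifier(n_estimators=50, n_jobs=-1).fit(X_imp, train[target])
pd.Series(rf_imp.feature_importances_, index=features).sort_values().plot.barh(title='Feature importances');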
###Output
_____no_output_____ |
3 - Naïve Bayes’ Classifier/hw03.ipynb | ###Markdown
Read Data
###Code
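# (Added note) The cells below rely on numpy, pandas, matplotlib and a numerically safe
# log helper that appear to have been defined in an earlier cell not shown here; a typical
# set of definitions is assumed below.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def safelog(x):
    return np.log(x + 1e-100)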
images = np.genfromtxt('hw03_data_set_images.csv',delimiter=',').reshape(5, 39, 320)
labels = np.char.strip(np.genfromtxt('hw03_data_set_labels.csv',delimiter=',',dtype=str),'"').reshape(5,39)
for index, key in enumerate(['A', 'B', 'C', 'D', 'E']):
labels[labels == key] = index + 1
labels = labels.astype(int)
training_images = images[:,:25,:].reshape(125, 320)
training_l = labels[:,:25].reshape(125,)
test_images = images[:,25:,:].reshape(70, 320)
test_l = labels[:,25:].reshape(70,)
K = np.max(training_l)
N_train = training_l.shape[0]
N_test = test_l.shape[0]
training_labels = np.zeros((N_train, K)).astype(int)
training_labels[range(N_train), training_l - 1] = 1
test_labels = np.zeros((N_test, K)).astype(int)
test_labels[range(N_test), test_l - 1] = 1
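# Estimate the class-conditional Bernoulli parameters: pcd[c][d] is the fraction of the 25
# class-c training images in which pixel d is "on".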
pcd = []
for i in range(5):
pcd.append(np.sum(training_images[i*25:(i+1)*25], axis=0).flatten()/(training_images.shape[0]/K))
print("pcd[0] => ", pcd[0][:10], "...")
print("pcd[1] => ", pcd[1][:10], "...")
print("pcd[2] => ", pcd[2][:10], "...")
print("pcd[3] => ", pcd[3][:10], "...")
print("pcd[4] => ", pcd[4][:10], "...")
fig, axs = plt.subplots(1,5,figsize=(15,15), sharey=True)
for i in range(5):
axs[i].imshow(pcd[i].reshape(16,20).T, cmap="Greys",interpolation='none')
plt.show()
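# Naive Bayes score: Bernoulli log-likelihood of an image under each class's pixel
# probabilities plus the log prior (uniform prior of 1/5 = 0.2 over the five classes).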
def score_func(x, pcd):
    return [np.dot(x.T, safelog(pcd[c])) + np.dot((1 - x.T), safelog(1 - pcd[c])) + safelog(0.2) for c in range(5)]
training_scores = np.zeros((125,5))
for i in range(125):
training_scores[i] = score_func(training_images, pcd)
y_predicted = np.argmax(training_scores, axis = 1) + 1
confusion_matrix = pd.crosstab(y_predicted, np.sum(training_labels * np.array([1,2,3,4,5]), axis=1), rownames = ['y_predicted'], colnames = ['y_train'])
print("\n",confusion_matrix)
print("\n====================")
print("\n====================")
test_scores = np.zeros((70,5))
for i in range(70):
    test_scores[i] = score_func(test_images[i], pcd)
y_predicted_test = np.argmax(test_scores, axis = 1) + 1
confusion_matrix = pd.crosstab(y_predicted_test, np.sum(test_labels * np.array([1,2,3,4,5]), axis=1), rownames = ['y_predicted'], colnames = ['y_test'])
print(confusion_matrix)
###Output
y_test 1 2 3 4 5
y_predicted
1 7 0 0 0 0
2 0 11 3 2 4
3 0 0 7 0 0
4 7 3 3 12 0
5 0 0 1 0 10
|
tutorials/old_generation_notebooks/jupyter/3- Build your own French POS tagger.ipynb | ###Markdown
 Train POS Tagger in French by Spark NLP Based on Universal Dependency `UD_French-GSD`
###Code
import sys
import time
#Spark ML and SQL
from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql.functions import array_contains
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
#Spark NLP
import sparknlp
from sparknlp.annotator import *
from sparknlp.common import RegexRule
from sparknlp.base import DocumentAssembler, Finisher
###Output
_____no_output_____
###Markdown
Let's create a Spark Session for our app
###Code
spark = sparknlp.start()
print("Spark NLP version: ", sparknlp.version())
print("Apache Spark version: ", spark.version)
###Output
Spark NLP version: 2.6.0
Apache Spark version: 2.4.4
###Markdown
Let's prepare our training datasets containing `token_posTag` like `de_DET`. You can download this data set from Amazon S3:```wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp```
###Code
! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp
from sparknlp.training import POS
training_data = POS().readDataset(spark, '/tmp/UD_French-GSD_2.3.txt', '_', 'tags')
training_data.show()
document_assembler = DocumentAssembler() \
.setInputCol("text")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")\
.setExceptions(["jusqu'", "aujourd'hui", "États-Unis", "lui-même", "celui-ci", "c'est-à-dire", "celle-ci", "au-dessus", "etc.", "sud-est", "Royaume-Uni", "ceux-ci", "au-delà", "elle-même", "peut-être", "sud-ouest", "nord-ouest", "nord-est", "Etats-Unis", "Grande-Bretagne", "Pays-Bas", "eux-mêmes", "porte-parole", "Notre-Dame", "puisqu'", "week-end", "quelqu'un", "celles-ci", "chef-lieu"])\
.setPrefixPattern("\\A([^\\s\\p{L}\\d\\$\\.#]*)")\
.setSuffixPattern("([^\\s\\p{L}\\d]?)([^\\s\\p{L}\\d]*)\\z")\
.setInfixPatterns([
"([\\p{L}\\w]+'{1})",
"([\\$#]?\\d+(?:[^\\s\\d]{1}\\d+)*)",
"((?:\\p{L}\\.)+)",
"((?:\\p{L}+[^\\s\\p{L}]{1})+\\p{L}+)",
"([\\p{L}\\w]+)"
])
posTagger = PerceptronApproach() \
.setNIterations(1) \
.setInputCols(["sentence", "token"]) \
.setOutputCol("pos") \
.setPosCol("tags")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
posTagger
])
# Let's train our Pipeline by using our training dataset
model = pipeline.fit(training_data)
###Output
_____no_output_____
###Markdown
This is our testing DataFrame where we get some sentences in French. We are going to use our trained Pipeline to transform these sentences and predict each token's `Part Of Speech`.
###Code
dfTest = spark.createDataFrame([
"Je sens qu'entre ça et les films de médecins et scientifiques fous que nous avons déjà vus, nous pourrions emprunter un autre chemin pour l'origine.",
"On pourra toujours parler à propos d'Averroès de décentrement du Sujet."
], StringType()).toDF("text")
predict = model.transform(dfTest)
predict.select("token.result", "pos.result").show(truncate=50)
###Output
+--------------------------------------------------+--------------------------------------------------+
| result| result|
+--------------------------------------------------+--------------------------------------------------+
|[Je, sens, qu'entre, ça, et, les, films, de, mé...|[PRON, NOUN, ADP, PRON, CCONJ, DET, NOUN, ADP, ...|
|[On, pourra, toujours, parler, à, propos, d'Ave...|[PRON, VERB, ADV, VERB, ADP, NOUN, ADJ, ADP, NO...|
+--------------------------------------------------+--------------------------------------------------+
###Markdown
 Train POS Tagger in French by Spark NLP Based on Universal Dependency `UD_French-GSD`
###Code
import sys
import time
#Spark ML and SQL
from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql.functions import array_contains
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
#Spark NLP
import sparknlp
from sparknlp.annotator import *
from sparknlp.common import RegexRule
from sparknlp.base import DocumentAssembler, Finisher
###Output
_____no_output_____
###Markdown
Let's create a Spark Session for our app
###Code
spark = sparknlp.start()
print("Spark NLP version: ", sparknlp.version())
print("Apache Spark version: ", spark.version)
###Output
_____no_output_____
###Markdown
Let's prepare our training datasets containing `token_posTag` like `de_DET`. You can download this data set from Amazon S3:```wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp```
###Code
! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp
from sparknlp.training import POS
training_data = POS().readDataset(spark, '/tmp/UD_French-GSD_2.3.txt', '_', 'tags')
training_data.show()
document_assembler = DocumentAssembler() \
.setInputCol("text")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")\
.setExceptions(["jusqu'", "aujourd'hui", "États-Unis", "lui-même", "celui-ci", "c'est-à-dire", "celle-ci", "au-dessus", "etc.", "sud-est", "Royaume-Uni", "ceux-ci", "au-delà", "elle-même", "peut-être", "sud-ouest", "nord-ouest", "nord-est", "Etats-Unis", "Grande-Bretagne", "Pays-Bas", "eux-mêmes", "porte-parole", "Notre-Dame", "puisqu'", "week-end", "quelqu'un", "celles-ci", "chef-lieu"])\
.setPrefixPattern("\\A([^\\s\\p{L}\\d\\$\\.#]*)")\
.setSuffixPattern("([^\\s\\p{L}\\d]?)([^\\s\\p{L}\\d]*)\\z")\
.setInfixPatterns([
"([\\p{L}\\w]+'{1})",
"([\\$#]?\\d+(?:[^\\s\\d]{1}\\d+)*)",
"((?:\\p{L}\\.)+)",
"((?:\\p{L}+[^\\s\\p{L}]{1})+\\p{L}+)",
"([\\p{L}\\w]+)"
])
posTagger = PerceptronApproach() \
.setNIterations(1) \
.setInputCols(["sentence", "token"]) \
.setOutputCol("pos") \
.setPosCol("tags")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
posTagger
])
# Let's train our Pipeline by using our training dataset
model = pipeline.fit(training_data)
###Output
_____no_output_____
###Markdown
This is our testing DataFrame where we get some sentences in French. We are going to use our trained Pipeline to transform these sentences and predict each token's `Part Of Speech`.
###Code
dfTest = spark.createDataFrame([
"Je sens qu'entre ça et les films de médecins et scientifiques fous que nous avons déjà vus, nous pourrions emprunter un autre chemin pour l'origine.",
"On pourra toujours parler à propos d'Averroès de décentrement du Sujet."
], StringType()).toDF("text")
predict = model.transform(dfTest)
predict.select("token.result", "pos.result").show(truncate=50)
###Output
_____no_output_____ |
PARTE 1.2. Web Scraping gilmar.ipynb | ###Markdown
 > **WEB SCRAPER**_Gilmar real estate agency._ ------ STEP 0. IMPORT LIBRARIES.
###Code
from bs4 import BeautifulSoup
from selenium import webdriver
import pandas as pd
from time import sleep, strftime
###Output
_____no_output_____
###Markdown
------ STEP 1. FUNCTION TO EXTRACT INFORMATION FROM THE WEBSITE.
###Code
def sacar_info(url):
chrome_driver = "C:/Users/nuria/Bootcamp Data Science/02. Febrero/chromedriver"
options = webdriver.ChromeOptions()
options.add_argument('--incognito')
driver = webdriver.Chrome(executable_path=chrome_driver,options = options)
driver.get(url)
    # Extract the characteristics of each listing.
zona_barrio = driver.find_element_by_xpath("//p[@class = 'zona']").text
precio = driver.find_element_by_xpath("//div[@class = 'referencia']/p[3]").text.split(": ")[1]
precio_m2 = driver.find_element_by_xpath("//div[@class = 'referencia']/p[4][1]").text.split(": ")[1]
m2_construidos = driver.find_element_by_xpath("//ul[2]/li[1]/span").text
habs = driver.find_element_by_xpath("//ul[2]/li[2]/span").text.split(" ")[0]
baños = driver.find_element_by_xpath("//ul[2]/li[3]/span").text.split(" ")[0]
terraza = driver.find_element_by_xpath("//ul[2]/li[4]/span").text.split(", ")[0]
trastero = driver.find_element_by_xpath("//ul[2]/li[5]/span").text
garaje = driver.find_element_by_xpath("//ul[2]/li[6]/span").text.split(",")[0]
return zona_barrio, precio, precio_m2, m2_construidos, habs, baños, terraza, trastero, garaje
###Output
_____no_output_____
###Markdown
------ STEP 2. EXTRACT INFORMATION FOR EACH DISTRICT. 1. Centro.
###Code
urls_1centro = []
for i in range(1,103):
try:
urls_1centro.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/centro/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_1centro = []
for i in urls_1centro:
info_piso = sacar_info(i)
lista_1centro.append(info_piso)
df_1_centro = pd.DataFrame(lista_1centro, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_1_centro.to_excel("df_1_centro.xlsx",index=True)
###Output
_____no_output_____
###Markdown
2. Arganzuela.
###Code
urls_2arganzuela = []
for i in range(1,47):
try:
urls_2arganzuela.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/arganzuela/gilmar(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_2arganzuela = []
for i in urls_2arganzuela:
info_piso = sacar_info(i)
lista_2arganzuela.append(info_piso)
df_2_arganzuela = pd.DataFrame(lista_2arganzuela, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_2_arganzuela.to_excel("df_2_arganzuela.xlsx",index=True)
###Output
_____no_output_____
###Markdown
3. Retiro.
###Code
urls_3retiro = []
for i in range(1,85):
try:
urls_3retiro.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/retiro/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_3retiro = []
for i in urls_3retiro:
info_piso = sacar_info(i)
lista_3retiro.append(info_piso)
df_3_retiro = pd.DataFrame(lista_3retiro, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_3_retiro.to_excel("df_3_retiro.xlsx",index=True)
###Output
_____no_output_____
###Markdown
4. Salamanca.
###Code
urls_4salamanca = []
for i in range(1,152):
try:
urls_4salamanca.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/salamanca/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_4salamanca = []
for i in urls_4salamanca:
info_piso = sacar_info(i)
lista_4salamanca.append(info_piso)
df_4_salamanca = pd.DataFrame(lista_4salamanca, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_4_salamanca.to_excel("df_4_salamanca.xlsx",index=True)
###Output
_____no_output_____
###Markdown
5. Chamartín.
###Code
urls_5chamartin = []
for i in range(1,107):
try:
urls_5chamartin.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/chamartin/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_5chamartin = []
for i in urls_5chamartin:
info_piso = sacar_info(i)
lista_5chamartin.append(info_piso)
df_5_chamartin = pd.DataFrame(lista_5chamartin, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_5_chamartin.to_excel("df_5_chamartin.xlsx",index=True)
###Output
_____no_output_____
###Markdown
6. Tetuán. (No flats in this district on Gilmar) 7. Chamberí.
###Code
urls_7chamberi = []
for i in range(1,53):
try:
urls_7chamberi.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/chamberi/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_7chamberi = []
for i in urls_7chamberi:
info_piso = sacar_info(i)
lista_7chamberi.append(info_piso)
df_7_chamberi = pd.DataFrame(lista_7chamberi, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_7_chamberi.to_excel("df_7_chamberi.xlsx",index=True)
###Output
_____no_output_____
###Markdown
8. Fuencarral.
###Code
urls_8fuencarral = []
for i in range(1,29):
try:
urls_8fuencarral.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/fuencarral/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_8fuencarral = []
for i in urls_8fuencarral:
info_piso = sacar_info(i)
lista_8fuencarral.append(info_piso)
df_8_fuencarral = pd.DataFrame(lista_8fuencarral, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_8_fuencarral.to_excel("df_8_fuencarral.xlsx",index=True)
###Output
_____no_output_____
###Markdown
9. Moncloa.
###Code
urls_9moncloa = []
for i in range(1,32):
try:
urls_9moncloa.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/moncloa/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_9moncloa = []
for i in urls_9moncloa:
info_piso = sacar_info(i)
lista_9moncloa.append(info_piso)
df_9_moncloa = pd.DataFrame(lista_9moncloa, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_9_moncloa.to_excel("df_9_moncloa.xlsx",index=True)
###Output
_____no_output_____
###Markdown
10. Latina.
###Code
urls_10latina = []
for i in range(1,12):
try:
urls_10latina.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/latina/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_10latina = []
for i in urls_10latina:
info_piso = sacar_info(i)
lista_10latina.append(info_piso)
df_10_latina = pd.DataFrame(lista_10latina, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_10_latina.to_excel("df_10_latina.xlsx",index=True)
###Output
_____no_output_____
###Markdown
11. Carabanchel.
###Code
urls_11carabanchel = []
for i in range(1,27):
try:
urls_11carabanchel.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/carabanchel/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_11carabanchel = []
for i in urls_11carabanchel:
info_piso = sacar_info(i)
lista_11carabanchel.append(info_piso)
df_11_carabanchel = pd.DataFrame(lista_11carabanchel, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_11_carabanchel.to_excel("df_11_carabanchel.xlsx",index=True)
###Output
_____no_output_____
###Markdown
12. Usera. (No hay pisos de este barrio en gilmar) 13. Puente de Vallecas.
###Code
urls_13ptevallecas = []
for i in range(1,7):
try:
urls_13ptevallecas.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/puente%20de%20vallecas/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_13ptevallecas = []
for i in urls_13ptevallecas:
info_piso = sacar_info(i)
lista_13ptevallecas.append(info_piso)
df_13_ptevallecas = pd.DataFrame(lista_13ptevallecas, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_13_ptevallecas.to_excel("df_13_ptevallecas.xlsx",index=True)
###Output
_____no_output_____
###Markdown
14. Moratalaz.
###Code
urls_14moratalaz = []
for i in range(1,4):
try:
urls_14moratalaz.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/moratalaz/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_14moratalaz = []
for i in urls_14moratalaz:
info_piso = sacar_info(i)
lista_14moratalaz.append(info_piso)
df_14_moratalaz = pd.DataFrame(lista_14moratalaz, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_14_moratalaz.to_excel("df_14_moratalaz.xlsx",index=True)
###Output
_____no_output_____
###Markdown
15. Ciudad Lineal.
###Code
urls_15clineal = []
for i in range(1,37):
try:
urls_15clineal.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/ciudad%20lineal/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_15clineal = []
for i in urls_15clineal:
info_piso = sacar_info(i)
lista_15clineal.append(info_piso)
df_15_clineal = pd.DataFrame(lista_15clineal, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_15_clineal.to_excel("df_15_clineal.xlsx",index=True)
###Output
_____no_output_____
###Markdown
16. Hortaleza. (No flats in this district on Gilmar) 17. Villaverde.
###Code
urls_17villaverde = []
for i in range(1,7):
try:
urls_17villaverde.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/villaverde/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_17villaverde = []
for i in urls_17villaverde:
info_piso = sacar_info(i)
lista_17villaverde.append(info_piso)
df_17_villaverde = pd.DataFrame(lista_17villaverde, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_17_villaverde.to_excel("df_17_villaverde.xlsx",index=True)
###Output
_____no_output_____
###Markdown
18. Villa de Vallecas.
###Code
urls_18villavallecas = []
for i in range(1,3):
try:
urls_18villavallecas.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/villa%20de%20vallecas/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_18villavallecas = []
for i in urls_18villavallecas:
info_piso = sacar_info(i)
lista_18villavallecas.append(info_piso)
df_18_villavallecas = pd.DataFrame(lista_18villavallecas, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_18_villavallecas.to_excel("df_18_villavallecas.xlsx",index=True)
###Output
_____no_output_____
###Markdown
19. Vicálvaro.
###Code
urls_19vicalvaro = []
for i in range(1,2):
try:
urls_19vicalvaro.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/vicalvaro/gilmar(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_19vicalvaro = []
for i in urls_19vicalvaro:
info_piso = sacar_info(i)
lista_19vicalvaro.append(info_piso)
df_19_vicalvaro = pd.DataFrame(lista_19vicalvaro, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_19_vicalvaro.to_excel("df_19_vicalvaro.xlsx",index=True)
###Output
_____no_output_____
###Markdown
20. San Blás.
###Code
urls_20sanblas = []
for i in range(1,11):
try:
urls_20sanblas.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/San%20blas/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_20sanblas = []
for i in urls_20sanblas:
info_piso = sacar_info(i)
lista_20sanblas.append(info_piso)
df_20_sanblas = pd.DataFrame(lista_20sanblas, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_20_sanblas.to_excel("df_20_sanblas.xlsx",index=True)
###Output
_____no_output_____
###Markdown
21. Barajas.
###Code
urls_21barajas = []
for i in range(1,3):
try:
urls_21barajas.append("C:/Users/nuria/Bootcamp%20Data%20Science/Proyecto%20Final/Html%20scraper/barajas/gilmar%20(" + str(i) + ").html")
except:
print("No hay más htmls")
lista_21barajas = []
for i in urls_21barajas:
info_piso = sacar_info(i)
lista_21barajas.append(info_piso)
df_21_barajas = pd.DataFrame(lista_21barajas, columns = ["Barrio", "Precio", "Precio_m2", "m2", "habitaciones", "baños", "terraza", "trastero", "garaje"])
df_21_barajas.to_excel("df_21_barajas.xlsx",index=True)
###Output
_____no_output_____ |
examples/Notebooks/flopy3_grid_intersection_demo.ipynb | ###Markdown
Intersecting grids with shapes_Note: This feature requires the shapely module (which is not a dependency of flopy) so must be installed by the user._This notebook shows the grid intersection functionality in flopy. The intersection methods are available through the GridIntersect object. A flopy model grid is passed to instantiate the object. Then the modelgrid can be intersected with Points, LineStrings and Polygons through the different intersect methods. There are two intersection modes: - the first (default mode) is accessed by passing `method='strtree'` to `GridIntersect` and converts the modelgrid to a list of shapes that are sorted into an STR-tree to allow fast spatial queries. This works on structured and vertex grids.- the second only works on structured grids and is accessed by passing `method='structured'` to `GridIntersect`. These methods use information from the structured grid to limit the search space for intersections and are generally faster.This notebook showcases the functionality of the GridIntersect class. Table of Contents- [GridIntersect Class](gridclass)- [Rectangular regular grid](rectgrid) - [Polygon with regular grid](rectgrid.1) - [Polyline with regular grid](rectgrid.2) - [MultiPoint with regular grid](rectgrid.3)- [Triangular grid](trigrid) - [Polygon with triangular grid](trigrid.1) - [Polyline with triangular grid](trigrid.2) - [MultiPoint with triangular grid](trigrid.3)- [Tests](tests)- [Timings](timings) Import some stuff
###Code
import sys
import os
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# run installed version of flopy or add local path
try:
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.triangle import Triangle as Triangle
from flopy.utils.gridintersect import GridIntersect
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.triangle import Triangle as Triangle
from flopy.utils.gridintersect import GridIntersect
import shapely
from shapely.geometry import Polygon, Point, LineString, MultiLineString, MultiPoint, MultiPolygon
from shapely.strtree import STRtree
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
triangle_exe = None
###Output
_____no_output_____
###Markdown
[GridIntersect Class](top)This GridIntersect class takes a flopy.mfgrid and by default converts it to a list of Shapely geometries and builds a STRTree which can be used to efficiently query the grid to perform intersections. If the method is set to 'structured', the STR-tree is not built and different intersection methods are applied (written by Chris Langevin). The following methods are available:- ` _rect_grid_to_shape_list`: convert rectangular (structured) modflow grid to list of shapely geometries- `_sort_strtree_result`: sort STRTree by cellid (to ensure lowest cellid is returned when shapes intersect with multiple grid cells)- `_usg_grid_to_shape_list`: not yet implemented, convert unstructured grid to list of shapely geometries- `_vtx_grid_to_shape_list`: convert vertex modflow grid to list of shapely geometries- `_intersect_point_shapely`: intersect Shapely point with grid- `_intersect_polygon_shapely`: intersect Shapely Polygon with grid- `_intersect_linestring_shapely`: intersect Shapely LineString with grid- `_intersect_point_structured`: intersect Shapely point with grid, using optimized search for structured grids- `_intersect_polygon_structured`: intersect Shapely Polygon with grid, using optimized search for structured grids- `_intersect_rectangle_structured`: intersect rectangle with grid to get intersecting node ids- `_intersect_linestring_structured`: intersect Shapely LineString with structured grid, using optimized search for structured grids- `_check_adjacent_cells_intersecting_line`: helper function to check adjacent cells in a structured grid for line intersections- `_get_nodes_intersecting_linestring`: helper function to follow linestring through structured grid- `intersect_point`: intersect point with grid, method depends on whether 'structured' or 'strtree' is passed at intialization.- `intersect_linestring`: intersect linestring with grid, method depends on whether 'structured' or 'strtree' is passed at intialization.- `intersect_polygon`: intersect polygon with grid, method depends on whether 'structured' or 'strtree' is passed at intialization.- `plot_point`: plot intersect result for point- `plot_polygon`: plot intersect result for polygons- `plot_polyline`: plot intersect result for linestrings [Rectangular regular grid](top)
###Code
delc = 10*np.ones(10, dtype=np.float)
delr = 10*np.ones(10, dtype=np.float)
xoff = 0.
yoff = 0.
angrot = 0.
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None, xoff=xoff, yoff=yoff, angrot=angrot)
sgr.plot()
###Output
_____no_output_____
###Markdown
[Polygon with regular grid](top)Polygon to intersect with:
###Code
p = Polygon(shell=[(15, 15), (20, 50), (35, 80.), (80, 50), (80, 40), (40, 5), (15, 12)],
holes=[[(25, 25), (25, 45), (45, 45), (45, 25)]])
###Output
_____no_output_____
###Markdown
Create GridIntersect class
###Code
ix = GridIntersect(sgr)
###Output
_____no_output_____
###Markdown
Do the intersect operation for a polygon
###Code
result = ix.intersect_polygon(p)
%timeit ix.intersect_polygon(p)
###Output
8.05 ms ± 435 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
The results are returned as a numpy.recarray containing several fields based on the intersection performed. An explanation of the data in each of the possible fields is given below:- cellids: contains the cell ids of the intersected grid cells- vertices: contains the vertices of the intersected shape- areas: contains the area of the polygon in that grid cell (only for polygons)- lengths: contains the length of the linestring in that grid cell (only for linestrings)- ixshapes: contains the shapely object representing the intersected shape (useful for plotting the result)Looking at the data for the polygon intersection (convert to pandas.DataFrame for prettier formatting)
###Code
# pd.DataFrame(result)
result
###Output
_____no_output_____
###Markdown
Visualizing the results
###Code
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_polygon(result, ax=ax)
# only cells that intersect with shape
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
###Output
_____no_output_____
###Markdown
Alternatively, the intersection can be calculated using special methods optimized for structured grids. Access these methods by instantiating the GridIntersect class with the `method="structured"` keyword argument.
###Code
ixs = GridIntersect(sgr, method="structured")
# pd.DataFrame(ixs.intersect_polygon(p)).head()
ixs.intersect_polygon(p)
###Output
_____no_output_____
###Markdown
[Polyline with regular grid](top)MultiLineString to intersect with:
###Code
ls1 = LineString([(95, 105), (30, 50)])
ls2 = LineString([(30, 50), (90, 22)])
ls3 = LineString([(90, 22), (0, 0)])
mls = MultiLineString(lines=[ls1, ls2, ls3])
%timeit ix.intersect_linestring(mls)
result = ix.intersect_linestring(mls)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_linestring(result, ax=ax)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
###Output
_____no_output_____
###Markdown
Same as before, the intersection for structured grids can also be performed with a different method optimized for structured grids.
###Code
ixs = GridIntersect(sgr, method="structured")
# pd.DataFrame(ixs.intersect_linestring(mls)).head()
ixs.intersect_linestring(mls)
###Output
_____no_output_____
###Markdown
[MultiPoint with regular grid](top)MultiPoint to intersect with
###Code
mp = MultiPoint(points=[Point(50.0, 0.0), Point(45., 45.),
Point(10., 10.), Point(150., 100.)])
result = ix.intersect_point(mp)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_point(result, ax=ax, s=50)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
###Output
_____no_output_____
###Markdown
Same as before, the intersection for structured grids can also be performed with a different method optimized for structured grids.
###Code
ixs = GridIntersect(sgr, method="structured")
# pd.DataFrame(ixs.intersect_point(mp))
ixs.intersect_point(mp)
###Output
_____no_output_____
###Markdown
[Triangular Grid](top)
###Code
maximum_area = 50.
x0, x1, y0, y1 = sgr.extent
domainpoly = [(x0, y0), (x0, y1), (x1, y1), (x1, y0)]
tri = Triangle(maximum_area=maximum_area, angle=30, model_ws=".",
exe_name=triangle_exe)
tri.add_polygon(domainpoly)
tri.build(verbose=False)
cell2d = tri.get_cell2d()
vertices = tri.get_vertices()
tgr = fgrid.VertexGrid(vertices, cell2d)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(modelgrid=tgr)
pmv.plot_grid(ax=ax)
###Output
_____no_output_____
###Markdown
[Polygon with triangular grid](top)
###Code
ix2 = GridIntersect(tgr)
result = ix2.intersect_polygon(p)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix.plot_polygon(result, ax=ax)
# only cells that intersect with shape
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
###Output
_____no_output_____
###Markdown
[LineString with triangular grid](top)
###Code
result = ix2.intersect_linestring(mls)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_linestring(result, ax=ax, lw=3)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
###Output
_____no_output_____
###Markdown
[MultiPoint with triangular grid](top)
###Code
result = ix2.intersect_point(mp)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_point(result, ax=ax)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="return one intersecting grid cell per point")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
###Output
_____no_output_____
###Markdown
[Tests](top)Tests are written for Points, LineStrings and Polygons for both rectangular (regular) grids, triangular grids, and rotated and offset regular grids.
###Code
!pytest --cov-report term --cov gridintersect ../../autotest/t065_test_gridintersect.py
###Output
============================= test session starts =============================
platform win32 -- Python 3.7.3, pytest-4.3.1, py-1.8.0, pluggy-0.9.0
rootdir: C:\GitHub\flopy_db, inifile:
plugins: remotedata-0.3.1, openfiles-0.3.2, doctestplus-0.3.0, cov-2.7.1, arraydiff-0.3
collected 0 items / 1 errors
WARNING: Failed to generate report: No data to report.
=================================== ERRORS ====================================
____________ ERROR collecting autotest/t065_test_gridintersect.py _____________
ImportError while importing test module 'C:\GitHub\flopy_db\autotest\t065_test_gridintersect.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
..\..\autotest\t065_test_gridintersect.py:12: in <module>
from flopy.utils.gridintersect import GridIntersect
E ModuleNotFoundError: No module named 'flopy.utils.gridintersect'
------------------------------- Captured stdout -------------------------------
flopy is installed in C:\Users\dbrak\Anaconda3\lib\site-packages\flopy
----------- coverage: platform win32, python 3.7.3-final-0 -----------
Name Stmts Miss Cover
---------------------------
!!!!!!!!!!!!!!!!!!! Interrupted: 1 errors during collection !!!!!!!!!!!!!!!!!!!
=========================== 1 error in 3.19 seconds ===========================
###Markdown
[Timings](top)Comparing performance for the different methods in a large grid. Some helper functions are defined below.
###Code
def ix_shapely_point(nrnc, npoints=100):
results = []
delc = np.ones(nrnc, dtype=np.float)
delr = np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr)
points = np.random.random((npoints, 2)) * nrnc
for p in [Point(x, y) for x, y in points]:
results.append(ix.intersect_point(p))
return np.concatenate(results, axis=0)
def ix_structured_point(nrnc, npoints=100):
delc = np.ones(nrnc, dtype=np.float)
delr = np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr, method="structured")
points = np.random.random((npoints, 2)) * nrnc
mp = MultiPoint(points=[Point(x, y) for x, y in points])
return ix.intersect_point(mp)
def ix_shapely_linestring(nrnc, ls=None):
if ls is None:
ls = LineString([(0, 0), (nrnc/3, nrnc)])
delc = np.ones(nrnc, dtype=np.float)
delr = np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr)
return ix.intersect_linestring(ls)
def ix_structured_linestring(nrnc, ls=None):
if ls is None:
ls = LineString([(0, 0), (nrnc/3, nrnc)])
delc = np.ones(nrnc, dtype=np.float)
delr = np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr, method="structured")
return ix.intersect_linestring(ls)
def ix_shapely_polygon(nrnc, p=Polygon([(10, 10), (540, 430), (730, 80), (250, 0)])):
delc = np.ones(nrnc, dtype=np.float)
delr = np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr)
return ix.intersect_polygon(p)
def ix_structured_polygon(nrnc, p=Polygon([(10, 10), (540, 430), (730, 80), (250, 0)])):
delc = np.ones(nrnc, dtype=np.float)
delr = np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr, method="structured")
return ix.intersect_polygon(p)
###Output
_____no_output_____
###Markdown
Below are some results of `%timeit` runs of some intersections on a 1000 x 1000 structured grid. For obvious reasons not having to build the STR-tree saves a significant amount of time for large grids (~ 15 seconds on my laptop).
###Code
# nrnc = 1000 # no rows and columns
nrnc = 10 # save time when testing notebook
###Output
_____no_output_____
###Markdown
For point intersections, most of the time required by the shapely approach is needed to build the STR-tree (~15 s). Obviously, the pure numpy approach used in structured mode is unbeatable.
###Code
%timeit -n 1 -r 1 ix_shapely_point(nrnc, npoints=100)
%timeit ix_structured_point(nrnc, npoints=2)
###Output
234 µs ± 15.2 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
For linestrings, following the linestring through the grid (in structured mode) reduces the amount of intersection calls by a significant amount. This is where the downside of the STR-tree query is obvious. The bounding box of the linestring covers about one third of the grid. The query only reduces the search-space by 2/3 leaving ~333k cells to try to intersect with. On top of the building of the STR-tree the intersection calls take another ~15 seconds.(Cutting the linestring into pieces would probably improve performance.)
###Code
%timeit -n 1 -r 1 ix_shapely_linestring(nrnc)
%timeit ix_structured_linestring(nrnc)
###Output
2.93 ms ± 209 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
For Polygons the difference between structured mode and shapely mode is less obvious. Building the STR-tree (~15s) and doing the intersect (~20s) takes a little bit longer than performing the intersection in structured mode. However, note that intersecting with a second similarly sized polygon in shapely mode will only require ~20s, whereas in structured mode the required time will remain ~30 seconds. For repeated intersections with Polygons, the shapely method might be preferred over the structured method.
###Code
%timeit -n 1 -r 1 ix_shapely_polygon(nrnc)
%timeit -n 1 -r 1 ix_structured_polygon(nrnc)
###Output
3.94 ms ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)
###Markdown
Intersecting model grids with shapes_Note: This feature requires the shapely and descartes packages (which are not a FloPy dependency) so must be installed by the user._This notebook shows the grid intersection functionality in flopy. The intersection methods are available through the GridIntersect object. A flopy modelgrid is passed to instantiate the object. Then the modelgrid can be intersected with Points, LineStrings and Polygons through the different intersection methods. There are three intersection modes: - the first (default mode) builds an STR-tree for fast spatial queries before calculating intersections, thereby reducing the number of grid cells it has to process. This method works for structured and vertex grids.- the second method does not construct the STR-tree, and loops through all gridcells to determine the intersection between the grid and the shape. This method also works for structured and vertex grids.- the third method only works for structured grids and uses information from the structured grid to limit the search space for intersections.This notebook showcases the functionality of the GridIntersect class. Table of Contents- [GridIntersect Class](gridclass)- [Rectangular regular grid](rectgrid) - [Polygon with regular grid](rectgrid.1) - [MultiLineString with regular grid](rectgrid.2) - [MultiPoint with regular grid](rectgrid.3)- [Vertex grid](trigrid) - [Polygon with triangular grid](trigrid.1) - [MultiLineString with triangular grid](trigrid.2) - [MultiPoint with triangular grid](trigrid.3) Import some stuff
###Code
import sys
import os
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.insert(1, "../..")
# run installed version of flopy or add local path
try:
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
import shapely
from shapely.geometry import Polygon, Point, LineString, MultiLineString, MultiPoint, MultiPolygon
from shapely.strtree import STRtree
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
###Output
3.8.6 | packaged by conda-forge | (default, Oct 7 2020, 18:42:56)
[Clang 10.0.1 ]
numpy version: 1.18.5
matplotlib version: 3.2.2
flopy version: 3.3.3
###Markdown
[GridIntersect Class](top)The GridIntersect class is constructed by passing a flopy modelgrid object to the constructor. There are options users can select to change how the intersection is calculated.- `method`: either `"vertex"` (default) or `"structured"`. If `"structured"` is passed, the intersections are performed using structured methods. These methods use information about the regular grid to limit the search space for intersection calculations. - `rtree`: either `True` (default) or `False`, only read when `method="vertex"`. When True, an STR-tree is built, which allows for fast spatial queries. Building the STR-tree does take some time however. Setting the option to False avoids building the STR-tree but requires the intersection calculation to essentially loop through all grid cells.In general the default option is robust (it always works) and fast and is therefore recommended in most situations. If you are working with a structured grid, then the `method="structured"` can speed up intersection operations (especially for points and linestrings) with the added advantage of not having to build an STR-tree. In some cases with vertex grids, it might not be worth your time building the STR-tree, in which case it can be avoided by passing `rtree=False`.The important methods in the GridIntersect object are:- `intersects()`: returns cellids for gridcells that intersect a shape- `intersect()`: for intersecting the modelgrid with point, linestrings, and polygon geometries (intersect can accept shapely geometry objects, flopy geometry object, shapefile.Shape objects, and geojson objects)- `plot_point()`: for plotting point intersection results- `plot_linestring()`: for plotting linestring intersection results- `plot_polygon()`: for plotting polygon intersection resultsIn the following sections examples of intersections are shown for structured and vertex grids for different types of shapes (Polygon, LineString and Point). [Rectangular regular grid](top)
###Code
delc = 10*np.ones(10, dtype=float)
delr = 10*np.ones(10, dtype=float)
xoff = 0.
yoff = 0.
angrot = 0.
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None, xoff=xoff, yoff=yoff, angrot=angrot)
sgr.plot()
###Output
_____no_output_____
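###Markdown
To make the modes described in the introduction concrete, here is a small sketch (not part of the original notebook) that constructs GridIntersect on the grid from the previous cell in each of the three ways:
###Code
# Sketch: the three ways to set up GridIntersect discussed above.
ix_tree = GridIntersect(sgr, method="vertex", rtree=True)    # default: build an STR-tree
ix_loop = GridIntersect(sgr, method="vertex", rtree=False)   # no tree, loop over all cells
ix_struct = GridIntersect(sgr, method="structured")          # structured-grid shortcuts
###Output
_____no_output_____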
###Markdown
[Polygon with regular grid](top)Polygon to intersect with:
###Code
p = Polygon(shell=[(15, 15), (20, 50), (35, 80.), (80, 50),
(80, 40), (40, 5), (15, 12)],
holes=[[(25, 25), (25, 45), (45, 45), (45, 25)]])
###Output
_____no_output_____
###Markdown
Create the GridIntersect class for our modelgrid. The keyword arguments are shown below, but as these are the default options, they do not necessarily need to be passed.
###Code
ix = GridIntersect(sgr, method="vertex", rtree=True)
###Output
_____no_output_____
###Markdown
Do the intersect operation for a polygon
###Code
%timeit ix.intersect(p)
result = ix.intersect(p)
###Output
_____no_output_____
###Markdown
The results are returned as a numpy.recarray containing several fields based on the intersection performed. An explanation of the data in each of the possible fields is given below:- **cellids**: contains the cell ids of the intersected grid cells- **vertices**: contains the vertices of the intersected shape- **areas**: contains the area of the polygon in that grid cell (only for polygons)- **lengths**: contains the length of the linestring in that grid cell (only for linestrings)- **ixshapes**: contains the shapely object representing the intersected shape (useful for plotting the result)Looking at the first few entries of the results of the polygon intersection (convert to pandas.DataFrame for prettier formatting)
###Code
result[:5]
# pd.DataFrame(result) # for prettier formatting
###Output
_____no_output_____
###Markdown
The cellids can be easily obtained
###Code
result.cellids
###Output
_____no_output_____
###Markdown
Or the areas
###Code
result.areas
###Output
_____no_output_____
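###Markdown
As a quick consistency check (a sketch, not part of the original notebook), the per-cell areas should add up to the area of the polygon itself, since the polygon lies entirely within the grid and shapely's `area` already accounts for the hole:
###Code
# Sketch: the intersected areas should sum to the polygon area (hole excluded).
total_area = sum(result.areas)
total_area, p.area, abs(total_area - p.area) < 1e-6
###Output
_____no_output_____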
###Markdown
If a user is only interested in which cells the shape intersects with (and not the areas or the actual shape of the intersected object), there is also the `intersects()` method. This method works for all types of shapely geometries.
###Code
ix.intersects(p)
###Output
_____no_output_____
###Markdown
The results of an intersection can be visualized with the plotting methods in the `GridIntersect` object:- `plot_polygon`- `plot_linestring`- `plot_point`
###Code
# create a figure and plot the grid
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
# the intersection object contains some helpful plotting commands
ix.plot_polygon(result, ax=ax)
# add black x at cell centers
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0],
"kx", label="centroids of intersected gridcells")
# add legend
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Alternatively, the intersection can be calculated using special methods optimized for structured grids. Access these methods by instantiating the GridIntersect class with the `method="structured"` keyword argument.
###Code
ixs = GridIntersect(sgr, method="structured")
###Output
_____no_output_____
###Markdown
The methods are optimized for structured grids, but for certain types of polygons there is no benefit (as can be seen in this example).
###Code
%timeit ixs.intersect(p)
###Output
10.4 ms ± 50.9 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
The result is the same as before:
###Code
result2 = ixs.intersect(p)
result2[:5]
###Output
_____no_output_____
###Markdown
[MultiLineString with regular grid](top)MultiLineString to intersect with:
###Code
ls1 = LineString([(95, 105), (30, 50)])
ls2 = LineString([(30, 50), (90, 22)])
ls3 = LineString([(90, 22), (0, 0)])
mls = MultiLineString(lines=[ls1, ls2, ls3])
%timeit ix.intersect(mls)
result = ix.intersect(mls)
###Output
_____no_output_____
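###Markdown
As a quick consistency check (a sketch, not part of the original notebook), the per-cell lengths should add up to the length of the part of the MultiLineString that lies inside the grid extent (part of the first segment falls outside the grid):
###Code
# Sketch: compare the summed per-cell lengths with the line clipped to the grid extent.
from shapely.geometry import box
x0, x1, y0, y1 = sgr.extent
clipped = mls.intersection(box(x0, y0, x1, y1))
total_length = sum(result.lengths)
total_length, clipped.length, abs(total_length - clipped.length) < 1e-6
###Output
_____no_output_____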
###Markdown
Plot the result
###Code
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_linestring(result, ax=ax)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
%timeit ixs.intersect(mls)
###Output
6.59 ms ± 356 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
[MultiPoint with regular grid](top)MultiPoint to intersect with
###Code
mp = MultiPoint(points=[Point(50.0, 0.0), Point(45., 45.),
Point(10., 10.), Point(150., 100.)])
%timeit ix.intersect(mp)
result = ix.intersect(mp)
result
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_point(result, ax=ax, s=50)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
%timeit ixs.intersect(mp)
ixs.intersect(mp)
###Output
_____no_output_____
###Markdown
[Vertex Grid](top)
###Code
cell2d = [[0, 83.33333333333333, 66.66666666666667, 3, 4, 2, 7],
[1, 16.666666666666668, 33.333333333333336, 3, 4, 0, 5],
[2, 33.333333333333336, 83.33333333333333, 3, 1, 8, 4],
[3, 16.666666666666668, 66.66666666666667, 3, 5, 1, 4],
[4, 33.333333333333336, 16.666666666666668, 3, 6, 0, 4],
[5, 66.66666666666667, 16.666666666666668, 3, 4, 3, 6],
[6, 83.33333333333333, 33.333333333333336, 3, 7, 3, 4],
[7, 66.66666666666667, 83.33333333333333, 3, 8, 2, 4]]
vertices = [[0, 0.0, 0.0],
[1, 0.0, 100.0],
[2, 100.0, 100.0],
[3, 100.0, 0.0],
[4, 50.0, 50.0],
[5, 0.0, 50.0],
[6, 50.0, 0.0],
[7, 100.0, 50.0],
[8, 50.0, 100.0]]
tgr = fgrid.VertexGrid(vertices, cell2d)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(modelgrid=tgr)
pmv.plot_grid(ax=ax)
###Output
_____no_output_____
###Markdown
[Polygon with triangular grid](top)
###Code
ix2 = GridIntersect(tgr)
result = ix2.intersect(p)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix.plot_polygon(result, ax=ax)
# only cells that intersect with shape
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[MultiLineString with triangular grid](top)
###Code
result = ix2.intersect(mls)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_linestring(result, ax=ax, lw=3)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[MultiPoint with triangular grid](top)
###Code
result = ix2.intersect(mp)
result
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_point(result, ax=ax)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid],
"kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Intersecting model grids with shapes_Note: This feature requires the shapely and descartes packages (which are not a FloPy dependency) so must be installed by the user._This notebook shows the grid intersection functionality in flopy. The intersection methods are available through the GridIntersect object. A flopy modelgrid is passed to instantiate the object. Then the modelgrid can be intersected with Points, LineStrings and Polygons through the different intersection methods. There are three intersection modes: - the first (default mode) builds an STR-tree for fast spatial queries before calculating intersections, thereby reducing the number of grid cells it has to process. This method works for structured and vertex grids.- the second method does not construct the STR-tree, and loops through all gridcells to determine the intersection between the grid and the shape. This method also works for structured and vertex grids.- the third method only works for structured grids and uses information from the structured grid to limit the search space for intersections.This notebook showcases the functionality of the GridIntersect class. Table of Contents- [GridIntersect Class](gridclass)- [Rectangular regular grid](rectgrid) - [Polygon with regular grid](rectgrid.1) - [MultiLineString with regular grid](rectgrid.2) - [MultiPoint with regular grid](rectgrid.3)- [Vertex grid](trigrid) - [Polygon with triangular grid](trigrid.1) - [MultiLineString with triangular grid](trigrid.2) - [MultiPoint with triangular grid](trigrid.3) Import some stuff
###Code
import sys
import os
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.insert(1, "../..")
# run installed version of flopy or add local path
try:
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
import shapely
from shapely.geometry import Polygon, Point, LineString, MultiLineString, MultiPoint, MultiPolygon
from shapely.strtree import STRtree
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
###Output
3.7.7 (default, Mar 26 2020, 10:32:53)
[Clang 4.0.1 (tags/RELEASE_401/final)]
numpy version: 1.19.2
matplotlib version: 3.3.0
flopy version: 3.3.2
###Markdown
[GridIntersect Class](top)The GridIntersect class is constructed by passing a flopy modelgrid object to the constructor. There are options users can select to change how the intersection is calculated.- `method`: either `"vertex"` (default) or `"structured"`. If `"structured"` is passed, the intersections are performed using structured methods. These methods use information about the regular grid to limit the search space for intersection calculations. - `rtree`: either `True` (default) or `False`, only read when `method="vertex"`. When True, an STR-tree is built, which allows for fast spatial queries. Building the STR-tree does take some time however. Setting the option to False avoids building the STR-tree but requires the intersection calculation to essentially loop through all grid cells.In general the default option is robust (it always works) and fast and is therefore recommended in most situations. If you are working with a structured grid, then the `method="structured"` can speed up intersection operations (especially for points and linestrings) with the added advantage of not having to build an STR-tree. In some cases with vertex grids, it might not be worth your time building the STR-tree, in which case it can be avoided by passing `rtree=False`.The important methods in the GridIntersect object are:- `intersects()`: returns cellids for gridcells that intersect a shape- `intersect()`: for intersecting the modelgrid with point, linestrings, and polygon geometries (intersect can accept shapely geometry objects, flopy geometry object, shapefile.Shape objects, and geojson objects)- `plot_point()`: for plotting point intersection results- `plot_linestring()`: for plotting linestring intersection results- `plot_polygon()`: for plotting polygon intersection resultsIn the following sections examples of intersections are shown for structured and vertex grids for different types of shapes (Polygon, LineString and Point). [Rectangular regular grid](top)
###Code
delc = 10*np.ones(10, dtype=float)
delr = 10*np.ones(10, dtype=float)
xoff = 0.
yoff = 0.
angrot = 0.
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None, xoff=xoff, yoff=yoff, angrot=angrot)
sgr.plot()
###Output
_____no_output_____
###Markdown
[Polygon with regular grid](top)Polygon to intersect with:
###Code
p = Polygon(shell=[(15, 15), (20, 50), (35, 80.), (80, 50),
(80, 40), (40, 5), (15, 12)],
holes=[[(25, 25), (25, 45), (45, 45), (45, 25)]])
###Output
_____no_output_____
###Markdown
Create the GridIntersect class for our modelgrid. The keyword arguments are shown below, but as these are the default options, they do not necessarily need to be passed.
###Code
ix = GridIntersect(sgr, method="vertex", rtree=True)
###Output
_____no_output_____
###Markdown
Do the intersect operation for a polygon
###Code
%timeit ix.intersect(p)
result = ix.intersect(p)
###Output
_____no_output_____
###Markdown
The results are returned as a numpy.recarray containing several fields based on the intersection performed. An explanation of the data in each of the possible fields is given below:- **cellids**: contains the cell ids of the intersected grid cells- **vertices**: contains the vertices of the intersected shape- **areas**: contains the area of the polygon in that grid cell (only for polygons)- **lengths**: contains the length of the linestring in that grid cell (only for linestrings)- **ixshapes**: contains the shapely object representing the intersected shape (useful for plotting the result)Looking at the first few entries of the results of the polygon intersection (convert to pandas.DataFrame for prettier formatting)
###Code
result[:5]
# pd.DataFrame(result) # for prettier formatting
###Output
_____no_output_____
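###Markdown
The commented-out `pd.DataFrame(result)` call above assumes pandas is available; a minimal sketch of that prettier display (pandas is not imported elsewhere in this notebook):
###Code
# Sketch: a recarray converts directly to a DataFrame for nicer display.
import pandas as pd
pd.DataFrame(result).head()
###Output
_____no_output_____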
###Markdown
The cellids can be easily obtained
###Code
result.cellids
###Output
_____no_output_____
###Markdown
Or the areas
###Code
result.areas
###Output
_____no_output_____
###Markdown
If a user is only interested in which cells the shape intersects with (and not the areas or the actual shape of the intersected object), there is also the `intersects()` method. This method works for all types of shapely geometries.
###Code
ix.intersects(p)
###Output
_____no_output_____
###Markdown
The results of an intersection can be visualized with the plotting methods in the `GridIntersect` object:- `plot_polygon`- `plot_linestring`- `plot_point`
###Code
# create a figure and plot the grid
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
# the intersection object contains some helpful plotting commands
ix.plot_polygon(result, ax=ax)
# add black x at cell centers
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0],
"kx", label="centroids of intersected gridcells")
# add legend
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Alternatively, the intersection can be calculated using special methods optimized for structured grids. Access these methods by instantiating the GridIntersect class with the `method="structured"` keyword argument.
###Code
ixs = GridIntersect(sgr, method="structured")
###Output
_____no_output_____
###Markdown
The methods are optimized for structured grids, but for certain types of polygons there is no benefit (as can be seen in this example).
###Code
%timeit ixs.intersect(p)
###Output
11.1 ms ± 828 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
The result is the same as before:
###Code
result2 = ixs.intersect(p)
result2[:5]
###Output
_____no_output_____
###Markdown
[MultiLineString with regular grid](top)MultiLineString to intersect with:
###Code
ls1 = LineString([(95, 105), (30, 50)])
ls2 = LineString([(30, 50), (90, 22)])
ls3 = LineString([(90, 22), (0, 0)])
mls = MultiLineString(lines=[ls1, ls2, ls3])
%timeit ix.intersect(mls)
result = ix.intersect(mls)
###Output
_____no_output_____
###Markdown
Plot the result
###Code
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_linestring(result, ax=ax)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
%timeit ixs.intersect(mls)
###Output
7.22 ms ± 277 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
[MultiPoint with regular grid](top)MultiPoint to intersect with
###Code
mp = MultiPoint(points=[Point(50.0, 0.0), Point(45., 45.),
Point(10., 10.), Point(150., 100.)])
%timeit ix.intersect(mp)
result = ix.intersect(mp)
result
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_point(result, ax=ax, s=50)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
%timeit ixs.intersect(mp)
ixs.intersect(mp)
###Output
_____no_output_____
###Markdown
[Vertex Grid](top)
###Code
cell2d = [[0, 83.33333333333333, 66.66666666666667, 3, 4, 2, 7],
[1, 16.666666666666668, 33.333333333333336, 3, 4, 0, 5],
[2, 33.333333333333336, 83.33333333333333, 3, 1, 8, 4],
[3, 16.666666666666668, 66.66666666666667, 3, 5, 1, 4],
[4, 33.333333333333336, 16.666666666666668, 3, 6, 0, 4],
[5, 66.66666666666667, 16.666666666666668, 3, 4, 3, 6],
[6, 83.33333333333333, 33.333333333333336, 3, 7, 3, 4],
[7, 66.66666666666667, 83.33333333333333, 3, 8, 2, 4]]
vertices = [[0, 0.0, 0.0],
[1, 0.0, 100.0],
[2, 100.0, 100.0],
[3, 100.0, 0.0],
[4, 50.0, 50.0],
[5, 0.0, 50.0],
[6, 50.0, 0.0],
[7, 100.0, 50.0],
[8, 50.0, 100.0]]
tgr = fgrid.VertexGrid(vertices, cell2d)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(modelgrid=tgr)
pmv.plot_grid(ax=ax)
###Output
_____no_output_____
###Markdown
[Polygon with triangular grid](top)
###Code
ix2 = GridIntersect(tgr)
result = ix2.intersect(p)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix.plot_polygon(result, ax=ax)
# only cells that intersect with shape
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[MultiLineString with triangular grid](top)
###Code
result = ix2.intersect(mls)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_linestring(result, ax=ax, lw=3)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[MultiPoint with triangular grid](top)
###Code
result = ix2.intersect(mp)
result
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_point(result, ax=ax)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid],
"kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Intersecting grids with shapes_Note: This feature requires the shapely and descartes packages (which are not a FloPy dependency) so must be installed by the user._This notebook shows the grid intersection functionality in flopy. The intersection methods are available through the GridIntersect object. A flopy model grid is passed to instantiate the object. Then the modelgrid can be intersected with Points, LineStrings and Polygons through the different intersect methods. There are two intersection modes: - the first (default mode) is accessed by passing `method='strtree'` to `GridIntersect` and converts the modelgrid to a list of shapes that are sorted into an STR-tree to allow fast spatial queries. This works on structured and vertex grids.- the second only works on structured grids and is accessed by passing `method='structured'` to `GridIntersect`. These methods use information from the structured grid to limit the search space for intersections and are generally faster.This notebook showcases the functionality of the GridIntersect class. Table of Contents- [GridIntersect Class](gridclass)- [Rectangular regular grid](rectgrid) - [Polygon with regular grid](rectgrid.1) - [Polyline with regular grid](rectgrid.2) - [MultiPoint with regular grid](rectgrid.3)- [Triangular grid](trigrid) - [Polygon with triangular grid](trigrid.1) - [Polyline with triangular grid](trigrid.2) - [MultiPoint with triangular grid](trigrid.3)- [Tests](tests)- [Timings](timings) Import some stuff
###Code
import sys
import os
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# run installed version of flopy or add local path
try:
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.triangle import Triangle as Triangle
from flopy.utils.gridintersect import GridIntersect
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.triangle import Triangle as Triangle
from flopy.utils.gridintersect import GridIntersect
import shapely
from shapely.geometry import Polygon, Point, LineString, MultiLineString, MultiPoint, MultiPolygon
from shapely.strtree import STRtree
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
triangle_exe = None
###Output
_____no_output_____
###Markdown
[GridIntersect Class](top)The GridIntersect class takes a flopy.mfgrid and by default converts it to a list of Shapely geometries and builds an STR-tree which can be used to efficiently query the grid to perform intersections. If the method is set to 'structured', the STR-tree is not built and different intersection methods are applied (written by Chris Langevin). The following methods are available:- `_rect_grid_to_shape_list`: convert rectangular (structured) modflow grid to list of shapely geometries- `_sort_strtree_result`: sort STR-tree result by cellid (to ensure lowest cellid is returned when shapes intersect with multiple grid cells)- `_usg_grid_to_shape_list`: not yet implemented, convert unstructured grid to list of shapely geometries- `_vtx_grid_to_shape_list`: convert vertex modflow grid to list of shapely geometries- `_intersect_point_shapely`: intersect Shapely point with grid- `_intersect_polygon_shapely`: intersect Shapely Polygon with grid- `_intersect_linestring_shapely`: intersect Shapely LineString with grid- `_intersect_point_structured`: intersect Shapely point with grid, using optimized search for structured grids- `_intersect_polygon_structured`: intersect Shapely Polygon with grid, using optimized search for structured grids- `_intersect_rectangle_structured`: intersect rectangle with grid to get intersecting node ids- `_intersect_linestring_structured`: intersect Shapely LineString with structured grid, using optimized search for structured grids- `_check_adjacent_cells_intersecting_line`: helper function to check adjacent cells in a structured grid for line intersections- `_get_nodes_intersecting_linestring`: helper function to follow linestring through structured grid- `intersect_point`: intersect point with grid, method depends on whether 'structured' or 'strtree' is passed at initialization.- `intersect_linestring`: intersect linestring with grid, method depends on whether 'structured' or 'strtree' is passed at initialization.- `intersect_polygon`: intersect polygon with grid, method depends on whether 'structured' or 'strtree' is passed at initialization.- `plot_point`: plot intersect result for point- `plot_polygon`: plot intersect result for polygons- `plot_polyline`: plot intersect result for linestrings [Rectangular regular grid](top)
###Code
delc = 10*np.ones(10, dtype=np.float)
delr = 10*np.ones(10, dtype=np.float)
xoff = 0.
yoff = 0.
angrot = 0.
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None, xoff=xoff, yoff=yoff, angrot=angrot)
sgr.plot()
###Output
_____no_output_____
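###Markdown
A small sketch (not part of the original notebook) of the two modes described above; both objects are built on the grid defined in the previous cell:
###Code
# Sketch: default STR-tree based mode versus the structured-grid mode.
ix_tree = GridIntersect(sgr)                         # default 'strtree' mode
ix_struct = GridIntersect(sgr, method="structured")  # structured-grid shortcuts
###Output
_____no_output_____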
###Markdown
[Polygon with regular grid](top)Polygon to intersect with:
###Code
p = Polygon(shell=[(15, 15), (20, 50), (35, 80.), (80, 50), (80, 40), (40, 5), (15, 12)],
holes=[[(25, 25), (25, 45), (45, 45), (45, 25)]])
###Output
_____no_output_____
###Markdown
Create GridIntersect class
###Code
ix = GridIntersect(sgr)
###Output
_____no_output_____
###Markdown
Do the intersect operation for a polygon
###Code
result = ix.intersect_polygon(p)
%timeit ix.intersect_polygon(p)
###Output
10.5 ms ± 87 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
The results are returned as a numpy.recarray containing several fields based on the intersection performed. An explanation of the data in each of the possible fields is given below:- cellids: contains the cell ids of the intersected grid cells- vertices: contains the vertices of the intersected shape- areas: contains the area of the polygon in that grid cell (only for polygons)- lengths: contains the length of the linestring in that grid cell (only for linestrings)- ixshapes: contains the shapely object representing the intersected shape (useful for plotting the result)Looking at the data for the polygon intersection (convert to pandas.DataFrame for prettier formatting)
###Code
# pd.DataFrame(result)
result
###Output
_____no_output_____
###Markdown
Visualizing the results
###Code
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_polygon(result, ax=ax)
# only cells that intersect with shape
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0],
"kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
###Output
_____no_output_____
###Markdown
Alternatively, the intersection can be calculated using special methods optimized for structured grids. Access these methods by instantiating the GridIntersect class with the `method="structured"` keyword argument.
###Code
ixs = GridIntersect(sgr, method="structured")
# pd.DataFrame(ixs.intersect_polygon(p)).head()
ixs.intersect_polygon(p)
###Output
_____no_output_____
###Markdown
[Polyline with regular grid](top)MultiLineString to intersect with:
###Code
ls1 = LineString([(95, 105), (30, 50)])
ls2 = LineString([(30, 50), (90, 22)])
ls3 = LineString([(90, 22), (0, 0)])
mls = MultiLineString(lines=[ls1, ls2, ls3])
%timeit ix.intersect_linestring(mls)
result = ix.intersect_linestring(mls)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_linestring(result, ax=ax)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
# pd.DataFrame(ixs.intersect_linestring(mls)).head()
ixs.intersect_linestring(mls)
###Output
_____no_output_____
###Markdown
[MultiPoint with regular grid](top)MultiPoint to intersect with
###Code
mp = MultiPoint(points=[Point(50.0, 0.0), Point(45., 45.),
Point(10., 10.), Point(150., 100.)])
result = ix.intersect_point(mp)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_point(result, ax=ax, s=50)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
# pd.DataFrame(ixs.intersect_point(mp))
ixs.intersect_point(mp)
###Output
_____no_output_____
###Markdown
[Triangular Grid](top)
###Code
maximum_area = 50.
x0, x1, y0, y1 = sgr.extent
domainpoly = [(x0, y0), (x0, y1), (x1, y1), (x1, y0)]
tri = Triangle(maximum_area=maximum_area, angle=30, model_ws=".",
exe_name=triangle_exe)
tri.add_polygon(domainpoly)
tri.build(verbose=False)
cell2d = tri.get_cell2d()
vertices = tri.get_vertices()
tgr = fgrid.VertexGrid(vertices, cell2d)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(modelgrid=tgr)
pmv.plot_grid(ax=ax)
###Output
_____no_output_____
###Markdown
[Polygon with triangular grid](top)
###Code
ix2 = GridIntersect(tgr)
result = ix2.intersect_polygon(p)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix.plot_polygon(result, ax=ax)
# only cells that intersect with shape
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
###Output
_____no_output_____
###Markdown
[LineString with triangular grid](top)
###Code
result = ix2.intersect_linestring(mls)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_linestring(result, ax=ax, lw=3)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
###Output
_____no_output_____
###Markdown
[MultiPoint with triangular grid](top)
###Code
result = ix2.intersect_point(mp)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_point(result, ax=ax)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="return one intersecting grid cell per point")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best")
###Output
_____no_output_____
###Markdown
[Tests](top)Tests are written for Points, LineStrings and Polygons on rectangular (regular) grids, triangular grids, and rotated and offset regular grids.
###Code
!pytest --cov-report term --cov gridintersect ../../autotest/t065_test_gridintersect.py
###Output
/bin/sh: pytest: command not found
###Markdown
[Timings](top)Comparing performance of the different methods on a large grid. Some helper functions are defined below.
###Code
def ix_shapely_point(nrnc, npoints=100):
results = []
delc = np.ones(nrnc, dtype=np.float)
delr = np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr)
points = np.random.random((npoints, 2)) * nrnc
for p in [Point(x, y) for x, y in points]:
results.append(ix.intersect_point(p))
return np.concatenate(results, axis=0)
def ix_structured_point(nrnc, npoints=100):
delc = np.ones(nrnc, dtype=np.float)
delr = np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr, method="structured")
points = np.random.random((npoints, 2)) * nrnc
mp = MultiPoint(points=[Point(x, y) for x, y in points])
return ix.intersect_point(mp)
def ix_shapely_linestring(nrnc, ls=None):
if ls is None:
ls = LineString([(0, 0), (nrnc/3, nrnc)])
delc = np.ones(nrnc, dtype=np.float)
delr = np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr)
return ix.intersect_linestring(ls)
def ix_structured_linestring(nrnc, ls=None):
if ls is None:
ls = LineString([(0, 0), (nrnc/3, nrnc)])
delc = np.ones(nrnc, dtype=np.float)
delr = np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr, method="structured")
return ix.intersect_linestring(ls)
def ix_shapely_polygon(nrnc, p=Polygon([(10, 10), (540, 430), (730, 80), (250, 0)])):
delc = np.ones(nrnc, dtype=np.float)
delr = np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr)
return ix.intersect_polygon(p)
def ix_structured_polygon(nrnc, p=Polygon([(10, 10), (540, 430), (730, 80), (250, 0)])):
delc = np.ones(nrnc, dtype=np.float)
delr = np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr, method="structured")
return ix.intersect_polygon(p)
###Output
_____no_output_____
###Markdown
Below are some results of `%timeit` runs of intersections on a 1000 x 1000 structured grid. For obvious reasons, not having to build the STR-tree saves a significant amount of time for large grids (~15 seconds on my laptop).
###Code
# nrnc = 1000 # no rows and columns
nrnc = 10 # save time when testing notebook
###Output
_____no_output_____
###Markdown
For point intersections, most of the time required by the shapely approach is needed to build the STR-tree (~15 s). Obviously, the pure numpy approach used in structured mode is unbeatable.
###Code
%timeit -n 1 -r 1 ix_shapely_point(nrnc, npoints=100)
%timeit ix_structured_point(nrnc, npoints=2)
###Output
267 µs ± 10.9 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
For linestrings, following the linestring through the grid (in structured mode) reduces the number of intersection calls significantly. This is where the downside of the STR-tree query becomes obvious: the bounding box of the linestring covers about one third of the grid, so the query only reduces the search space by 2/3, leaving ~333k cells to try to intersect with. On top of building the STR-tree, the intersection calls take another ~15 seconds. (Cutting the linestring into pieces would probably improve performance.)
###Code
%timeit -n 1 -r 1 ix_shapely_linestring(nrnc)
%timeit ix_structured_linestring(nrnc)
###Output
3.55 ms ± 22.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
For polygons, the difference between structured mode and shapely mode is less obvious. Building the STR-tree (~15 s) and doing the intersect (~20 s) takes a little longer than performing the intersection in structured mode. However, note that intersecting with a second, similarly sized polygon in shapely mode only requires another ~20 s, whereas in structured mode the required time remains ~30 s. For repeated intersections with polygons, the shapely method might be preferred over the structured method.
###Code
%timeit -n 1 -r 1 ix_shapely_polygon(nrnc)
%timeit -n 1 -r 1 ix_structured_polygon(nrnc)
###Output
4.16 ms ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)
###Markdown
Intersecting grids with shapes_Note: This feature requires the shapely and descartes packages (which are not a FloPy dependency) so must be installed by the user._This notebook shows the grid intersection functionality in flopy. The intersection methods are available through the GridIntersect object. A flopy model grid is passed to instantiate the object. Then the modelgrid can be intersected with Points, LineStrings and Polygons through the different intersect methods. There are two intersection modes: - the first (default mode) is accessed by passing `method='strtree'` to `GridIntersect` and converts the modelgrid to a list of shapes that are sorted into an STR-tree to allow fast spatial queries. This works on structured and vertex grids.- the second only works on structured grids and is accessed by passing `method='structured'` to `GridIntersect`. These methods use information from the structured grid to limit the search space for intersections and are generally faster.This notebook showcases the functionality of the GridIntersect class. Table of Contents- [GridIntersect Class](gridclass)- [Rectangular regular grid](rectgrid) - [Polygon with regular grid](rectgrid.1) - [Polyline with regular grid](rectgrid.2) - [MultiPoint with regular grid](rectgrid.3)- [Triangular grid](trigrid) - [Polygon with triangular grid](trigrid.1) - [Polyline with triangular grid](trigrid.2) - [MultiPoint with triangular grid](trigrid.3)- [Tests](tests)- [Timings](timings) Import some stuff
###Code
import sys
import os
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.insert(1, "../..")
# run installed version of flopy or add local path
try:
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.triangle import Triangle as Triangle
from flopy.utils.gridintersect import GridIntersect
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.triangle import Triangle as Triangle
from flopy.utils.gridintersect import GridIntersect
import shapely
from shapely.geometry import Polygon, Point, LineString, MultiLineString, MultiPoint, MultiPolygon
from shapely.strtree import STRtree
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
triangle_exe = None
###Output
_____no_output_____
###Markdown
[GridIntersect Class](top)The GridIntersect class takes a flopy.mfgrid and by default converts it to a list of Shapely geometries and builds an STR-tree which can be used to efficiently query the grid to perform intersections. If the method is set to 'structured', the STR-tree is not built and different intersection methods are applied (written by Chris Langevin). The following methods are available:- `_rect_grid_to_shape_list`: convert rectangular (structured) modflow grid to list of shapely geometries- `_sort_strtree_result`: sort STR-tree result by cellid (to ensure lowest cellid is returned when shapes intersect with multiple grid cells)- `_usg_grid_to_shape_list`: not yet implemented, convert unstructured grid to list of shapely geometries- `_vtx_grid_to_shape_list`: convert vertex modflow grid to list of shapely geometries- `_intersect_point_shapely`: intersect Shapely point with grid- `_intersect_polygon_shapely`: intersect Shapely Polygon with grid- `_intersect_linestring_shapely`: intersect Shapely LineString with grid- `_intersect_point_structured`: intersect Shapely point with grid, using optimized search for structured grids- `_intersect_polygon_structured`: intersect Shapely Polygon with grid, using optimized search for structured grids- `_intersect_rectangle_structured`: intersect rectangle with grid to get intersecting node ids- `_intersect_linestring_structured`: intersect Shapely LineString with structured grid, using optimized search for structured grids- `_check_adjacent_cells_intersecting_line`: helper function to check adjacent cells in a structured grid for line intersections- `_get_nodes_intersecting_linestring`: helper function to follow linestring through structured grid- `intersect_point`: intersect point with grid, method depends on whether 'structured' or 'strtree' is passed at initialization.- `intersect_linestring`: intersect linestring with grid, method depends on whether 'structured' or 'strtree' is passed at initialization.- `intersect_polygon`: intersect polygon with grid, method depends on whether 'structured' or 'strtree' is passed at initialization.- `plot_point`: plot intersect result for point- `plot_polygon`: plot intersect result for polygons- `plot_polyline`: plot intersect result for linestrings [Rectangular regular grid](top)
###Code
delc = 10*np.ones(10, dtype=np.float)
delr = 10*np.ones(10, dtype=np.float)
xoff = 0.
yoff = 0.
angrot = 0.
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None, xoff=xoff, yoff=yoff, angrot=angrot)
sgr.plot()
###Output
_____no_output_____
###Markdown
[Polygon with regular grid](top)Polygon to intersect with:
###Code
p = Polygon(shell=[(15, 15), (20, 50), (35, 80.), (80, 50), (80, 40), (40, 5), (15, 12)],
holes=[[(25, 25), (25, 45), (45, 45), (45, 25)]])
###Output
_____no_output_____
###Markdown
Create GridIntersect class
###Code
ix = GridIntersect(sgr)
###Output
_____no_output_____
###Markdown
Do the intersect operation for a polygon
###Code
result = ix.intersect_polygon(p)
%timeit ix.intersect_polygon(p)
###Output
9.01 ms ± 334 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
The results are returned as a numpy.recarray containing several fields based on the intersection performed. An explanation of the data in each of the possible fields is given below:- cellids: contains the cell ids of the intersected grid cells- vertices: contains the vertices of the intersected shape- areas: contains the area of the polygon in that grid cell (only for polygons)- lengths: contains the length of the linestring in that grid cell (only for linestrings)- ixshapes: contains the shapely object representing the intersected shape (useful for plotting the result)Looking at the data for the polygon intersection (convert to pandas.DataFrame for prettier formatting)
###Code
# pd.DataFrame(result)
result
###Output
_____no_output_____
###Markdown
Visualizing the results
###Code
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_polygon(result, ax=ax)
# only cells that intersect with shape
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0],
"kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Alternatively, the intersection can be calculated using special methods optimized for structured grids. Access these methods by instantiating the GridIntersect class with the `method="structured"` keyword argument.
###Code
ixs = GridIntersect(sgr, method="structured")
# pd.DataFrame(ixs.intersect_polygon(p)).head()
ixs.intersect_polygon(p)
###Output
_____no_output_____
###Markdown
[Polyline with regular grid](top)MultiLineString to intersect with:
###Code
ls1 = LineString([(95, 105), (30, 50)])
ls2 = LineString([(30, 50), (90, 22)])
ls3 = LineString([(90, 22), (0, 0)])
mls = MultiLineString(lines=[ls1, ls2, ls3])
%timeit ix.intersect_linestring(mls)
result = ix.intersect_linestring(mls)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_linestring(result, ax=ax)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
# pd.DataFrame(ixs.intersect_linestring(mls)).head()
ixs.intersect_linestring(mls)
###Output
_____no_output_____
###Markdown
[MultiPoint with regular grid](top)MultiPoint to intersect with
###Code
mp = MultiPoint(points=[Point(50.0, 0.0), Point(45., 45.),
Point(10., 10.), Point(150., 100.)])
result = ix.intersect_point(mp)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_point(result, ax=ax, s=50)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
# pd.DataFrame(ixs.intersect_point(mp))
ixs.intersect_point(mp)
###Output
_____no_output_____
###Markdown
[Triangular Grid](top)
###Code
cell2d = [[0, 83.33333333333333, 66.66666666666667, 3, 4, 2, 7],
[1, 16.666666666666668, 33.333333333333336, 3, 4, 0, 5],
[2, 33.333333333333336, 83.33333333333333, 3, 1, 8, 4],
[3, 16.666666666666668, 66.66666666666667, 3, 5, 1, 4],
[4, 33.333333333333336, 16.666666666666668, 3, 6, 0, 4],
[5, 66.66666666666667, 16.666666666666668, 3, 4, 3, 6],
[6, 83.33333333333333, 33.333333333333336, 3, 7, 3, 4],
[7, 66.66666666666667, 83.33333333333333, 3, 8, 2, 4]]
vertices = [[0, 0.0, 0.0],
[1, 0.0, 100.0],
[2, 100.0, 100.0],
[3, 100.0, 0.0],
[4, 50.0, 50.0],
[5, 0.0, 50.0],
[6, 50.0, 0.0],
[7, 100.0, 50.0],
[8, 50.0, 100.0]]
tgr = fgrid.VertexGrid(vertices, cell2d)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(modelgrid=tgr)
pmv.plot_grid(ax=ax)
###Output
_____no_output_____
###Markdown
[Polygon with triangular grid](top)
###Code
ix2 = GridIntersect(tgr)
result = ix2.intersect_polygon(p)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix.plot_polygon(result, ax=ax)
# only cells that intersect with shape
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[LineString with triangular grid](top)
###Code
result = ix2.intersect_linestring(mls)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_linestring(result, ax=ax, lw=3)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[MultiPoint with triangular grid](top)
###Code
result = ix2.intersect_point(mp)
result
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_point(result, ax=ax)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid],
"kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[Tests](top)Tests are written for Points, LineStrings and Polygons on rectangular (regular) grids, triangular grids, and rotated and offset regular grids.
###Code
# !pytest --cov-report term --cov gridintersect ../../autotest/t065_test_gridintersect.py
###Output
_____no_output_____
###Markdown
[Timings](top)Comparing performance of the different methods on a large grid. Some helper functions are defined below.
###Code
def ix_shapely_point(nrnc, npoints=100):
results = []
delc = 1000/nrnc * np.ones(nrnc, dtype=np.float)
delr = 1000/nrnc * np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr)
points = np.random.random((npoints, 2)) * 1000
for p in [Point(x, y) for x, y in points]:
results.append(ix.intersect_point(p))
return np.concatenate(results, axis=0)
def ix_structured_point(nrnc, npoints=100):
results = []
delc = 1000/nrnc * np.ones(nrnc, dtype=np.float)
delr = 1000/nrnc * np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr, method="structured")
points = np.random.random((npoints, 2)) * 1000
for p in [Point(x, y) for x, y in points]:
results.append(ix.intersect_point(p))
return np.concatenate(results, axis=0)
def ix_shapely_linestring(nrnc, ls=None):
if ls is None:
ls = LineString([(0, 0), (nrnc/3, nrnc)])
delc = 1000/nrnc * np.ones(nrnc, dtype=np.float)
delr = 1000/nrnc * np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr)
return ix.intersect_linestring(ls)
def ix_structured_linestring(nrnc, ls=None):
if ls is None:
ls = LineString([(0, 0), (nrnc/3, nrnc)])
delc = 1000/nrnc * np.ones(nrnc, dtype=np.float)
delr = 1000/nrnc * np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr, method="structured")
return ix.intersect_linestring(ls)
def ix_shapely_polygon(nrnc, p=Polygon([(10, 10), (540, 430), (730, 80), (250, 0)])):
delc = 1000/nrnc * np.ones(nrnc, dtype=np.float)
delr = 1000/nrnc * np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr)
return ix.intersect_polygon(p)
def ix_structured_polygon(nrnc, p=Polygon([(10, 10), (540, 430), (730, 80), (250, 0)])):
delc = 1000/nrnc * np.ones(nrnc, dtype=np.float)
delr = 1000/nrnc * np.ones(nrnc, dtype=np.float)
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
ix = GridIntersect(sgr, method="structured")
return ix.intersect_polygon(p)
###Output
_____no_output_____
###Markdown
Below are some results of `%timeit` runs. The listed times are for intersections in a 1000 x 1000 structured grid on an Intel Core i7 (8th gen). To keep the notebook running quickly in the autotests, the grid is currently set to 10 x 10.
###Code
# nrnc = 1000 # no rows and columns
nrnc = 10 # save time when testing notebook
###Output
_____no_output_____
###Markdown
For point intersections, most of the time required by the shapely approach is needed to build the STR-tree (~15 s). Obviously, the pure numpy approach used in structured mode is unbeatable. Not having to build the STR-tree saves a significant amount of time for large grids.
###Code
%timeit -n 1 -r 1 ix_shapely_point(nrnc, npoints=100)
%timeit ix_structured_point(nrnc, npoints=100)
###Output
11.4 ms ± 516 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
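###Markdown
A sketch (not part of the original timings) that splits the shapely-mode cost into the one-time setup, which converts the grid to shapes and builds the STR-tree, and a single point query, which is then cheap. This assumes the tree is built when the GridIntersect object is created, as the text above suggests, and reuses the small demo grid size.
###Code
# Sketch: separate setup cost (grid conversion + STR-tree) from a single query.
delc = 1000/nrnc * np.ones(nrnc, dtype=np.float)
delr = 1000/nrnc * np.ones(nrnc, dtype=np.float)
sgr_t = fgrid.StructuredGrid(delc, delr, top=None, botm=None)
%time ix_t = GridIntersect(sgr_t)                   # one-time setup
%time _ = ix_t.intersect_point(Point(500., 500.))   # a single query afterwards
###Output
_____no_output_____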
###Markdown
For linestrings, following the linestring through the grid (in structured mode) reduces the number of intersection calls significantly. This is where the downside of the STR-tree query becomes obvious: the bounding box of the linestring covers about one third of the grid, so the query only reduces the search space by 2/3, leaving ~333k cells to try to intersect with. On top of building the STR-tree, the intersection calls take another ~15 seconds. (Cutting the linestring into pieces would probably improve performance.)
###Code
%timeit -n 1 -r 1 ix_shapely_linestring(nrnc)
%timeit ix_structured_linestring(nrnc)
###Output
794 µs ± 37.3 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
For polygons, the difference between structured mode and shapely mode is less obvious. Building the STR-tree (~15 s) and doing the intersect (~20 s) takes a little longer than performing the intersection in structured mode. However, note that intersecting with a second, similarly sized polygon in shapely mode only requires another ~20 s, whereas in structured mode the required time remains ~30 s. For repeated intersections with polygons, the shapely method might be preferred over the structured method.
###Code
%timeit -n 1 -r 1 ix_shapely_polygon(nrnc)
%timeit -n 1 -r 1 ix_structured_polygon(nrnc)
###Output
13.3 ms ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)
###Markdown
Intersecting model grids with shapes_Note: This feature requires the shapely and descartes packages (which are not a FloPy dependency) so must be installed by the user._This notebook shows the grid intersection functionality in flopy. The intersection methods are available through the GridIntersect object. A flopy modelgrid is passed to instantiate the object. Then the modelgrid can be intersected with Points, LineStrings and Polygons through the different intersection methods. There are three intersection modes: - the first (default mode) builds an STR-tree for fast spatial queries before calculating intersections, thereby reducing the number of grid cells it has to process. This method works for structured and vertex grids.- the second method does not construct the STR-tree, and loops through all gridcells to determine the intersection between the grid and the shape. This method also works for structured and vertex grids.- the third method only works for structured grids and uses information from the structured grid to limit the search space for intersections.This notebook showcases the functionality of the GridIntersect class. Table of Contents- [GridIntersect Class](gridclass)- [Rectangular regular grid](rectgrid) - [Polygon with regular grid](rectgrid.1) - [MultiLineString with regular grid](rectgrid.2) - [MultiPoint with regular grid](rectgrid.3)- [Vertex grid](trigrid) - [Polygon with triangular grid](trigrid.1) - [MultiLineString with triangular grid](trigrid.2) - [MultiPoint with triangular grid](trigrid.3) Import some stuff
###Code
import sys
import os
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.insert(1, "../..")
# run installed version of flopy or add local path
try:
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
import shapely
from shapely.geometry import Polygon, Point, LineString, MultiLineString, MultiPoint, MultiPolygon
from shapely.strtree import STRtree
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
###Output
3.7.7 (default, Mar 26 2020, 10:32:53)
[Clang 4.0.1 (tags/RELEASE_401/final)]
numpy version: 1.19.2
matplotlib version: 3.3.0
flopy version: 3.3.2
###Markdown
[GridIntersect Class](top)The GridIntersect class is constructed by passing a flopy modelgrid object to the constructor. There are options users can select to change how the intersection is calculated.- `method`: either `"vertex"` (default) or `"structured"`. If `"structured"` is passed, the intersections are performed using structured methods. These methods use information about the regular grid to limit the search space for intersection calculations. - `rtree`: either `True` (default) or `False`, only read when `method="vertex"`. When True, an STR-tree is built, which allows for fast spatial queries. Building the STR-tree does take some time however. Setting the option to False avoids building the STR-tree but requires the intersection calculation to essentially loop through all grid cells.In general the default option is robust (it always works) and fast and is therefore recommended in most situations. If you are working with a structured grid, then the `method="structured"` can speed up intersection operations (especially for points and linestrings) with the added advantage of not having to build an STR-tree. In some cases with vertex grids, it might not be worth your time building the STR-tree, in which case it can be avoided by passing `rtree=False`.The important methods in the GridIntersect object are:- `intersects()`: returns cellids for gridcells that intersect a shape- `intersect()`: for intersecting the modelgrid with point, linestrings, and polygon geometries (intersect can accept shapely geometry objects, flopy geometry object, shapefile.Shape objects, and geojson objects)- `plot_point()`: for plotting point intersection results- `plot_linestring()`: for plotting linestring intersection results- `plot_polygon()`: for plotting polygon intersection resultsIn the following sections examples of intersections are shown for structured and vertex grids for different types of shapes (Polygon, LineString and Point). [Rectangular regular grid](top)
###Code
delc = 10*np.ones(10, dtype=float)
delr = 10*np.ones(10, dtype=float)
xoff = 0.
yoff = 0.
angrot = 0.
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None, xoff=xoff, yoff=yoff, angrot=angrot)
sgr.plot()
###Output
_____no_output_____
###Markdown
[Polygon with regular grid](top)Polygon to intersect with:
###Code
p = Polygon(shell=[(15, 15), (20, 50), (35, 80.), (80, 50),
(80, 40), (40, 5), (15, 12)],
holes=[[(25, 25), (25, 45), (45, 45), (45, 25)]])
###Output
_____no_output_____
###Markdown
Create the GridIntersect class for our modelgrid. The keyword arguments are shown below, but as these are the default options, they do not necessarily need to be passed.
###Code
ix = GridIntersect(sgr, method="vertex", rtree=True)
###Output
_____no_output_____
###Markdown
Do the intersect operation for a polygon
###Code
%timeit ix.intersect(p)
result = ix.intersect(p)
###Output
_____no_output_____
###Markdown
The results are returned as a numpy.recarray containing several fields based on the intersection performed. An explanation of the data in each of the possible fields is given below:- **cellids**: contains the cell ids of the intersected grid cells- **vertices**: contains the vertices of the intersected shape- **areas**: contains the area of the polygon in that grid cell (only for polygons)- **lengths**: contains the length of the linestring in that grid cell (only for linestrings)- **ixshapes**: contains the shapely object representing the intersected shape (useful for plotting the result)Looking at the first few entries of the results of the polygon intersection (convert to pandas.DataFrame for prettier formatting)
###Code
result[:5]
# pd.DataFrame(result) # for prettier formatting
###Output
_____no_output_____
###Markdown
The cellids can be easily obtained
###Code
result.cellids
###Output
_____no_output_____
###Markdown
Or the areas
###Code
result.areas
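# Quick sanity check (a minimal sketch, assuming the "areas" field holds one value
# per cell as described above): the per-cell areas should add up to the area of
# the polygon itself (shell minus hole).
print(result.areas.sum(), p.area)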
###Output
_____no_output_____
###Markdown
If a user is only interested in which cells the shape intersects (and not the areas or the actual shape of the intersected object), there is also the `intersects()` method. This method works for all types of shapely geometries.
###Code
ix.intersects(p)
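# Minimal check (a sketch, assuming intersects() reports the same set of cells as
# the full intersect above): the number of returned records should match.
print(len(ix.intersects(p)), len(result))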
###Output
_____no_output_____
###Markdown
The results of an intersection can be visualized with the plotting methods in the `GridIntersect` object:- `plot_polygon`- `plot_linestring`- `plot_point`
###Code
# create a figure and plot the grid
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
# the intersection object contains some helpful plotting commands
ix.plot_polygon(result, ax=ax)
# add black x at cell centers
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0],
"kx", label="centroids of intersected gridcells")
# add legend
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Alternatively, the intersection can be calculated using special methods optimized for structured grids. Access these methods by instantiating the GridIntersect class with the `method="structured"` keyword argument.
###Code
ixs = GridIntersect(sgr, method="structured")
###Output
_____no_output_____
###Markdown
The methods are optimized for structured grids, but for certain types of polygons there is no benefit (as can be seen in this example).
###Code
%timeit ixs.intersect(p)
###Output
11.1 ms ± 828 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
The result is the same as before:
###Code
result2 = ixs.intersect(p)
result2[:5]
###Output
_____no_output_____
###Markdown
[Polyline with regular grid](top)MultiLineString to intersect with:
###Code
ls1 = LineString([(95, 105), (30, 50)])
ls2 = LineString([(30, 50), (90, 22)])
ls3 = LineString([(90, 22), (0, 0)])
mls = MultiLineString(lines=[ls1, ls2, ls3])
%timeit ix.intersect(mls)
result = ix.intersect(mls)
###Output
_____no_output_____
###Markdown
Plot the result
###Code
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_linestring(result, ax=ax)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
%timeit ixs.intersect(mls)
###Output
7.22 ms ± 277 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
[MultiPoint with regular grid](top)MultiPoint to intersect with
###Code
mp = MultiPoint(points=[Point(50.0, 0.0), Point(45., 45.),
Point(10., 10.), Point(150., 100.)])
%timeit ix.intersect(mp)
result = ix.intersect(mp)
result
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_point(result, ax=ax, s=50)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
%timeit ixs.intersect(mp)
ixs.intersect(mp)
###Output
_____no_output_____
###Markdown
[Vertex Grid](top)
###Code
cell2d = [[0, 83.33333333333333, 66.66666666666667, 3, 4, 2, 7],
[1, 16.666666666666668, 33.333333333333336, 3, 4, 0, 5],
[2, 33.333333333333336, 83.33333333333333, 3, 1, 8, 4],
[3, 16.666666666666668, 66.66666666666667, 3, 5, 1, 4],
[4, 33.333333333333336, 16.666666666666668, 3, 6, 0, 4],
[5, 66.66666666666667, 16.666666666666668, 3, 4, 3, 6],
[6, 83.33333333333333, 33.333333333333336, 3, 7, 3, 4],
[7, 66.66666666666667, 83.33333333333333, 3, 8, 2, 4]]
vertices = [[0, 0.0, 0.0],
[1, 0.0, 100.0],
[2, 100.0, 100.0],
[3, 100.0, 0.0],
[4, 50.0, 50.0],
[5, 0.0, 50.0],
[6, 50.0, 0.0],
[7, 100.0, 50.0],
[8, 50.0, 100.0]]
tgr = fgrid.VertexGrid(vertices, cell2d)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(modelgrid=tgr)
pmv.plot_grid(ax=ax)
###Output
_____no_output_____
###Markdown
[Polygon with triangular grid](top)
###Code
ix2 = GridIntersect(tgr)
result = ix2.intersect(p)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix.plot_polygon(result, ax=ax)
# only cells that intersect with shape
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[LineString with triangular grid](top)
###Code
result = ix2.intersect(mls)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_linestring(result, ax=ax, lw=3)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[MultiPoint with triangular grid](top)
###Code
result = ix2.intersect(mp)
result
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_point(result, ax=ax)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid],
"kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Intersecting model grids with shapes_Note: This feature requires the shapely and descartes packages (which are not a FloPy dependency) so must be installed by the user._This notebook shows the grid intersection functionality in flopy. The intersection methods are available through the GridIntersect object. A flopy modelgrid is passed to instantiate the object. Then the modelgrid can be intersected with Points, LineStrings and Polygons through the different intersection methods. There are three intersection modes: - the first (default mode) builds an STR-tree for fast spatial queries before calculating intersections, thereby reducing the number of grid cells it has to process. This method works for structured and vertex grids.- the second method does not construct the STR-tree, and loops through all gridcells to determine the intersection between the grid and the shape. This method also works for structured and vertex grids.- the third method only works for structured grids and uses information from the structured grid to limit the search space for intersections.This notebook showcases the functionality of the GridIntersect class. Table of Contents- [GridIntersect Class](gridclass)- [Rectangular regular grid](rectgrid) - [Polygon with regular grid](rectgrid.1) - [MultiLineString with regular grid](rectgrid.2) - [MultiPoint with regular grid](rectgrid.3)- [Vertex grid](trigrid) - [Polygon with triangular grid](trigrid.1) - [MultiLineString with triangular grid](trigrid.2) - [MultiPoint with triangular grid](trigrid.3) Import some stuff
###Code
import sys
import os
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.insert(1, "../..")
# run installed version of flopy or add local path
try:
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
import shapely
from shapely.geometry import Polygon, Point, LineString, MultiLineString, MultiPoint, MultiPolygon
from shapely.strtree import STRtree
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
###Output
flopy is installed in /home/david/Github/flopy_db/flopy
3.7.6 (default, Jan 8 2020, 19:59:22)
[GCC 7.3.0]
numpy version: 1.18.1
matplotlib version: 3.1.3
flopy version: 3.3.1
###Markdown
[GridIntersect Class](top)The GridIntersect class is constructed by passing a flopy modelgrid object to the constructor. There are options users can select to change how the intersection is calculated.- `method`: either `"vertex"` (default) or `"structured"`. If `"structured"` is passed, the intersections are performed using structured methods. These methods use information about the regular grid to limit the search space for intersection calculations. - `rtree`: either `True` (default) or `False`, only read when `method="vertex"`. When True, an STR-tree is built, which allows for fast spatial queries. Building the STR-tree does take some time however. Setting the option to False avoids building the STR-tree but requires the intersection calculation to essentially loop through all grid cells.In general the default option is robust (it always works) and fast and is therefore recommended in most situations. If you are working with a structured grid, then the `method="structured"` can speed up intersection operations (especially for points and linestrings) with the added advantage of not having to build an STR-tree. In some cases with vertex grids, it might not be worth your time building the STR-tree, in which case it can be avoided by passing `rtree=False`.The important methods in the GridIntersect object are:- `intersects()`: returns cellids for gridcells that intersect a shape- `intersect_point()`: for intersecting the modelgrid with point geometries- `intersect_linestring()`: for intersecting the modelgrid with linestrings- `intersect_polygon()`: for intersecting the modelgrid with polygons- `plot_point()`: for plotting point intersection results- `plot_linestring()`: for plotting linestring intersection results- `plot_polygon()`: for plotting polygon intersection resultsIn the following sections examples of intersections are shown for structured and vertex grids for different types of shapes (Polygon, LineString and Point). [Rectangular regular grid](top)
###Code
delc = 10*np.ones(10, dtype=float)
delr = 10*np.ones(10, dtype=float)
xoff = 0.
yoff = 0.
angrot = 0.
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None, xoff=xoff, yoff=yoff, angrot=angrot)
sgr.plot()
###Output
_____no_output_____
###Markdown
[Polygon with regular grid](top)Polygon to intersect with:
###Code
p = Polygon(shell=[(15, 15), (20, 50), (35, 80.), (80, 50),
(80, 40), (40, 5), (15, 12)],
holes=[[(25, 25), (25, 45), (45, 45), (45, 25)]])
###Output
_____no_output_____
###Markdown
Create the GridIntersect class for our modelgrid. The keyword arguments are shown below, but as these are the default options, they do not necessarily need to be passed.
###Code
ix = GridIntersect(sgr, method="vertex", rtree=True)
###Output
_____no_output_____
###Markdown
Do the intersect operation for a polygon
###Code
%timeit ix.intersect_polygon(p)
result = ix.intersect_polygon(p)
###Output
_____no_output_____
###Markdown
The results are returned as a numpy.recarray containing several fields based on the intersection performed. An explanation of the data in each of the possible fields is given below:- **cellids**: contains the cell ids of the intersected grid cells- **vertices**: contains the vertices of the intersected shape- **areas**: contains the area of the polygon in that grid cell (only for polygons)- **lengths**: contains the length of the linestring in that grid cell (only for linestrings)- **ixshapes**: contains the shapely object representing the intersected shape (useful for plotting the result)Looking at the first few entries of the results of the polygon intersection (convert to pandas.DataFrame for prettier formatting)
###Code
result[:5]
# pd.DataFrame(result) # for prettier formatting
###Output
_____no_output_____
###Markdown
The cellids can be easily obtained
###Code
result.cellids
###Output
_____no_output_____
###Markdown
Or the areas
###Code
result.areas
###Output
_____no_output_____
###Markdown
If a user is only interested in which cells the shape intersects (and not the areas or the actual shape of the intersected object), there is also the `intersects()` method. This method works for all types of shapely geometries.
###Code
ix.intersects(p)
###Output
_____no_output_____
###Markdown
The results of an intersection can be visualized with the plotting methods in the `GridIntersect` object:- `plot_polygon`- `plot_linestring`- `plot_point`
###Code
# create a figure and plot the grid
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
# the intersection object contains some helpful plotting commands
ix.plot_polygon(result, ax=ax)
# add black x at cell centers
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0],
"kx", label="centroids of intersected gridcells")
# add legend
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Alternatively, the intersection can be calculated using special methods optimized for structured grids. Access these methods by instantiating the GridIntersect class with the `method="structured"` keyword argument.
###Code
ixs = GridIntersect(sgr, method="structured")
###Output
_____no_output_____
###Markdown
The methods are optimized for structured grids, but for certain types of polygons there is no benefit (as can be seen in this example).
###Code
%timeit ixs.intersect_polygon(p)
###Output
9.06 ms ± 1.38 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
The result is the same as before:
###Code
result2 = ixs.intersect_polygon(p)
result2[:5]
###Output
_____no_output_____
###Markdown
[Polyline with regular grid](top)MultiLineString to intersect with:
###Code
ls1 = LineString([(95, 105), (30, 50)])
ls2 = LineString([(30, 50), (90, 22)])
ls3 = LineString([(90, 22), (0, 0)])
mls = MultiLineString(lines=[ls1, ls2, ls3])
%timeit ix.intersect_linestring(mls)
result = ix.intersect_linestring(mls)
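# Quick sanity check (a minimal sketch, assuming a "lengths" field as described
# above): the per-cell lengths sum to the portion of the MultiLineString that lies
# inside the grid, which is somewhat less than its total length here because the
# first segment starts outside the 100 x 100 grid.
print(result.lengths.sum(), mls.length)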
###Output
_____no_output_____
###Markdown
Plot the result
###Code
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_linestring(result, ax=ax)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
%timeit ixs.intersect_linestring(mls)
###Output
6.42 ms ± 1.17 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
[MultiPoint with regular grid](top)MultiPoint to intersect with
###Code
mp = MultiPoint(points=[Point(50.0, 0.0), Point(45., 45.),
Point(10., 10.), Point(150., 100.)])
%timeit ix.intersect_point(mp)
result = ix.intersect_point(mp)
result
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_point(result, ax=ax, s=50)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
%timeit ixs.intersect_point(mp)
ixs.intersect_point(mp)
###Output
_____no_output_____
###Markdown
[Vertex Grid](top)
###Code
cell2d = [[0, 83.33333333333333, 66.66666666666667, 3, 4, 2, 7],
[1, 16.666666666666668, 33.333333333333336, 3, 4, 0, 5],
[2, 33.333333333333336, 83.33333333333333, 3, 1, 8, 4],
[3, 16.666666666666668, 66.66666666666667, 3, 5, 1, 4],
[4, 33.333333333333336, 16.666666666666668, 3, 6, 0, 4],
[5, 66.66666666666667, 16.666666666666668, 3, 4, 3, 6],
[6, 83.33333333333333, 33.333333333333336, 3, 7, 3, 4],
[7, 66.66666666666667, 83.33333333333333, 3, 8, 2, 4]]
vertices = [[0, 0.0, 0.0],
[1, 0.0, 100.0],
[2, 100.0, 100.0],
[3, 100.0, 0.0],
[4, 50.0, 50.0],
[5, 0.0, 50.0],
[6, 50.0, 0.0],
[7, 100.0, 50.0],
[8, 50.0, 100.0]]
tgr = fgrid.VertexGrid(vertices, cell2d)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(modelgrid=tgr)
pmv.plot_grid(ax=ax)
###Output
_____no_output_____
###Markdown
[Polygon with triangular grid](top)
###Code
ix2 = GridIntersect(tgr)
result = ix2.intersect_polygon(p)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix.plot_polygon(result, ax=ax)
# only cells that intersect with shape
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[LineString with triangular grid](top)
###Code
result = ix2.intersect_linestring(mls)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_linestring(result, ax=ax, lw=3)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[MultiPoint with triangular grid](top)
###Code
result = ix2.intersect_point(mp)
result
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_point(result, ax=ax)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid],
"kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Intersecting model grids with shapes_Note: This feature requires the shapely and descartes packages (which are not a FloPy dependency) so must be installed by the user._This notebook shows the grid intersection functionality in flopy. The intersection methods are available through the GridIntersect object. A flopy modelgrid is passed to instantiate the object. Then the modelgrid can be intersected with Points, LineStrings and Polygons through the different intersection methods. There are three intersection modes: - the first (default mode) builds an STR-tree for fast spatial queries before calculating intersections, thereby reducing the number of grid cells it has to process. This method works for structured and vertex grids.- the second method does not construct the STR-tree, and loops through all gridcells to determine the intersection between the grid and the shape. This method also works for structured and vertex grids.- the third method only works for structured grids and uses information from the structured grid to limit the search space for intersections.This notebook showcases the functionality of the GridIntersect class. Table of Contents- [GridIntersect Class](gridclass)- [Rectangular regular grid](rectgrid) - [Polygon with regular grid](rectgrid.1) - [MultiLineString with regular grid](rectgrid.2) - [MultiPoint with regular grid](rectgrid.3)- [Vertex grid](trigrid) - [Polygon with triangular grid](trigrid.1) - [MultiLineString with triangular grid](trigrid.2) - [MultiPoint with triangular grid](trigrid.3) Import some stuff
###Code
import sys
import os
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.insert(1, "../..")
# run installed version of flopy or add local path
try:
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
import shapely
from shapely.geometry import Polygon, Point, LineString, MultiLineString, MultiPoint, MultiPolygon
from shapely.strtree import STRtree
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
###Output
3.8.10 (default, May 19 2021, 11:01:55)
[Clang 10.0.0 ]
numpy version: 1.19.2
matplotlib version: 3.4.2
flopy version: 3.3.5
###Markdown
[GridIntersect Class](top)The GridIntersect class is constructed by passing a flopy modelgrid object to the constructor. There are options users can select to change how the intersection is calculated.- `method`: either `"vertex"` (default) or `"structured"`. If `"structured"` is passed, the intersections are performed using structured methods. These methods use information about the regular grid to limit the search space for intersection calculations. - `rtree`: either `True` (default) or `False`, only read when `method="vertex"`. When True, an STR-tree is built, which allows for fast spatial queries. Building the STR-tree does take some time however. Setting the option to False avoids building the STR-tree but requires the intersection calculation to essentially loop through all grid cells.In general the default option is robust (it always works) and fast and is therefore recommended in most situations. If you are working with a structured grid, then the `method="structured"` can speed up intersection operations (especially for points and linestrings) with the added advantage of not having to build an STR-tree. In some cases with vertex grids, it might not be worth your time building the STR-tree, in which case it can be avoided by passing `rtree=False`.The important methods in the GridIntersect object are:- `intersects()`: returns cellids for gridcells that intersect a shape- `intersect()`: for intersecting the modelgrid with point, linestrings, and polygon geometries (intersect can accept shapely geometry objects, flopy geometry object, shapefile.Shape objects, and geojson objects)- `plot_point()`: for plotting point intersection results- `plot_linestring()`: for plotting linestring intersection results- `plot_polygon()`: for plotting polygon intersection resultsIn the following sections examples of intersections are shown for structured and vertex grids for different types of shapes (Polygon, LineString and Point). [Rectangular regular grid](top)
###Code
delc = 10*np.ones(10, dtype=float)
delr = 10*np.ones(10, dtype=float)
xoff = 0.
yoff = 0.
angrot = 0.
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None, xoff=xoff, yoff=yoff, angrot=angrot)
sgr.plot()
###Output
_____no_output_____
###Markdown
[Polygon with regular grid](top)Polygon to intersect with:
###Code
p = Polygon(shell=[(15, 15), (20, 50), (35, 80.), (80, 50),
(80, 40), (40, 5), (15, 12)],
holes=[[(25, 25), (25, 45), (45, 45), (45, 25)]])
###Output
_____no_output_____
###Markdown
Create the GridIntersect class for our modelgrid. The keyword arguments are shown below, but as these are the default options, they do not necessarily need to be passed.
###Code
ix = GridIntersect(sgr, method="vertex", rtree=True)
###Output
_____no_output_____
###Markdown
Do the intersect operation for a polygon
###Code
%timeit ix.intersect(p)
result = ix.intersect(p)
###Output
_____no_output_____
###Markdown
The results are returned as a numpy.recarray containing several fields based on the intersection performed. An explanation of the data in each of the possible fields is given below:- **cellids**: contains the cell ids of the intersected grid cells- **vertices**: contains the vertices of the intersected shape- **areas**: contains the area of the polygon in that grid cell (only for polygons)- **lengths**: contains the length of the linestring in that grid cell (only for linestrings)- **ixshapes**: contains the shapely object representing the intersected shape (useful for plotting the result)Looking at the first few entries of the results of the polygon intersection (convert to pandas.DataFrame for prettier formatting)
###Code
result[:5]
# pd.DataFrame(result) # for prettier formatting
###Output
_____no_output_____
###Markdown
The cellids can be easily obtained
###Code
result.cellids
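# A minimal sketch (assuming sgr.nrow and sgr.ncol give the structured-grid shape):
# the (row, col) cellids can be turned into a boolean mask, e.g. as a starting
# point for idomain-style arrays.
mask = np.zeros((sgr.nrow, sgr.ncol), dtype=bool)
for irow, icol in result.cellids:
    mask[irow, icol] = True
mask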
###Output
_____no_output_____
###Markdown
Or the areas
###Code
result.areas
###Output
_____no_output_____
###Markdown
If a user is only interested in which cells the shape intersects (and not the areas or the actual shape of the intersected object), there is also the `intersects()` method. This method works for all types of shapely geometries.
###Code
ix.intersects(p)
###Output
_____no_output_____
###Markdown
The results of an intersection can be visualized with the plotting methods in the `GridIntersect` object:- `plot_polygon`- `plot_linestring`- `plot_point`
###Code
# create a figure and plot the grid
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
# the intersection object contains some helpful plotting commands
ix.plot_polygon(result, ax=ax)
# add black x at cell centers
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0],
"kx", label="centroids of intersected gridcells")
# add legend
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Alternatively, the intersection can be calculated using special methods optimized for structured grids. Access these methods by instantiating the GridIntersect class with the `method="structured"` keyword argument.
###Code
ixs = GridIntersect(sgr, method="structured")
###Output
_____no_output_____
###Markdown
The methods are optimized for structured grids, but for certain types of polygons there is no benefit (as can be seen in this example).
###Code
%timeit ixs.intersect(p)
###Output
10.6 ms ± 1.13 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
The result is the same as before:
###Code
result2 = ixs.intersect(p)
result2[:5]
###Output
_____no_output_____
###Markdown
[Polyline with regular grid](top)MultiLineString to intersect with:
###Code
ls1 = LineString([(95, 105), (30, 50)])
ls2 = LineString([(30, 50), (90, 22)])
ls3 = LineString([(90, 22), (0, 0)])
mls = MultiLineString(lines=[ls1, ls2, ls3])
%timeit ix.intersect(mls)
result = ix.intersect(mls)
###Output
_____no_output_____
###Markdown
Plot the result
###Code
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_linestring(result, ax=ax, cmap="viridis")
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
%timeit ixs.intersect(mls)
###Output
5.66 ms ± 370 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
[MultiPoint with regular grid](top)MultiPoint to intersect with
###Code
mp = MultiPoint(points=[Point(50.0, 0.0), Point(45., 45.),
Point(10., 10.), Point(150., 100.)])
%timeit ix.intersect(mp)
result = ix.intersect(mp)
result
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_point(result, ax=ax, s=50)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
%timeit ixs.intersect(mp)
ixs.intersect(mp)
###Output
_____no_output_____
###Markdown
[Vertex Grid](top)
###Code
cell2d = [[0, 83.33333333333333, 66.66666666666667, 3, 4, 2, 7],
[1, 16.666666666666668, 33.333333333333336, 3, 4, 0, 5],
[2, 33.333333333333336, 83.33333333333333, 3, 1, 8, 4],
[3, 16.666666666666668, 66.66666666666667, 3, 5, 1, 4],
[4, 33.333333333333336, 16.666666666666668, 3, 6, 0, 4],
[5, 66.66666666666667, 16.666666666666668, 3, 4, 3, 6],
[6, 83.33333333333333, 33.333333333333336, 3, 7, 3, 4],
[7, 66.66666666666667, 83.33333333333333, 3, 8, 2, 4]]
vertices = [[0, 0.0, 0.0],
[1, 0.0, 100.0],
[2, 100.0, 100.0],
[3, 100.0, 0.0],
[4, 50.0, 50.0],
[5, 0.0, 50.0],
[6, 50.0, 0.0],
[7, 100.0, 50.0],
[8, 50.0, 100.0]]
tgr = fgrid.VertexGrid(vertices, cell2d)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(modelgrid=tgr)
pmv.plot_grid(ax=ax)
###Output
_____no_output_____
###Markdown
[Polygon with triangular grid](top)
###Code
ix2 = GridIntersect(tgr)
result = ix2.intersect(p)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix.plot_polygon(result, ax=ax)
# only cells that intersect with shape
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[LineString with triangular grid](top)
###Code
result = ix2.intersect(mls)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_linestring(result, ax=ax, lw=3)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[MultiPoint with triangular grid](top)
###Code
result = ix2.intersect(mp)
result
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_point(result, ax=ax)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid],
"kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Intersecting model grids with shapes_Note: This feature requires the shapely and descartes packages (which are not a FloPy dependency) so must be installed by the user._This notebook shows the grid intersection functionality in flopy. The intersection methods are available through the GridIntersect object. A flopy modelgrid is passed to instantiate the object. Then the modelgrid can be intersected with Points, LineStrings and Polygons through the different intersection methods. There are three intersection modes: - the first (default mode) builds an STR-tree for fast spatial queries before calculating intersections, thereby reducing the number of grid cells it has to process. This method works for structured and vertex grids.- the second method does not construct the STR-tree, and loops through all gridcells to determine the intersection between the grid and the shape. This method also works for structured and vertex grids.- the third method only works for structured grids and uses information from the structured grid to limit the search space for intersections.This notebook showcases the functionality of the GridIntersect class. Table of Contents- [GridIntersect Class](gridclass)- [Rectangular regular grid](rectgrid) - [Polygon with regular grid](rectgrid.1) - [MultiLineString with regular grid](rectgrid.2) - [MultiPoint with regular grid](rectgrid.3)- [Vertex grid](trigrid) - [Polygon with triangular grid](trigrid.1) - [MultiLineString with triangular grid](trigrid.2) - [MultiPoint with triangular grid](trigrid.3) Import some stuff
###Code
import sys
import os
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.insert(1, "../..")
# run installed version of flopy or add local path
try:
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
except:
fpth = os.path.abspath(os.path.join("..", ".."))
sys.path.append(fpth)
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
import shapely
from shapely.geometry import (
Polygon,
Point,
LineString,
MultiLineString,
MultiPoint,
MultiPolygon,
)
from shapely.strtree import STRtree
print(sys.version)
print("numpy version: {}".format(np.__version__))
print("matplotlib version: {}".format(mpl.__version__))
print("flopy version: {}".format(flopy.__version__))
###Output
3.8.11 (default, Aug 6 2021, 08:56:27)
[Clang 10.0.0 ]
numpy version: 1.19.2
matplotlib version: 3.4.2
flopy version: 3.3.5
###Markdown
[GridIntersect Class](top)The GridIntersect class is constructed by passing a flopy modelgrid object to the constructor. There are options users can select to change how the intersection is calculated.- `method`: either `"vertex"` (default) or `"structured"`. If `"structured"` is passed, the intersections are performed using structured methods. These methods use information about the regular grid to limit the search space for intersection calculations. - `rtree`: either `True` (default) or `False`, only read when `method="vertex"`. When True, an STR-tree is built, which allows for fast spatial queries. Building the STR-tree does take some time however. Setting the option to False avoids building the STR-tree but requires the intersection calculation to essentially loop through all grid cells.In general the default option is robust (it always works) and fast and is therefore recommended in most situations. If you are working with a structured grid, then the `method="structured"` can speed up intersection operations (especially for points and linestrings) with the added advantage of not having to build an STR-tree. In some cases with vertex grids, it might not be worth your time building the STR-tree, in which case it can be avoided by passing `rtree=False`.The important methods in the GridIntersect object are:- `intersects()`: returns cellids for gridcells that intersect a shape- `intersect()`: for intersecting the modelgrid with point, linestrings, and polygon geometries (intersect can accept shapely geometry objects, flopy geometry object, shapefile.Shape objects, and geojson objects)- `plot_point()`: for plotting point intersection results- `plot_linestring()`: for plotting linestring intersection results- `plot_polygon()`: for plotting polygon intersection resultsIn the following sections examples of intersections are shown for structured and vertex grids for different types of shapes (Polygon, LineString and Point). [Rectangular regular grid](top)
###Code
delc = 10 * np.ones(10, dtype=float)
delr = 10 * np.ones(10, dtype=float)
xoff = 0.0
yoff = 0.0
angrot = 0.0
sgr = fgrid.StructuredGrid(
delc, delr, top=None, botm=None, xoff=xoff, yoff=yoff, angrot=angrot
)
sgr.plot()
###Output
_____no_output_____
###Markdown
[Polygon with regular grid](top)Polygon to intersect with:
###Code
p = Polygon(
shell=[
(15, 15),
(20, 50),
(35, 80.0),
(80, 50),
(80, 40),
(40, 5),
(15, 12),
],
holes=[[(25, 25), (25, 45), (45, 45), (45, 25)]],
)
###Output
_____no_output_____
###Markdown
Create the GridIntersect class for our modelgrid. The keyword arguments are shown below, but as these are the default options, they do not necessarily need to be passed.
###Code
ix = GridIntersect(sgr, method="vertex", rtree=True)
###Output
_____no_output_____
###Markdown
Do the intersect operation for a polygon
###Code
%timeit ix.intersect(p)
result = ix.intersect(p)
###Output
_____no_output_____
###Markdown
The results are returned as a numpy.recarray containing several fields based on the intersection performed. An explanation of the data in each of the possible fields is given below:- **cellids**: contains the cell ids of the intersected grid cells- **vertices**: contains the vertices of the intersected shape- **areas**: contains the area of the polygon in that grid cell (only for polygons)- **lengths**: contains the length of the linestring in that grid cell (only for linestrings)- **ixshapes**: contains the shapely object representing the intersected shape (useful for plotting the result)Looking at the first few entries of the results of the polygon intersection (convert to pandas.DataFrame for prettier formatting)
###Code
result[:5]
# pd.DataFrame(result) # for prettier formatting
###Output
_____no_output_____
###Markdown
The cellids can be easily obtained
###Code
result.cellids
###Output
_____no_output_____
###Markdown
Or the areas
###Code
result.areas
###Output
_____no_output_____
###Markdown
If a user is only interested in which cells the shape intersects (and not the areas or the actual shape of the intersected object), there is also the `intersects()` method. This method works for all types of shapely geometries.
###Code
ix.intersects(p)
###Output
_____no_output_____
###Markdown
The results of an intersection can be visualized with the plotting methods in the `GridIntersect` object:- `plot_polygon`- `plot_linestring`- `plot_point`
###Code
# create a figure and plot the grid
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
# the intersection object contains some helpful plotting commands
ix.plot_polygon(result, ax=ax)
# add black x at cell centers
for irow, icol in result.cellids:
(h2,) = ax.plot(
sgr.xcellcenters[0, icol],
sgr.ycellcenters[irow, 0],
"kx",
label="centroids of intersected gridcells",
)
# add legend
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Alternatively, the intersection can be calculated using special methods optimized for structured grids. Access these methods by instantiating the GridIntersect class with the `method="structured"` keyword argument.
###Code
ixs = GridIntersect(sgr, method="structured")
###Output
_____no_output_____
###Markdown
The methods are optimized for structured grids, but for certain types of polygons there is no benefit (as can be seen in this example).
###Code
%timeit ixs.intersect(p)
###Output
12.6 ms ± 1.25 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
The result is the same as before:
###Code
result2 = ixs.intersect(p)
result2[:5]
###Output
_____no_output_____
###Markdown
[Polyline with regular grid](top)MultiLineString to intersect with:
###Code
ls1 = LineString([(95, 105), (30, 50)])
ls2 = LineString([(30, 50), (90, 22)])
ls3 = LineString([(90, 22), (0, 0)])
mls = MultiLineString(lines=[ls1, ls2, ls3])
%timeit ix.intersect(mls)
result = ix.intersect(mls)
###Output
_____no_output_____
###Markdown
Plot the result
###Code
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_linestring(result, ax=ax, cmap="viridis")
for irow, icol in result.cellids:
(h2,) = ax.plot(
sgr.xcellcenters[0, icol],
sgr.ycellcenters[irow, 0],
"kx",
label="centroids of intersected gridcells",
)
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
%timeit ixs.intersect(mls)
###Output
9.14 ms ± 1.23 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
[MultiPoint with regular grid](top)MultiPoint to intersect with
###Code
mp = MultiPoint(
points=[
Point(50.0, 0.0),
Point(45.0, 45.0),
Point(10.0, 10.0),
Point(150.0, 100.0),
]
)
%timeit ix.intersect(mp)
result = ix.intersect(mp)
result
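# Minimal check: the point at (150, 100) lies outside the 100 x 100 grid, so fewer
# records are returned than the number of input points.
print(len(result))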
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_point(result, ax=ax, s=50)
for irow, icol in result.cellids:
(h2,) = ax.plot(
sgr.xcellcenters[0, icol],
sgr.ycellcenters[irow, 0],
"kx",
label="centroids of intersected cells",
)
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
%timeit ixs.intersect(mp)
ixs.intersect(mp)
###Output
_____no_output_____
###Markdown
[Vertex Grid](top)
###Code
cell2d = [
[0, 83.33333333333333, 66.66666666666667, 3, 4, 2, 7],
[1, 16.666666666666668, 33.333333333333336, 3, 4, 0, 5],
[2, 33.333333333333336, 83.33333333333333, 3, 1, 8, 4],
[3, 16.666666666666668, 66.66666666666667, 3, 5, 1, 4],
[4, 33.333333333333336, 16.666666666666668, 3, 6, 0, 4],
[5, 66.66666666666667, 16.666666666666668, 3, 4, 3, 6],
[6, 83.33333333333333, 33.333333333333336, 3, 7, 3, 4],
[7, 66.66666666666667, 83.33333333333333, 3, 8, 2, 4],
]
vertices = [
[0, 0.0, 0.0],
[1, 0.0, 100.0],
[2, 100.0, 100.0],
[3, 100.0, 0.0],
[4, 50.0, 50.0],
[5, 0.0, 50.0],
[6, 50.0, 0.0],
[7, 100.0, 50.0],
[8, 50.0, 100.0],
]
tgr = fgrid.VertexGrid(vertices, cell2d)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(modelgrid=tgr)
pmv.plot_grid(ax=ax)
###Output
_____no_output_____
###Markdown
[Polygon with triangular grid](top)
###Code
ix2 = GridIntersect(tgr)
result = ix2.intersect(p)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix.plot_polygon(result, ax=ax)
# only cells that intersect with shape
for cellid in result.cellids:
(h2,) = ax.plot(
tgr.xcellcenters[cellid],
tgr.ycellcenters[cellid],
"kx",
label="centroids of intersected gridcells",
)
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[LineString with triangular grid](top)
###Code
result = ix2.intersect(mls)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_linestring(result, ax=ax, lw=3)
for cellid in result.cellids:
(h2,) = ax.plot(
tgr.xcellcenters[cellid],
tgr.ycellcenters[cellid],
"kx",
label="centroids of intersected gridcells",
)
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[MultiPoint with triangular grid](top)
###Code
result = ix2.intersect(mp)
result
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_point(result, ax=ax)
for cellid in result.cellids:
(h2,) = ax.plot(
tgr.xcellcenters[cellid],
tgr.ycellcenters[cellid],
"kx",
label="centroids of intersected cells",
)
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Intersecting model grids with shapes_Note: This feature requires the shapely and descartes packages (which are not a FloPy dependency) so must be installed by the user._This notebook shows the grid intersection functionality in flopy. The intersection methods are available through the GridIntersect object. A flopy modelgrid is passed to instantiate the object. Then the modelgrid can be intersected with Points, LineStrings and Polygons through the different intersection methods. There are three intersection modes: - the first (default mode) builds an STR-tree for fast spatial queries before calculating intersections, thereby reducing the number of grid cells it has to process. This method works for structured and vertex grids.- the second method does not construct the STR-tree, and loops through all gridcells to determine the intersection between the grid and the shape. This method also works for structured and vertex grids.- the third method only works for structured grids and uses information from the structured grid to limit the search space for intersections.This notebook showcases the functionality of the GridIntersect class. Table of Contents- [GridIntersect Class](gridclass)- [Rectangular regular grid](rectgrid) - [Polygon with regular grid](rectgrid.1) - [MultiLineString with regular grid](rectgrid.2) - [MultiPoint with regular grid](rectgrid.3)- [Vertex grid](trigrid) - [Polygon with triangular grid](trigrid.1) - [MultiLineString with triangular grid](trigrid.2) - [MultiPoint with triangular grid](trigrid.3) Import some stuff
###Code
import sys
import os
import platform
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
sys.path.insert(1, "../..")
# run installed version of flopy or add local path
try:
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
import flopy.discretization as fgrid
import flopy.plot as fplot
from flopy.utils.gridintersect import GridIntersect
import shapely
from shapely.geometry import Polygon, Point, LineString, MultiLineString, MultiPoint, MultiPolygon
from shapely.strtree import STRtree
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
###Output
flopy is installed in /Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy
3.7.3 | packaged by conda-forge | (default, Jul 1 2019, 14:38:56)
[Clang 4.0.1 (tags/RELEASE_401/final)]
numpy version: 1.17.3
matplotlib version: 3.1.1
flopy version: 3.3.1
###Markdown
[GridIntersect Class](top)The GridIntersect class is constructed by passing a flopy modelgrid object to the constructor. There are options users can select to change how the intersection is calculated.- `method`: either `"vertex"` (default) or `"structured"`. If `"structured"` is passed, the intersections are performed using structured methods. These methods use information about the regular grid to limit the search space for intersection calculations. - `rtree`: either `True` (default) or `False`, only read when `method="vertex"`. When True, an STR-tree is built, which allows for fast spatial queries. Building the STR-tree does take some time however. Setting the option to False avoids building the STR-tree but requires the intersection calculation to essentially loop through all grid cells.In general the default option is robust (it always works) and fast and is therefore recommended in most situations. If you are working with a structured grid, then the `method="structured"` can speed up intersection operations (especially for points and linestrings) with the added advantage of not having to build an STR-tree. In some cases with vertex grids, it might not be worth your time building the STR-tree, in which case it can be avoided by passing `rtree=False`.The important methods in the GridIntersect object are:- `intersects()`: returns cellids for gridcells that intersect a shape- `intersect_point()`: for intersecting the modelgrid with point geometries- `intersect_linestring()`: for intersecting the modelgrid with linestrings- `intersect_polygon()`: for intersecting the modelgrid with polygons- `plot_point()`: for plotting point intersection results- `plot_linestring()`: for plotting linestring intersection results- `plot_polygon()`: for plotting polygon intersection resultsIn the following sections examples of intersections are shown for structured and vertex grids for different types of shapes (Polygon, LineString and Point). [Rectangular regular grid](top)
###Code
delc = 10*np.ones(10, dtype=float)
delr = 10*np.ones(10, dtype=float)
xoff = 0.
yoff = 0.
angrot = 0.
sgr = fgrid.StructuredGrid(delc, delr, top=None, botm=None, xoff=xoff, yoff=yoff, angrot=angrot)
sgr.plot()
###Output
_____no_output_____
###Markdown
[Polygon with regular grid](top)Polygon to intersect with:
###Code
p = Polygon(shell=[(15, 15), (20, 50), (35, 80.), (80, 50),
(80, 40), (40, 5), (15, 12)],
holes=[[(25, 25), (25, 45), (45, 45), (45, 25)]])
###Output
_____no_output_____
###Markdown
Create the GridIntersect class for our modelgrid. The keyword arguments are shown below, but as these are the default options, they do not necessarily need to be passed.
###Code
ix = GridIntersect(sgr, method="vertex", rtree=True)
###Output
_____no_output_____
###Markdown
Do the intersect operation for a polygon
###Code
%timeit ix.intersect_polygon(p)
result = ix.intersect_polygon(p)
###Output
_____no_output_____
###Markdown
The results are returned as a numpy.recarray containing several fields based on the intersection performed. An explanation of the data in each of the possible fields is given below:- **cellids**: contains the cell ids of the intersected grid cells- **vertices**: contains the vertices of the intersected shape- **areas**: contains the area of the polygon in that grid cell (only for polygons)- **lengths**: contains the length of the linestring in that grid cell (only for linestrings)- **ixshapes**: contains the shapely object representing the intersected shape (useful for plotting the result)Looking at the first few entries of the results of the polygon intersection (convert to pandas.DataFrame for prettier formatting)
###Code
result[:5]
# pd.DataFrame(result) # for prettier formatting
###Output
_____no_output_____
###Markdown
The cellids can be easily obtained
###Code
result.cellids
###Output
_____no_output_____
###Markdown
Or the areas
###Code
result.areas
###Output
_____no_output_____
###Markdown
If a user is only interested in which cells the shape intersects (and not the areas or the actual shape of the intersected object), there is also the `intersects()` method. This method works for all types of shapely geometries.
###Code
ix.intersects(p)
###Output
_____no_output_____
###Markdown
The results of an intersection can be visualized with the plotting methods in the `GridIntersect` object:- `plot_polygon`- `plot_linestring`- `plot_point`
###Code
# create a figure and plot the grid
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
# the intersection object contains some helpful plotting commands
ix.plot_polygon(result, ax=ax)
# add black x at cell centers
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0],
"kx", label="centroids of intersected gridcells")
# add legend
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Alternatively, the intersection can be calculated using special methods optimized for structured grids. Access these methods by instantiating the GridIntersect class with the `method="structured"` keyword argument.
###Code
ixs = GridIntersect(sgr, method="structured")
###Output
_____no_output_____
###Markdown
The methods are optimized for structured grids, but for certain types of polygons there is no benefit (as can be seen in this example).
###Code
%timeit ixs.intersect_polygon(p)
###Output
12.6 ms ± 139 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
The result is the same as before:
###Code
result2 = ixs.intersect_polygon(p)
result2[:5]
###Output
_____no_output_____
###Markdown
[Polyline with regular grid](top)MultiLineString to intersect with:
###Code
ls1 = LineString([(95, 105), (30, 50)])
ls2 = LineString([(30, 50), (90, 22)])
ls3 = LineString([(90, 22), (0, 0)])
mls = MultiLineString(lines=[ls1, ls2, ls3])
%timeit ix.intersect_linestring(mls)
result = ix.intersect_linestring(mls)
###Output
_____no_output_____
###Markdown
Plot the result
###Code
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_linestring(result, ax=ax)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
%timeit ixs.intersect_linestring(mls)
###Output
7.48 ms ± 88.7 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
[MultiPoint with regular grid](top)MultiPoint to intersect with
###Code
mp = MultiPoint(points=[Point(50.0, 0.0), Point(45., 45.),
Point(10., 10.), Point(150., 100.)])
%timeit ix.intersect_point(mp)
result = ix.intersect_point(mp)
result
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sgr.plot(ax=ax)
ix.plot_point(result, ax=ax, s=50)
for irow, icol in result.cellids:
h2, = ax.plot(sgr.xcellcenters[0, icol], sgr.ycellcenters[irow, 0], "kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
Same as before, the intersect for structured grids can also be performed with a different method optimized for structured grids
###Code
ixs = GridIntersect(sgr, method="structured")
%timeit ixs.intersect_point(mp)
ixs.intersect_point(mp)
###Output
_____no_output_____
###Markdown
[Vertex Grid](top)
###Code
cell2d = [[0, 83.33333333333333, 66.66666666666667, 3, 4, 2, 7],
[1, 16.666666666666668, 33.333333333333336, 3, 4, 0, 5],
[2, 33.333333333333336, 83.33333333333333, 3, 1, 8, 4],
[3, 16.666666666666668, 66.66666666666667, 3, 5, 1, 4],
[4, 33.333333333333336, 16.666666666666668, 3, 6, 0, 4],
[5, 66.66666666666667, 16.666666666666668, 3, 4, 3, 6],
[6, 83.33333333333333, 33.333333333333336, 3, 7, 3, 4],
[7, 66.66666666666667, 83.33333333333333, 3, 8, 2, 4]]
vertices = [[0, 0.0, 0.0],
[1, 0.0, 100.0],
[2, 100.0, 100.0],
[3, 100.0, 0.0],
[4, 50.0, 50.0],
[5, 0.0, 50.0],
[6, 50.0, 0.0],
[7, 100.0, 50.0],
[8, 50.0, 100.0]]
tgr = fgrid.VertexGrid(vertices, cell2d)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(modelgrid=tgr)
pmv.plot_grid(ax=ax)
###Output
_____no_output_____
###Markdown
[Polygon with triangular grid](top)
###Code
ix2 = GridIntersect(tgr)
result = ix2.intersect_polygon(p)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix.plot_polygon(result, ax=ax)
# only cells that intersect with shape
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[LineString with triangular grid](top)
###Code
result = ix2.intersect_linestring(mls)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_linestring(result, ax=ax, lw=3)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid], "kx", label="centroids of intersected gridcells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____
###Markdown
[MultiPoint with triangular grid](top)
###Code
result = ix2.intersect_point(mp)
result
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
pmv = fplot.PlotMapView(ax=ax, modelgrid=tgr)
pmv.plot_grid()
ix2.plot_point(result, ax=ax)
for cellid in result.cellids:
h2, = ax.plot(tgr.xcellcenters[cellid], tgr.ycellcenters[cellid],
"kx", label="centroids of intersected cells")
ax.legend([h2], [i.get_label() for i in [h2]], loc="best");
###Output
_____no_output_____ |
05 - More data - the adult dataset.ipynb | ###Markdown
Using the adult dataset
###Code
adult = pd.read_csv("data/adult.csv", index_col=0)
adult.head()
adult.income.value_counts()
adult.income.value_counts().plot(kind="barh")
adult.education.value_counts()
adult.groupby("income")
adult.groupby("income")['education'].value_counts()
education_counts = adult.groupby("income")['education'].value_counts()
education_counts.unstack("income")
unstacked_education = education_counts.unstack("income")
unstacked_education.plot(kind="barh")
(unstacked_education / unstacked_education.sum(axis=0)).plot(kind="barh")
unstacked_education.columns
plt.figure()
(unstacked_education[" >50K"] / unstacked_education.sum(axis=1)).plot(kind="barh")
###Output
_____no_output_____
###Markdown
ExerciseGroup the data by gender, and compare the income distributions over genders.Do a similar plot for some of the other variables.
###Code
# solution
###Output
_____no_output_____ |
sslinearregression.ipynb | ###Markdown
Linear regression is used to predict the value of an outcome variable Y based on one or more input predictor variables X. The aim is to establish a linear relationship (a mathematical formula) between the predictor variable(s) and the response variable.
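For reference, the closed-form least-squares estimates computed by the `slope_intercept` helper below can be written in terms of sample means as $$\hat{m} = \frac{\bar{x}\,\bar{y} - \overline{xy}}{\bar{x}^{2} - \overline{x^{2}}}, \qquad \hat{b} = \bar{y} - \hat{m}\,\bar{x},$$ where a bar denotes a sample mean, e.g. $\overline{xy} = \frac{1}{n}\sum_{i} x_i y_i$. This is the usual expression $\frac{\overline{xy}-\bar{x}\bar{y}}{\overline{x^2}-\bar{x}^2}$ with numerator and denominator both negated, so the two forms give the same slope.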
###Code
import numpy as np
import math
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import style
import pandas as pd
import sklearn #for linear and other models
import warnings
from sklearn import linear_model
from sklearn.model_selection import train_test_split
%matplotlib inline
#two lists xs and ys
xs=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
ys=[23,24,25,26,27,28,29,30,34,45,46,51,56,57,58,62,64,67,72,75,77,81,84,83]
len(xs),len(ys)
plt.scatter(xs,ys)
plt.ylabel("dependent variable")
plt.xlabel("independent variable")
plt.show()
#function for intercept and slope
def slope_intercept(x_val,y_val):
x=np.array(x_val)
y=np.array(y_val)
    m=(((np.mean(x)*np.mean(y))-np.mean(x*y))/((np.mean(x)*np.mean(x))-np.mean(x*x)))
m=round(m,2)
b=(np.mean(y)-np.mean(x)*m)
b=round(b,2)
return m,b
slope_intercept(xs,ys)
m,b=slope_intercept(xs,ys)
reg_line=[(m*x)+b for x in xs]
plt.scatter(xs,ys,color="red")
plt.plot(xs,reg_line)
plt.title("making a regression line")
plt.xlabel("independent variable")
plt.ylabel("dependent variable")
plt.show()
#rmse
import math
def rmsm(y1,y_h):
y_actual=np.array(y1)
y_pred=np.array(y_h)
error=(y_actual-y_pred)**2
error_mean=round(np.mean(error))
err_sq=math.sqrt(error_mean)
return err_sq
rmsm(ys,reg_line)
import scipy.stats as stats
import matplotlib.pyplot as plt
import sklearn
import statsmodels.api as sm
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("poster")
#special matplotlib argument for improved plots
from matplotlib import rcParams
from sklearn.datasets import load_boston
boston=load_boston()
df_x=pd.DataFrame(boston.data,columns=boston.feature_names)#data frame for independent variables
df_y=pd.DataFrame(boston.target)#dependent variable
print(boston.keys())
df_x.head(13)
df_y.head(10)
df_x.shape #tells number of rows and columns
print(boston.data.shape)
names=[i for i in list(df_x)]
names
print(boston.feature_names)
print(boston.DESCR)
#In a dataset a training set is implemented to build up a model, while a test (or validation) set
#is to validate the model built.
regr=linear_model.LinearRegression()
x_train,x_test,y_train,y_test=train_test_split(df_x,df_y,test_size=0.2,random_state=4)
#use 20% of total data for data test
x_train.head()
#fit linear regression model to training data set
regr.fit(x_train,y_train)
regr.intercept_
#the coefficients
print("coefficients are:",regr.coef_)
#mean squared error
print("mean squared error: ",np.mean((regr.predict(x_test)-y_test)**2))
#variance score:1 is perfect prediction
print("variance score:",regr.score(x_test,y_test))
#coefficients of Independent variables (slope (m) of the regression line)
regr.coef_[0].tolist()
#attach slopes to these variables
pd.DataFrame(list(zip(names,regr.coef_[0].tolist())),columns=['names','coefficients'])
#plotting predicted x_test,y_test values
style.use("bmh")
plt.scatter(regr.predict(x_test),y_test)
plt.show()
#calculate p value
import statsmodels.api as sm
from statsmodels.sandbox.regression.predstd import wls_prediction_std
modedl1=sm.OLS(y_train,x_train)
result=modedl1.fit()
print(result.summary())
#select variables with p-values <0.5
model2=sm.OLS(y_train,x_train[['CRIM','ZN','CHAS','RM','DIS','RAD','TAX','PTRATIO','B','LSTAT']])
result2=model2.fit()
print(result2.summary())
#deal with multicollinearity
import seaborn
corr_df=x_train.corr(method='pearson')
print("-------------------create a correlation plot-------------------")
#create a mask to display only lower triangle
mask=np.zeros_like(corr_df)
mask[np.triu_indices_from(mask)]=True
#create heatmap using seaborn lib
#list if colormaps (parameter'cmap'is available)
seaborn.heatmap(corr_df,cmap='Accent',vmax=1.0,vmin=-1.0,mask=mask,linewidths=2.5)
#show the plot
plt.yticks(rotation=0)
plt.xticks(rotation=90)
plt.show()
print("----------------------------------------end----------------------------------------!!")
###Output
----------------------------------------end----------------------------------------!!
|
Notebooks/UFMS_DATASET.ipynb | ###Markdown
**STUDENT STATUS TYPES**
###Code
df['situacao'].value_counts()
df.groupby(by=['curso','unidade']).count().rename(columns={'nome':'quantidade'})[['quantidade']]
df.groupby(by=['curso','unidade'])[['nome']].count()
df.groupby(by=['curso'])['curso'].count()
df['curso'].value_counts().sort_values()
###Output
_____no_output_____
###Markdown
**STUDENTS WITH REGULAR ENROLLMENT**
###Code
matriculados = df[df['situacao']=='REGULARMENTE MATRICULADO NO PERÍODO']
matriculados.count()
matriculados.to_excel('../Dados/generated/Alunos-Matriculados.xlsx',index=False)
cppp = matriculados[matriculados['unidade'] == 'CPPP']
rga_2017 = cppp[cppp['rga_descaracterizado'].str.contains('2017')]
rga_2017[rga_2017['curso'].str.contains('SISTEMAS')]
rga_2018 = cppp[cppp['rga_descaracterizado'].str.contains('2018')]
rga_2018[rga_2018['curso'].str.contains('SISTEMAS')].count()
rga_2019 = cppp[cppp['rga_descaracterizado'].str.contains('2019')]
rga_2019[rga_2019['curso'].str.contains('CIÊNCIA')].count()
rga_2019[rga_2019['curso'].str.contains('SISTEMAS')].count()
rga_2019[rga_2019['curso'].str.contains('MATEMÁTICA')].count()
rga_2019[rga_2019['curso'].str.contains('PEDAGOGIA')].count()
###Output
_____no_output_____ |
code/nmvoc4.3.2_map_mozart_fractions.ipynb | ###Markdown
This notebook calculates gridded maps with mass fractional contribution by sector to total NMVOC for MOZART VOCs species.
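Concretely, for each grid cell and month the fractional map for MOZART species $i$ is $$f_i = \frac{m_i}{\sum_j m_j},$$ where $m_i$ is the emitted NMVOC mass mapped to species $i$ and the sum runs over all MOZART species; grid cells where the total NMVOC mass is zero are filled with 0 in the code below.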
###Code
import xarray as xr
import numpy as np
import os
import pandas as pd
#data dir paths.
voc_dir='/geos/d21/s1878599/edgarv5_process/monthly_nmvocs4.3.2_mass_MOZART/'
save_dir='/geos/d21/s1878599/edgarv5_process/monthly_nmvocs4.3.2_fractional_MOZART/'
#create save directory if missing.
if not os.path.isdir(save_dir):
!mkdir -p $save_dir
###Output
_____no_output_____
###Markdown
Read VOCs
###Code
#get all vocs files as ordered dictionary of xarray datasets.
def get_vocs_arr(voc_dir):
'''
    collect all VOC files into a dictionary of xarray datasets.
    voc_dir: path to where the VOC files are.
    output: dict of xarray datasets, one per VOC.
'''
vocs={}
for f in os.listdir(voc_dir):
vname=f.split('_')[3]
vocs.update({vname:xr.open_dataset(voc_dir+f)})
return vocs
vocs=get_vocs_arr(voc_dir)
# get total nmvoc summing all vocs contributions.
tot_nmvoc=sum(vocs.values())
tot_nmvoc
###Output
_____no_output_____
###Markdown
Create fractional maps
###Code
for k in list(vocs.keys()):
print(k)
    ds=(vocs[k]/tot_nmvoc).fillna(0.0) # divide each VOC mass by total NMVOC mass.
ds.attrs['title']='Monthly fractional contribution of ' + k
ds.to_netcdf(save_dir+'monthly_v432_2010_fraction_'+ k + '_.0.1x0.1.nc',format='NETCDF3_64BIT') #save new file.
###Output
HCOOH
MEK
CH3COOH
BIGALK
C3H6
CH3COCH3
CH3CHO
C2H2
C2H5OH
BENZENE
XYLENES
BIGENE
CH2O
C2H6
TOLUENE
C2H4
C3H8
CH3OH
###Markdown
Check total vocs
###Code
#get all vocs files as ordered dictionary of xarray datasets.
def get_vocs_arr_frac(voc_dir):
vocs={}
for f in os.listdir(voc_dir):
vname=f.split('_')[4]
vocs.update({vname:xr.open_dataset(voc_dir+f)})
return vocs
vocs_frac=get_vocs_arr_frac(save_dir)
# get total nmvoc summing all vocs contributions.
for k,v in vocs_frac.items():
vocs_frac[k]=v*tot_nmvoc
tot_nmvoc_calc=sum(vocs_frac.values())
tot_nmvoc_calc
#test
xr.testing.assert_allclose(tot_nmvoc_calc, tot_nmvoc)
###Output
_____no_output_____ |
30_num_int/01_first_order.ipynb | ###Markdown
1차 적분First Order Numerical Integration [](https://www.youtube.com/watch?v=1p0NHR5w0Lc) 다시 면적이 1인 반원을 생각해 보자.Again, let's think about the half circle with area of 1. $$\begin{align} \pi r^2 &= 2 \\ r^2 &= \frac{2}{\pi} \\ r &= \sqrt{\frac{2}{\pi}}\end{align}$$
###Code
import pylab as py
import numpy as np

r = py.sqrt(2.0 / py.pi)
def half_circle(x):
return py.sqrt(np.abs(r**2 - x**2))
###Output
_____no_output_____
###Markdown
$$y = \sqrt{r^2 - x^2}$$
###Code
x_array = py.linspace(-r, r)
y_plus = half_circle(x_array)
py.fill_between(x_array, y_plus)
py.axis('equal')
py.grid(True)
###Output
_____no_output_____
###Markdown
이번에는 사다리꼴 규칙을 이용해서 구해 보기로 하자.This time, let's use the trapezoid rule to find its area. 사다리꼴 규칙Trapezoid Rule 다음과 같은 사다리꼴을 생각해 보자.Let's think about a trapezoid as follows.
###Code
x_array = (0, 1)
y_array = (1, 2)
py.fill_between(x_array, y_array)
py.axis('equal')
py.axis('off')
py.text(-0.25, 0.5, '$y_i$')
py.text(1.15, 1, '$y_{i+1}$')
py.text(0.5, -0.3, '$\Delta x$')
###Output
_____no_output_____
###Markdown
사다리꼴의 면적은 다음과 같다.Area of a trapezoid is as follows. $$a_i=\frac{1}{2} \left( y_i + y_{i+1} \right) \Delta x$$ 1차 적분First order numerical integration 마찬가지로 일정 간격으로 $x$ 좌표를 나누어 보자.Same as before, let's divide $x$ coordinates in a constant interval.
###Code
d = r * 2.0
n = 10
x_interval = d / n
x_array = py.linspace(-r, r)
y_plus = half_circle(x_array)
x_array_bar = py.arange(-r, r+x_interval*0.1, x_interval)
y_array_bar = half_circle(x_array_bar)
x_interval = x_array_bar[1]-x_array_bar[0]
py.fill_between(x_array, y_plus)
xp, yp = x_array_bar[0], y_array_bar[0]
for x, y in zip(x_array_bar.tolist()[1:], y_array_bar.tolist()[1:]):
py.fill_between((xp, x), (yp, y), alpha=0.5, color=py.random((1, 3)))
xp, yp = x, y
py.axis('equal')
py.grid(True)
###Output
_____no_output_____
###Markdown
사다리꼴의 면적을 하나씩 구해서 더해보자.Let's accumulate the area of trapezoids. $$ Area = \sum_{k=0}^{n-1} F_k$$ $$ F_k = \frac{\Delta x}{2}\left[f(x_k)+f(x_{k+1})\right]$$
###Code
def num_int_1(f, xi, xe, delta_x):
x_array = py.arange(xi, xe+delta_x*0.1, delta_x)
integration_result = 0.0
xp = x_array[0]
yp = f(xp)
for x_i in x_array[1:]:
y_i = f(x_i)
area_i = 0.5 * (yp + y_i) * (x_i - xp)
xp, yp = x_i, y_i
integration_result += area_i
return integration_result
n = 10
result = num_int_1(half_circle, -r, r, 2*r/n)
print('result =', result)
###Output
_____no_output_____
###Markdown
예상한 값 1에 더 비슷한 값을 얻기 위해 더 잘게 나누어 보자To obtain the result closer to the expected value of 1, let's divide with a narrower interval.
###Code
n = 100
result = num_int_1(half_circle, -r, r, 2*r/n)
print('result =', result)
###Output
_____no_output_____
###Markdown
도전 과제 1 : 다른 조건이 같을 때 0차 적분과 사다리꼴 적분의 오차를 비교해 보시오. 필요하면 해당 파이썬 함수를 복사하시오.Try this 1 : Compare the errors of the zeroth and first order integrations of the half circle example above using the same conditions. Duplicate the python function if necessary. 도전 과제 2 : 길이 $L=3[m]$ 인 외팔보가 분포 하중 $\omega=50sin\left(\frac{1}{2L}\pi x\right)[N/m]$을 받고 있을 때 전단력과 굽힘모멘트를 구하시오.Try this 2 : Calculate shear force and bending moment of a cantilever with length $L=3m$ under distributed load $\omega=50sin\left(\frac{1}{2L}\pi x\right)[N/m]$. (ref : C 4.4, Pytel, Kiusalaas & Sharma, Mechanics of Materials, 2nd Ed, SI, Cengage Learning, 2011.) Final Bell마지막 종
###Code
# stackoverfow.com/a/24634221
import os
os.system("printf '\a'");
###Output
_____no_output_____ |
machine_learning/lesson 3 - Neural Networks/Intro_to_CNNs.ipynb | ###Markdown
Introduction to Convolutional Neural Networks What are Convolutional Neural Networks (CNNs)?A Convolutional Neural Network is a type of neural network and Deep Learning algorithm that has been very popular in the last 10 years for its great contribution to improving Computer Vision (CV). CNNs are excellent at classifying objects in images and are being applied in many areas, especially Medicine! How are CNNs contributing to Medicine?Convolutional Neural Networks (CNNs) are frequently used to help doctors identify cancer and other diseases in medical images of patients, as they have proven to be nearly as accurate as a team of medical experts at diagnosing patients. This tech enables doctors to provide faster and better treatment, cutting a lot of time out of the diagnosis process, and saving lives as a result. CNNs are being studied and improved at the best medical universities around the world, in the hope of applying them to the diagnosis process, especially in areas where medical specialists are rare. What is different between a CNN and a normal Neural Network?The main difference between these two algorithms is in how they learn features. Specifically, the difference occurs in the *Feature Learning* section from the CNN image above. In that section, we see that an image is inserted at the input and goes through a process where information is picked from the image and then summarized through **Convolutional Layers**. The information being picked consists of features in the image, such as the lights, tire, and shape of the car in the image. The reason it is called *Feature Learning* is because the way it looks for features in the images is automated as well: the weights used in the convolutional layers are trained and improved over time.You can also see that the input image gets smaller over time, which is good since we want to keep only the most important features. This action of reducing the image down to its most significant features is called *encoding*. So when we use CNNs in this way, we are *encoding* the data in the images to be used for classifying whether an image contains a disease or not.The *Classification* section of the CNN is just a normal (linear) Neural Network where the encoded features are used to give predictions at the end. The encoded features are flattened because the flattened features will be the input of the input layer. So you can see that a CNN is just a special type of Neural Network that learns to extract the most important information from the images. Loading the data from Kaggle[Click here to access the dataset](https://www.kaggle.com/preetviradiya/covid19-radiography-dataset)If you do not have an account with Kaggle, please make an account with them.After making an account, you need to download a *kaggle.json* file on your local machine that provides the API credentials to download the dataset.To download this file follow the instructions below:1. Click on your account in the top right2. Click account settings3. Scroll down to the 'API' section4. Click 'Create New API Token' then it should downloadThe *kaggle.json* file is needed when it asks you to upload a file below before it downloads the dataset to this Colab.
###Code
from google.colab import files
from IPython.utils import io
import os
files.upload()
os.system("mkdir -p ~/.kaggle")
os.system("cp kaggle.json ~/.kaggle/")
os.system("chmod 600 ~/.kaggle/kaggle.json")
!kaggle datasets download -d preetviradiya/covid19-radiography-dataset
with io.capture_output() as captured:
!unzip covid19-radiography-dataset.zip && rm covid19-radiography-dataset.zip
###Output
_____no_output_____
###Markdown
Looking at the distribution of our classesThe reason to look at the distribution is to understand how the number of samples per class can skew the predictions of the model. We see that there is an overwhelming number of normal X-Ray images compared to the other categories.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import PIL
%matplotlib inline
dataframe = pd.read_csv("/content/metadata.csv")
dataframe['label'].value_counts().plot.bar(color=['b', 'r', 'orange', 'g'])
plt.xticks(rotation=45)
plt.title('Class Distribution')
plt.show()
###Output
_____no_output_____
###Markdown
Data PreparationJust as in our other projects/lessons, image data needs to be prepared before it can be used with PyTorch. One thing you might notice is the inclusion of PyTorch's [*Dataset*](https://pytorch.org/docs/stable/data.html?highlight=dataset#torch.utils.data.Dataset) and [*DataLoader*](https://pytorch.org/docs/stable/data.html?highlight=dataloader#torch.utils.data.DataLoader) objects. These two classes allow us to easily pair images with their labels and iterate through a for-loop to get a single batch of data, which we can customize. Our data is organized into folders named after the class that each subdirectory's images belong to. Because of this we use [*ImageFolder*](https://pytorch.org/vision/stable/datasets.html#imagefolder), which returns a PyTorch Dataset ready to be loaded into dataloaders. Image ProcessingIf you remember from the past lessons where we used the *MNIST* dataset, each image was flattened as input into the neural network, which is an example of image processing, but we didn't get into other transformations. PyTorch has many built-in image transformations that are very helpful for the performance of Convolutional Neural Networks. These are my favorites below:1. [Resize](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize): Resizes the images to a desired size while keeping their aspect ratio.2. [Center Crop](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.CenterCrop): Crops an image in the center to the desired size (Height x Width).3. [Random Resized Crop](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.RandomResizedCrop): This takes a random crop of your image with a random aspect ratio, then resizes the random crop to your desired size. This is helpful for getting different variations of the objects in your images that it has to predict for.4. [Random Horizontal Flip](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.RandomHorizontalFlip): Flips images horizontally with a probability that you provide. This is really helpful to make sure the CNN learns features that are not tied to a fixed location inside the images. **This project does not use it because it is conceivable that disease can affect certain areas of the lungs, therefore we want the CNN to learn features that are related to location.**5. [Random Rotation](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.RandomRotation): This randomly rotates an image given your desired degrees in the range (-degrees, +degrees).6. [Grayscale](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Grayscale): This converts colored images to black-and-white images, which is helpful if you want to make your data smaller so that training is faster and uses less memory on your machine.7. [ColorJitter](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.ColorJitter): This randomly changes the brightness, contrast, saturation and hue of each image. This brings some variation to the color of the images, which can help the CNN generalize the learning process.8. 
[Padding](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Pad): This adds a border layer around the image with a value of your choice (default is 0), which can be helpful when constructing your Convolutional Layers.You will see we only used the transformations Resize and ColorJitter below; we only used these because this dataset contains X-Ray images of infected lungs and it is important to keep the images as they are, based on the assumption that diseases may affect different areas of the lungs. Therefore any transformations that move content around based on location may inhibit the learning process of the model. The images were resized for the purpose of constructing the Convolutional Layers of the CNN, and changing the contrast randomly can perhaps highlight some infected areas in the images that the CNN can learn.
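For comparison, a heavier augmentation pipeline for a task where image location does not matter might look like the sketch below. This cell is purely illustrative: the transform choices and parameter values are arbitrary examples, and it is not used anywhere else in this notebook.
###Code
# Illustrative sketch only: NOT used for this X-Ray dataset, because flips and rotations
# would throw away the location information we want the CNN to learn.
import torchvision.transforms as T

example_augmentation = T.Compose([
    T.RandomResizedCrop(224),                     # random crop, resized to 224x224
    T.RandomHorizontalFlip(p=0.5),                # flip half of the images horizontally
    T.RandomRotation(degrees=10),                 # rotate by up to +/- 10 degrees
    T.ColorJitter(brightness=0.2, contrast=0.2),  # random brightness/contrast changes
    T.ToTensor(),
    T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
###Output
_____no_output_____
###Markdown
The cell below builds the gentler pipeline we actually use for this dataset (Resize and ColorJitter only) and then splits the data into training, validation and test loaders.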
###Code
import torch
from torchvision.datasets import ImageFolder
import torchvision.transforms as transforms
root = "/content/COVID-19_Radiography_Dataset/COVID-19_Radiography_Dataset"
# ImageNet means and stds to normalize images in entire dataset
means = (0.485, 0.456, 0.406)
stds = (0.229, 0.224, 0.225)
# Add more transforms tomorrow
transforms = transforms.Compose([
# Image size is being resized to 3x224x224
transforms.Resize(224),
# Contrast Factor is being selected randomly between [0.5, 1.5]
transforms.ColorJitter(contrast=0.5),
# Converting images to PyTorch tensors
transforms.ToTensor(),
# Normalizing images with Means and STDs of ImageNet
transforms.Normalize(means, stds)
])
# Using ImageFolder to read in the images with their labeled folders
dataset = ImageFolder(root, transform=transforms)
# Calculating the amount of images to split the dataset
trainToTestRatio = 0.8
trainSize = int(trainToTestRatio * len(dataset))
testSize = len(dataset) - trainSize
# Calculating the amount to split the training set into validation set
valSize = int(trainSize * 0.1) # Taking 10% of training data
trainSize = trainSize - valSize # Updating the amount of training data
# Splitting Data
training_data, val_data, testing_data = torch.utils.data.random_split(dataset, lengths=[trainSize, valSize, testSize])
# Loading Data
batch_size = 16
train_dataloader = torch.utils.data.DataLoader(training_data, batch_size=batch_size, shuffle=True)
test_dataloader = torch.utils.data.DataLoader(testing_data, batch_size=batch_size)
val_dataloader = torch.utils.data.DataLoader(val_data, batch_size=batch_size)
# Using dictionaries to easily access all the dataloaders and datasets
loaders = {'train':train_dataloader, 'valid':val_dataloader, 'test':test_dataloader}
datasets = {'train':training_data, 'valid':val_data, 'test':testing_data}
print("These are the classes:", dataset.class_to_idx)
import torchvision.transforms.functional as F
# Function to reverse normalization of the images
def unNormalizeTensor(tensor, means, stds):
tensor_duplicate = tensor.detach().clone()
for channel, mean, std in zip(tensor_duplicate, means, stds):
channel.mul_(std).add_(mean)
return tensor_duplicate
# Printing the dimensions of each batch
images, labels = next(iter(train_dataloader))
print("Batch Image Dimensions:", images.size())
print("Batch Label Dimensions:", labels.size())
###Output
Batch Image Dimensions: torch.Size([16, 3, 224, 224])
Batch Label Dimensions: torch.Size([16])
###Markdown
Images without Normalization
###Code
# Printing images and their labels
print("\nImages without Normalization:")
idx_to_class = {value:key for key,value in dataset.class_to_idx.items()}
plt.figure(figsize=(10, 9))
for i in range(5):
plt.subplot(1, 5, i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
image = F.to_pil_image(unNormalizeTensor(images[i], means, stds))
plt.imshow(image)
plt.xlabel(idx_to_class[labels[i].item()])
###Output
Images without Normalization:
###Markdown
Images with NormalizationYou can see that normalizing the data can enhance some features in the lungs. And it looks pretty cool!
###Code
print("\nImages with Normalization:")
plt.figure(figsize=(10, 9))
for i in range(5):
plt.subplot(1, 5, i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
image = F.to_pil_image(images[i])
plt.imshow(image)
plt.xlabel(idx_to_class[labels[i].item()])
###Output
Images with Normalization:
###Markdown
Constructing the Convolutional Neural Network Convolutional LayersThe GIF above shows the convolutional process when learning features from the images. The input is an image with 3 color channels for RGB, and the filters or kernels scan across it gathering information. There is a lot of customizability through hyperparameters.The Three Hyperparameters:1. Kernel Size: Each kernel above is a 2-D matrix or 2-D array, which often has square dimensions (i.e., Height = Width), with weights inside it that are trainable (i.e., our algorithm can learn the best weights for the problem we want to solve). Kernels are responsible for finding features inside an image and condensing that information in the output, which is why they scan over the image. If you use a small kernel size, you retain a lot of information and reduce the memory cost of storing the kernel weights. On the other hand, if you have a larger kernel size, you generalize a lot of information in the image and reduce the output size of the learned features, but require more memory to store the kernel weights. In this GIF, the kernel size is 3, since it is 3x3, so this would be considered a small kernel size.2. Stride: The number of pixels the kernel hops between scans of the image. For example, when the stride is 1, the kernel only moves to the next pixel to the right at each scan, as shown in the GIF. But if the stride is two, the filter moves two pixels to the right. The greater the stride, the smaller the output feature maps will be. Sometimes, people increase the stride to reduce the memory required by the CNN model. 3. Padding: Padding, as described above in the image processing section, is used to add a border around the image (think blank pixels around the border of the image). This can be used to ensure that the shape of the input is compatible with the CNN model. The image above demonstrates how the output features are affected by different padding and stride settings, keeping a constant kernel size of 3x3. On the left, no padding is used and the stride is 2 (`padding = 0, stride = 2`). The middle example shows that the output feature size increases to 3x3 when padding is 1 (i.e., adding a border of blank pixels to the input) and the stride is 2. The benefit of having padding is that you can retain information from the borders while also retaining the dimensions of the input.The outputs of the convolutional layers are called *Feature Maps*, and you can control the number of feature maps that a convolutional layer produces by adjusting the `out_channels` parameter in PyTorch. This tells PyTorch how many individual kernels to make for a given layer. For example, for the first layer I said that I want to return 12 feature maps, so there are 12 kernels used. The *in_channels* parameter details the depth of the input of the layer and the depth of the kernels; since the input is an RGB image, the input has a depth of 3 and the kernels will also have a depth of 3, as the GIF details.> Note: In CNNs, *depth* is a synonym for the number of feature maps.After the feature maps are output, we pass each one through an activation function. Generally, we use this function to find non-linear patterns in our data. The most common activation function used in CNNs is *ReLU*, shown above; it is known to produce accurate results while training faster, which are both great benefits! After using ReLU, the feature maps are condensed using pooling, which is described in the next section below.
Pooling LayersPooling is another crucial part of the *Feature Learning* process, and you can see from the GIF above that it doesn't look so different from the convolutional process above. That's because they are very similar. Pooling layers use kernels and strides to scan the image for the purpose of reducing the dimensions of their input. Usually the input into a pooling layer is the set of feature maps of a convolutional layer.The pooling technique used here is *MaxPool*, where the kernel selects the maximum value of the scanned area it is currently looking at, as shown in the GIF above. Another popular pooling technique is called *AveragePooling*, where the kernel computes the average of its current scanned area and outputs that average. SummaryThere are five steps that are usually taken with CNNs:1. Use a convolutional layer to compute feature maps from the input. 2. Use ReLU to find non-linear patterns in the feature maps.3. Use a pooling technique to reduce dimensionality and save memory.4. Repeat steps 1-3 for each convolutional layer.5. Flatten the last output feature maps so they can be used as input into the fully-connected neural network used for classification.If you want to understand how the Convolutional Layers work in more depth, here are two great resources:1. [CNN Explainer (High-Level)](https://poloclub.github.io/cnn-explainer/): I recommend everyone go over this to understand the convolutional process better. It also includes lots of interactive visualizations for those of you who are visual learners!2. [Stanford's CS231n (Very deep and concise)](https://cs231n.github.io/convolutional-networks/#conv): This contains all the information you could need for CNNs, and I would highly recommend you go over some things you are confused about if you are brave enough (lots of math in this resource)! There are also really great formulas to figure out what your hyperparameters should be when creating your own CNNs, and I refer back to this resource a lot.
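To make the shape arithmetic concrete, here is a small standalone sketch (illustrative only, with arbitrary layer sizes) that applies one convolution and one max-pool to a random 224x224 RGB input and prints the resulting shapes, following output_size = (input_size - kernel_size + 2 * padding) / stride + 1.
###Code
import torch
import torch.nn as nn

# Dummy batch: one RGB image of size 224x224 (random values, used only to check shapes)
x = torch.randn(1, 3, 224, 224)

conv = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=3, padding=1, stride=1)
pool = nn.MaxPool2d(kernel_size=2, stride=2)

feature_maps = torch.relu(conv(x))  # (224 - 3 + 2*1)/1 + 1 = 224 -> shape (1, 12, 224, 224)
pooled = pool(feature_maps)         # 224 / 2 = 112               -> shape (1, 12, 112, 112)

print(feature_maps.shape)
print(pooled.shape)
###Output
_____no_output_____
###Markdown
The full model below stacks three of these convolution, ReLU and max-pool stages before flattening the result into the fully connected classifier.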
###Code
import torch.nn as nn
class CNN_Model(nn.Module):
# Constructing model
def __init__(self):
super(CNN_Model, self).__init__()
# Defining Batch Normalization
self.norm = nn.BatchNorm2d(num_features=3)
# Defining Convolutional Layers
self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=3, padding=1, stride=1) # Output Dimensions = (12, 224, 224)
self.conv2 = nn.Conv2d(in_channels=12, out_channels=16, kernel_size=3, padding=1, stride=1)
self.conv3 = nn.Conv2d(in_channels=16, out_channels=20, kernel_size=3, padding=1, stride=1)
# Defining MaxPool Layer
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
# Defining Fully Connected Layers
self.fc1 = nn.Linear(self.conv3.out_channels * 28 * 28, 3920) # Input Dimensions before Flattening = (20, 28, 28)
self.fc2 = nn.Linear(3920, 980)
self.fc3 = nn.Linear(980, 245)
self.fc4 = nn.Linear(245, 62)
self.fc5 = nn.Linear(62, 4) # Output Layer with 4 ending nodes for the four classes
# Activation Function to use throughout
self.relu = nn.ReLU()
# Dropout to prevent overfitting
self.dropout = nn.Dropout(p=0.5)
def forward(self, input):
# Batch Normalization for faster training
x = self.norm(input)
# First Convolutional Layer
x = self.relu(self.conv1(x)) # Output Shape = (12, 224, 224)
x = self.maxpool(x) # Output Shape = (12, 112, 112)
# Second Convolutional Layer
x = self.relu(self.conv2(x)) # Output Shape = (16, 112, 112)
x = self.maxpool(x) # Output Shape = (16, 56, 56)
# Third Convolutional Lay
x = self.relu(self.conv3(x)) # Output Shape = (20, 56, 56)
x = self.maxpool(x) # Output Shape = (20, 28, 28)
# Flattening Tensor
x = x.view(-1, 20*28*28)
# Fully Connected Layers
x = self.relu(self.fc1(x))
x = self.dropout(x)
x = self.relu(self.fc2(x))
x = self.dropout(x)
x = self.relu(self.fc3(x))
x = self.dropout(x)
x = self.relu(self.fc4(x))
x = self.fc5(x)
return x
###Output
_____no_output_____
###Markdown
Define Optimizer and Loss FunctionThis code also checks if a GPU is available to train the network
###Code
device = "cuda" if torch.cuda.is_available() else "cpu"
# Intializing model and having it use a GPU if available
model = CNN_Model()
model.to(device)
# Defining Optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# Defining Loss Function; CE Loss because we have multiple categories
criterion = nn.CrossEntropyLoss()
###Output
_____no_output_____
###Markdown
Train the model
###Code
def train(n_epochs, loaders, datasets, model, optimizer, criterion, device, save_path):
"""returns trained model"""
print("Starting Training...")
# Initialize tracker for minimum validation loss
valid_loss_min = np.Inf
losses = []
valid_losses = []
for epoch in range(1, n_epochs+1):
# Initialize variables to monitor training and validation loss
train_loss = 0.0
valid_loss = 0.0
###################
# train the model #
###################
model.train()
for images, labels in loaders['train']:
# Move to GPU if available
images, labels = images.to(device), labels.to(device)
# Zero the parameter gradients
optimizer.zero_grad()
# Calculating the output
output = model(images)
# Caluculating the Loss
loss = criterion(output, labels)
# Calculating the gradients
loss.backward()
# Performing Gradient Descent Step
optimizer.step()
# Saving the training loss
train_loss += loss.data
######################
# validate the model #
######################
model.eval()
for images, labels in loaders['valid']:
# Move to GPU if available
images, labels = images.to(device), labels.to(device)
# Getting the output
output = model(images)
# Calculating the Loss
loss = criterion(output, labels)
# Saving the validation loss
valid_loss += loss.data
# Averaging the losses
train_loss /= len(datasets['train'])
valid_loss /= len(datasets['valid'])
# Appending the losses to plot afterwards
losses.append(train_loss.item())
valid_losses.append(valid_loss.item())
# Print training/validation statistics
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch,
train_loss,
valid_loss))
# Save the model if validation loss has decreased
if valid_loss < valid_loss_min:
print('Saving Model')
torch.save(model.state_dict(), save_path)
valid_loss_min = valid_loss
# return trained model, and saved losses
return model, np.array(losses), np.array(valid_losses)
num_epochs=10
model, losses, valid_losses = train(10, loaders, datasets, model, optimizer, criterion, device, 'saved_model.pt')
def plot_losses(losses, title):
plt.plot(losses)
plt.xlabel("Epochs")
plt.ylabel("Cross Entropy Loss")
plt.title(title)
plt.show()
plot_losses(losses, title='Training Loss')
plot_losses(valid_losses, title='Validation Loss')
###Output
_____no_output_____
###Markdown
Test the Model
###Code
def getPredsFromLogits(logits):
# Using softmax to get an array that sums to 1, and then getting the index with the highest value
return torch.nn.functional.softmax(logits, dim=1).argmax(dim=1)
def test(loaders, model, criterion, device):
# monitor test loss and accuracy
test_loss = 0.0
correct = 0
total = 0
model.eval()
for images, labels in loaders['test']:
# move to GPU if available
images, labels = images.to(device), labels.to(device)
# forward pass: compute predicted outputs by passing inputs to the model
output = model(images)
# calculate the loss
loss = criterion(output, labels)
# update average test loss
test_loss += loss.data
# convert output probabilities to predicted class
pred = getPredsFromLogits(output)
# compare predictions to true label
correct += pred.eq(labels).sum().item()
total += pred.shape[0]
test_loss /= total
print('Test Loss: {:.6f}\n'.format(test_loss))
print('\nTest Accuracy: %2d%% (%2d/%2d)' % (100. * correct / total, correct, total))
test(loaders, model, criterion, device)
###Output
Test Loss: 0.032939
Test Accuracy: 79% (3367/4233)
###Markdown
How to load a saved modelIt is useful to save our trained model (i.e., like the weights it learned during training) and then reuse it later to prevent having to retrain it, which can take a very long time. This is really important when you want to work on your own projects!
###Code
# First create a new instance of the model class
saved_model = CNN_Model()
# Second, load state dict in the file that was saved then it should work as normal!
saved_model.load_state_dict(torch.load('saved_model.pt'))
# It will be downloaded to your computer as an example for you to save yours later
files.download('saved_model.pt')
###Output
_____no_output_____
###Markdown
SummaryOur model here was able to get a test accuracy of 79%, but it can probably be improved on. What do you think could be changed to improve the test results? You could perhaps add more layers, or change the learning rate or optimizer. Something I noticed is that the model may have overfitted on the images of healthy lungs, so it didn't perform well on lungs that were infected. This provides an area of open creativity for you to explore to better this model.I invite you to use this model as a starting point for your own CNN medical diagnosis model. See if you can update it and get even better results! Feel free to use it on another medical imaging dataset that interests you.Here are some links to more datasets to consider:1. [Stanford's Center for Artificial Intelligence in Medicine and Imaging](https://aimi.stanford.edu/research/public-datasets) (highly recommend this one)2. [Open-Access Medical Image Repositories](https://www.aylward.org/notes/open-access-medical-image-repositories)
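Coming back to the question of how the results could be improved: as one concrete starting point (a sketch under the assumption that the class imbalance seen at the beginning of the notebook is part of the problem), you could weight the loss by inverse class frequency so that mistakes on the rarer classes cost more. The cell below reuses the `dataset` and `device` objects defined earlier; it is an illustration, not a tuned solution.
###Code
import numpy as np
import torch
import torch.nn as nn

# Illustrative sketch: weight each class inversely to how often it appears in the dataset,
# so errors on the less common classes are penalized more than errors on the dominant normal class.
labels = np.array([label for _, label in dataset.samples])
class_counts = np.bincount(labels)
class_weights = class_counts.sum() / (len(class_counts) * class_counts)

weighted_criterion = nn.CrossEntropyLoss(
    weight=torch.tensor(class_weights, dtype=torch.float32).to(device)
)
# weighted_criterion could then be passed to train() in place of criterion.
###Output
_____no_output_____
###Markdown
This is only one idea; experimenting with deeper architectures, different learning rates or schedulers is equally worth trying.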
###Code
###Output
_____no_output_____ |
07_draft_assemblies/assemblies.ipynb | ###Markdown
Analysis of the alignments of draft assemblies using mummer
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from Bio import SeqIO

DELTA = {}
ASSEMBLIES = {}
tig_order = {}
cum_len = {}
for s in ['Jean-Talon_reordered.fasta','S288c.genome.fa','barcode11.cns.fa', 'Jean-Talon.unitigs.fasta', 'Jean-Talon.contigs.fasta']:
ASSEMBLIES[s] = {}
tig_order[s] = []
cl = []
with open(f'/Volumes/MacintoshHD/Dropbox/Jean_Talon/assemblies/{s}') as fi:
for seq in SeqIO.parse(fi, 'fasta'):
ASSEMBLIES[s][seq.id] = seq
tig_order[s].append(seq.id)
cl.append(len(seq.seq))
cl = pd.Series([0]+list(np.cumsum(cl)[:-1]), index=tig_order[s])
cum_len[s] = cl
if s!='Jean-Talon_reordered.fasta':
delta = pd.read_csv(f'/Volumes/MacintoshHD/Dropbox/Jean_Talon/mummer/{s}.coords', sep='\t', skiprows=range(4), index_col=None, header=None)
delta = delta.loc[(delta[4]>=1e4) & (delta[5]>=1e4)].astype({1:int,2:int,2:int,2:int}).sort_values(by=4)
delta['s1'] = delta[0]+cum_len['Jean-Talon_reordered.fasta'].loc[delta[7].values].values
delta['e1'] = delta[1]+cum_len['Jean-Talon_reordered.fasta'].loc[delta[7].values].values
delta['s2'] = delta[2]+cum_len[s].loc[delta[8].values].values
delta['e2'] = delta[3]+cum_len[s].loc[delta[8].values].values
DELTA[s] = delta
fig = plt.figure(figsize=[32,8])
gs = plt.GridSpec(ncols=4, nrows=1, wspace=0.3, left=0.03, bottom=0.08, right=0.96, top=0.9)
assembly_alias = {'Jean-Talon_reordered.fasta': 'Jean-Talon wtdbg2 polished',
'S288c.genome.fa': 'S288C',
'barcode11.cns.fa': 'Jean-Talon wtdbg2 draft (>8kb reads)',
'Jean-Talon.unitigs.fasta': 'Jean-Talon Canu draft unitigs (>8kb reads)',
'Jean-Talon.contigs.fasta': 'Jean-Talon Canu draft contigs (>8kb reads)'}
coord_translocation = np.mean(np.array([1.4e5, 1.47e5])+cum_len['Jean-Talon_reordered.fasta'].loc['ctg6_pilon'])
for ax_idx, s in enumerate(['S288c.genome.fa', 'barcode11.cns.fa', 'Jean-Talon.contigs.fasta', 'Jean-Talon.unitigs.fasta']):
ax = fig.add_subplot(gs[ax_idx])
delta = DELTA[s]
for i in delta.index:
s1, e1, s2, e2 = delta.loc[i, ['s1','e1','s2','e2']]
c = str(1-delta.loc[i,6]/100)
ax.plot([s1,e1], [s2,e2], color=c, lw=1)
for i in cum_len[s]:
ax.axhline(i, lw=0.5, ls=':', color='k')
for i in cum_len['Jean-Talon_reordered.fasta']:
ax.axvline(i, lw=0.5, ls=':', color='k')
ax.margins(0)
ax.axvline(coord_translocation, color='red', ls='-', lw=3, alpha=0.3)
assembly_size = ax.axis()
ax.set_xticks(np.arange(0, assembly_size[1], 1e6))
ax.set_xticklabels(np.arange(0, assembly_size[1]*1e-6, 1).astype(int))
ax.set_yticks(np.arange(0, assembly_size[3], 1e6))
ax.set_yticklabels(np.arange(0, assembly_size[3]*1e-6, 1).astype(int))
#if s=='S288c.genome.fa':
for tig, df in delta.groupby(8):
if len(ASSEMBLIES[s][tig].seq)>2e5:
ax.text(assembly_size[1]*1.02, np.mean([df['s2'].min(), df['e2'].max()]), s=tig, va='center', ha='left', size=7)
for tig, df in delta.groupby(7):
if len(ASSEMBLIES['Jean-Talon_reordered.fasta'][tig].seq)>2e5:
ax.text(np.mean([df['s1'].min(), df['e1'].max()]), assembly_size[3]*1.02, s=tig, va='bottom', ha='center', rotation=90, size=7)
ax.set_xlabel(assembly_alias['Jean-Talon_reordered.fasta'], size=14)
ax.set_ylabel(assembly_alias[s], size=14)
plt.savefig('/Volumes/MacintoshHD/Dropbox/Jean_Talon/fig/FigS7.svg')
#plt.show()
plt.close()
###Output
_____no_output_____ |
OOI/OOI_equipment_mapping.ipynb | ###Markdown
OOI Equipment mapping- by Landung Setiawan- 6/14/2016- This notebook is for retrieving information from google sheets and then mapping to a JSON file, each instrument has its own JSON file configuration- The required libraries for this manipulation is *gspread*, *oauth2client*, and *pycrypto*
###Code
# Google Authentication Libraries
import oauth2client, gspread
import json
# oauth2client version check and gspread
oauth_ver = oauth2client.__version__
gspread_ver = gspread.__version__
print "oauth2client version : {}".format(oauth_ver)
print "gspread version : {}".format(gspread_ver)
if oauth_ver < "2.0.2":
from oauth2client.client import SignedJwtAssertionCredentials
json_key = json.load(open('XXXX.json'))
# Get scope for google sheets
# Gather all spreadsheets shared with the client_email: [email protected]
scope = ['https://spreadsheets.google.com/feeds']
# Retrieve credentials from JSON key of service account
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'], scope)
# Authorize gspread to connect to google sheets
gc = gspread.authorize(credentials)
else:
from oauth2client.service_account import ServiceAccountCredentials
# Get scope for google sheets
# Gather all spreadsheets shared with the client_email: [email protected]
scope = ['https://spreadsheets.google.com/feeds']
# Retrieve credentials from JSON key of service account
credentials = ServiceAccountCredentials.from_json_keyfile_name('XXXX.json', scope)
# Authorize gspread to connect to google sheets
gc = gspread.authorize(credentials)
# Get all spreadsheets available for NANOOS
gsheets = gc.openall()
# Get title of the spreadsheets
for i in range(0,len(gsheets)):
print "{0} {1}".format(i,gsheets[i].title)
# Open sensor_configurations_mappings only
sc = gc.open("sensor_configurations_mappings")
# Get all worksheets in a sheet
wks = sc.worksheets()
wks
s1 = sc.get_worksheet(0)
s2 = sc.get_worksheet(1)
print s1, s2
###Output
<Worksheet 'instruments' id:o5yzc1h> <Worksheet 'measurements' id:odfoenj>
###Markdown
Parsing data to a pandas dataframe- Now that connection has been established, data is parsed to be viewed
###Code
# Import pandas and numpy to make data easier to view
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
print "pandas version: {}".format(pd.__version__)
print "numpy version: {}".format(np.__version__)
# Getting all the values of sheet1
array1 = s1.get_all_values()
array2 = s2.get_all_values()
# Convert data into pandas dataframe
df = pd.DataFrame(array1)
df.columns = array1[0]
df.drop(df.index[0], inplace=True)
df = df.convert_objects(convert_numeric=True)
df.head()
# Convert data into pandas dataframe
df1 = pd.DataFrame(array2)
df1.columns = array2[0]
df1.drop(df1.index[0], inplace=True)
df1 = df1.convert_objects(convert_numeric=True)
df1.head()
def createJSON(df):
# Get Platforms
json_data = df[['platform','instrument','depth_m','mfn','deployment','data_logger','subtype']].reset_index(drop=True)
platforms = json_data['platform'].unique()
mainkey = dict()
prop = dict()
# Gather Platform info together
plat = [json_data.loc[json_data['platform'] == p] for p in platforms]
# Create JSON
for i in range(0, len(plat)):
instrum = dict()
mainkey = dict()
for j in range(0, len(plat[i]['platform'].values)):
platform_name = plat[i]['platform'].values[j]
instrument_name = plat[i]['instrument'].values[j]
depth_m = plat[i]['depth_m'].values[j]
mfn = plat[i]['mfn'].values[j]
deployment = plat[i]['deployment'].values[j]
data_logger = plat[i]['data_logger'].values[j]
subtype = plat[i]['subtype'].values[j]
# Check for mfn
if mfn != '':
mfn = True
else:
mfn = False
# Getting subtype
if subtype != '':
subtype = subtype.split('::')[1]
else:
subtype = None
prop['depth_m'] = float(depth_m)
prop['mfn'] = mfn
prop['deployment'] = deployment
prop['data_logger'] = data_logger
prop['subtype'] = subtype
instrum['{}'.format(instrument_name)] = prop
mainkey['{}'.format(platform_name)] = instrum
prop = dict()
# prints the JSON structured dictionary
print json.dumps(mainkey, sort_keys=True, indent=4, separators=(',', ': '))
# Output to JSON file
fj = open("{}.json".format(platform_name), 'w')
fj.write(json.dumps(mainkey, sort_keys=False, indent=4, separators=(',', ': ')))
fj.close()
createJSON(df)
###Output
{
"CE07SHSM": {
"ctdbp1": {
"data_logger": "cg_data/dcl27",
"deployment": "D00003",
"depth_m": -7.0,
"mfn": false,
"subtype": "1"
},
"ctdbp2": {
"data_logger": "cg_data/dcl37",
"deployment": "D00003",
"depth_m": -87.0,
"mfn": true,
"subtype": "2"
},
"metbk": {
"data_logger": "cg_data/dcl11",
"deployment": "D00003",
"depth_m": 0.0,
"mfn": false,
"subtype": null
},
"wavss": {
"data_logger": "cg_data/dcl12",
"deployment": "D00003",
"depth_m": 0.0,
"mfn": false,
"subtype": null
}
}
}
{
"CE02SHSM": {
"ctdbp": {
"data_logger": "cg_data/dcl27",
"deployment": "D00003",
"depth_m": -7.0,
"mfn": false,
"subtype": "1"
},
"metbk": {
"data_logger": "cg_data/dcl11",
"deployment": "D00003",
"depth_m": 0.0,
"mfn": false,
"subtype": null
},
"wavss": {
"data_logger": "cg_data/dcl12",
"deployment": "D00003",
"depth_m": 0.0,
"mfn": false,
"subtype": null
}
}
}
{
"CE04OSSM": {
"ctdbp": {
"data_logger": "cg_data/dcl27",
"deployment": "D00002",
"depth_m": -7.0,
"mfn": false,
"subtype": "1"
},
"metbk": {
"data_logger": "cg_data/dcl11",
"deployment": "D00002",
"depth_m": 0.0,
"mfn": false,
"subtype": null
},
"wavss": {
"data_logger": "cg_data/dcl12",
"deployment": "D00002",
"depth_m": 0.0,
"mfn": false,
"subtype": null
}
}
}
{
"CE09OSSM": {
"ctdbp1": {
"data_logger": "cg_data/dcl27",
"deployment": "D00003",
"depth_m": -7.0,
"mfn": false,
"subtype": "1"
},
"ctdbp2": {
"data_logger": "cg_data/dcl37",
"deployment": "D00001",
"depth_m": -540.0,
"mfn": true,
"subtype": "2"
},
"metbk": {
"data_logger": "cg_data/dcl11",
"deployment": "D00003",
"depth_m": 0.0,
"mfn": false,
"subtype": null
},
"wavss": {
"data_logger": "cg_data/dcl12",
"deployment": "D00003",
"depth_m": 0.0,
"mfn": false,
"subtype": null
}
}
}
{
"CE01ISSM": {
"ctdbp1": {
"data_logger": "dcl16",
"deployment": "D00005",
"depth_m": -7.0,
"mfn": false,
"subtype": "2"
},
"ctdbp2": {
"data_logger": "dcl37",
"deployment": "D00001",
"depth_m": -25.0,
"mfn": true,
"subtype": "2"
}
}
}
{
"CE02SHBP": {
"": {
"data_logger": "LJ01D",
"deployment": "",
"depth_m": -80.0,
"mfn": false,
"subtype": null
}
}
}
{
"CE04OSBP": {
"": {
"data_logger": "LJ01C",
"deployment": "",
"depth_m": -580.0,
"mfn": false,
"subtype": null
}
}
}
{
"CE06ISSM": {
"ctdbp1": {
"data_logger": "dcl16",
"deployment": "D00004",
"depth_m": -7.0,
"mfn": false,
"subtype": "2"
},
"ctdbp2": {
"data_logger": "dcl37",
"deployment": "D00004",
"depth_m": -29.0,
"mfn": true,
"subtype": "2"
}
}
}
|
tutorials/tutorial-2-warp_drive_sampler.ipynb | ###Markdown
Copyright (c) 2021, salesforce.com, inc. \All rights reserved. \SPDX-License-Identifier: BSD-3-Clause \For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause **Try this notebook on [Colab](http://colab.research.google.com/github/salesforce/warp-drive/blob/master/tutorials/tutorial-2-warp_drive_sampler.ipynb)!** ⚠️ PLEASE NOTE:This notebook runs on a GPU runtime.\If running on Colab, choose Runtime > Change runtime type from the menu, then select `GPU` in the 'Hardware accelerator' dropdown menu. Welcome to WarpDrive! This is the second tutorial on WarpDrive, a PyCUDA-based framework for extremely parallelized multi-agent reinforcement learning (RL) on a single graphics processing unit (GPU). At this stage, we assume you have read our [first tutorial](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1-warp_drive_basics.ipynb) on WarpDrive basics.In this tutorial, we describe **CUDASampler**, a lightweight and fast action sampler based on the policy distribution across several RL agents and environment replicas. `CUDASampler` utilizes the GPU to parallelize operations to efficiently sample a large number of actions in parallel. Notably:1. It reads the distribution on the GPU through Pytorch and samples actions exclusively at the GPU. There is no data transfer. 2. It maximizes parallelism down to the individual thread level, i.e., each agent at each environment has its own random seed and independent random sampling process. 3. It runs much faster than most GPU samplers. For example, it is significantly faster than Pytorch. Dependencies You can install the warp_drive package using- the pip package manager, OR- by cloning the warp_drive package and installing the requirements.On Colab, we will do the latter.
###Code
import sys
IN_COLAB = "google.colab" in sys.modules
if IN_COLAB:
! git clone https://github.com/salesforce/warp-drive.git
% cd warp-drive
! pip install -e .
else:
! pip install rl_warp_drive
###Output
_____no_output_____
###Markdown
Initialize CUDASampler
###Code
import torch
import numpy as np
from warp_drive.managers.function_manager import CUDAFunctionManager, CUDASampler
from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.utils.constants import Constants
from warp_drive.utils.data_feed import DataFeed
from warp_drive.utils.common import get_project_root
_CUBIN_FILEPATH = f"{get_project_root()}/warp_drive/cuda_bin"
_ACTIONS = Constants.ACTIONS
###Output
_____no_output_____
###Markdown
We first initialize the **CUDADataManager** and **CUDAFunctionManager**. To illustrate the sampler, we first load a pre-compiled binary file called "test_build.fatbin". This binary is compiled with the inclusion of auxiliary files in `warp_drive/cuda_includes/core`, which provide several CUDA core services of WarpDrive, including the backend source code for `CUDASampleController`. To make "test_build.fatbin" available, you need to go to `warp_drive/cuda_includes` and compile this test binary by calling `make compile-test`. For this notebook demonstration, we already provide a pre-compiled binary in the bin folder.Finally, we initialize **CUDASampler** and assign the random seed. `CUDASampler` keeps independent randomness across all threads and blocks. Notice that `CUDASampler` requires `CUDAFunctionManager` because `CUDAFunctionManager` manages all the CUDA function pointers, including the one to the sampler. Also notice this test binary uses 2 environment replicas and 5 agents.
###Code
# Set logger level e.g., DEBUG, INFO, WARNING, ERROR
import logging
logging.getLogger().setLevel(logging.INFO)
cuda_data_manager = CUDADataManager(num_agents=5, episode_length=10, num_envs=2)
cuda_function_manager = CUDAFunctionManager(
num_agents=cuda_data_manager.meta_info("n_agents"),
num_envs=cuda_data_manager.meta_info("n_envs"),
)
cuda_function_manager.load_cuda_from_binary_file(
f"{_CUBIN_FILEPATH}/test_build.fatbin", default_functions_included=True
)
cuda_sampler = CUDASampler(function_manager=cuda_function_manager)
cuda_sampler.init_random(seed=None)
###Output
_____no_output_____
###Markdown
Sampling Actions Placeholder Now, we feed the **actions_a** placeholder into the GPU. It has the shape `(n_envs=2, n_agents=5)` as expected. Also we make it accessible by Pytorch, because during RL training, actions will be fed into the Pytorch trainer directly.
###Code
data_feed = DataFeed()
data_feed.add_data(name=f"{_ACTIONS}_a", data=[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
cuda_data_manager.push_data_to_device(data_feed, torch_accessible=True)
assert cuda_data_manager.is_data_on_device_via_torch(f"{_ACTIONS}_a")
###Output
_____no_output_____
###Markdown
Action Sampled Distribution We define an action **distribution** here. During training, this distribution would be provided by the policy model implemented in Pytorch. The distribution has the shape `(n_envs, n_agents, **n_actions**)`. The last dimension `n_actions` defines the size of the action space for a particular *discrete* action. For example, if we have up, down, left, right and no-ops, `n_actions=5`.**n_actions** needs to be registered by the sampler so the sampler is able to pre-allocate a global memory space in GPU to speed up action sampling. This can be done by calling `sampler.register_actions()`.In this tutorial, we check if our sampled action distribution follows the given distribution. For example, the distribution [0.333, 0.333, 0.333] below suggests the 1st agent has 3 possible actions and each of them have equal probability.
###Code
cuda_sampler.register_actions(
cuda_data_manager, action_name=f"{_ACTIONS}_a", num_actions=3
)
distribution = np.array(
[
[
[0.333, 0.333, 0.333],
[0.2, 0.5, 0.3],
[0.95, 0.02, 0.03],
[0.02, 0.95, 0.03],
[0.02, 0.03, 0.95],
],
[
[0.1, 0.7, 0.2],
[0.7, 0.2, 0.1],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
],
]
)
distribution = torch.from_numpy(distribution).float().cuda()
# Run 10000 times to collect statistics
actions_batch = torch.from_numpy(np.empty((10000, 2, 5), dtype=np.int32)).cuda()
for i in range(10000):
cuda_sampler.sample(cuda_data_manager, distribution, action_name=f"{_ACTIONS}_a")
actions_batch[i] = cuda_data_manager.data_on_device_via_torch(f"{_ACTIONS}_a")
actions_batch_host = actions_batch.cpu().numpy()
actions_env_0 = actions_batch_host[:, 0]
actions_env_1 = actions_batch_host[:, 1]
print(
"Sampled actions distribution versus the given distribution (in bracket) for env 0: \n"
)
for agent_id in range(5):
print(
f"Sampled action distribution for agent_id: {agent_id}:\n"
f"{(actions_env_0[:, agent_id] == 0).sum() / 10000.0}({distribution[0, agent_id, 0]}), \n"
f"{(actions_env_0[:, agent_id] == 1).sum() / 10000.0}({distribution[0, agent_id, 1]}), \n"
f"{(actions_env_0[:, agent_id] == 2).sum() / 10000.0}({distribution[0, agent_id, 2]}) \n"
)
print(
"Sampled actions distribution versus the given distribution (in bracket) for env 1: "
)
for agent_id in range(5):
print(
f"Sampled action distribution for agent_id: {agent_id}:\n"
f"{(actions_env_1[:, agent_id] == 0).sum() / 10000.0}({distribution[1, agent_id, 0]}), \n"
f"{(actions_env_1[:, agent_id] == 1).sum() / 10000.0}({distribution[1, agent_id, 1]}), \n"
f"{(actions_env_1[:, agent_id] == 2).sum() / 10000.0}({distribution[1, agent_id, 2]}) \n"
)
###Output
_____no_output_____
###Markdown
Action Randomness Across Threads Another important validation is whether the sampler provides independent randomness across different agents and environment replicas. Given the same policy model for all the agents and environment replicas, we can check if the sampled actions are independently distributed. Here, we assign all agents across all envs the same distribution [0.25, 0.25, 0.25, 0.25]. It is equivalent to an uniform action distribution among all actions [0,1,2,3], across 5 agents and 2 envs. Then we check the standard deviation across the agents.
###Code
data_feed = DataFeed()
data_feed.add_data(name=f"{_ACTIONS}_b", data=[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
cuda_data_manager.push_data_to_device(data_feed, torch_accessible=True)
assert cuda_data_manager.is_data_on_device_via_torch(f"{_ACTIONS}_b")
cuda_sampler.register_actions(
cuda_data_manager, action_name=f"{_ACTIONS}_b", num_actions=4
)
distribution = np.array(
[
[
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
],
[
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25],
],
]
)
distribution = torch.from_numpy(distribution).float().cuda()
# Run 10000 times to collect statistics.
actions_batch = torch.from_numpy(np.empty((10000, 2, 5), dtype=np.int32)).cuda()
for i in range(10000):
cuda_sampler.sample(cuda_data_manager, distribution, action_name=f"{_ACTIONS}_b")
actions_batch[i] = cuda_data_manager.data_on_device_via_torch(f"{_ACTIONS}_b")
actions_batch_host = actions_batch.cpu().numpy()
actions_batch_host
actions_batch_host.std(axis=2).mean(axis=0)
###Output
_____no_output_____
###Markdown
To check the independence of randomness among all threads, we can compare it with a Numpy implementation. Here we use `numpy.choice(4, 5)` to repeat the same process for an uniform action distribution among all actions [0,1,2,3], 5 agents and 2 envs. We should see that the variation of Numpy output is very close to our sampler.
###Code
actions_batch_numpy = np.empty((10000, 2, 5), dtype=np.int32)
for i in range(10000):
actions_batch_numpy[i, 0, :] = np.random.choice(4, 5)
actions_batch_numpy[i, 1, :] = np.random.choice(4, 5)
actions_batch_numpy.std(axis=2).mean(axis=0)
###Output
_____no_output_____
###Markdown
Running Speed The total time for sampling includes receiving a new distribution and using this to sample.Comparing our sampler with [torch.Categorical sampler](https://pytorch.org/docs/stable/distributions.html), we reach **7-8X** speed up for the distribution above. *Note: our sampler runs in parallel across threads, so this speed-up is almost constant when scaling up the number of agents or environment replicas, i.e., increasing the number of used threads.*
###Code
from torch.distributions import Categorical
distribution = np.array(
[
[
[0.333, 0.333, 0.333],
[0.2, 0.5, 0.3],
[0.95, 0.02, 0.03],
[0.02, 0.95, 0.03],
[0.02, 0.03, 0.95],
],
[
[0.1, 0.7, 0.2],
[0.7, 0.2, 0.1],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5],
[0.5, 0.0, 0.5],
],
]
)
distribution = torch.from_numpy(distribution).float().cuda()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(1000):
cuda_sampler.sample(cuda_data_manager, distribution, action_name=f"{_ACTIONS}_a")
end_event.record()
torch.cuda.synchronize()
print(f"time elapsed: {start_event.elapsed_time(end_event)} ms")
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(1000):
Categorical(distribution).sample()
end_event.record()
torch.cuda.synchronize()
print(f"time elapsed: {start_event.elapsed_time(end_event)} ms")
###Output
_____no_output_____
###Markdown
Copyright (c) 2021, salesforce.com, inc. \All rights reserved. \SPDX-License-Identifier: BSD-3-Clause \For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause **Try this notebook on [Colab](http://colab.research.google.com/github/salesforce/warp-drive/blob/master/tutorials/tutorial-2-warp_drive_sampler.ipynb)!** ⚠️ PLEASE NOTE: This notebook runs on a GPU runtime.\If running on Colab, choose Runtime > Change runtime type from the menu, then select 'GPU' in the dropdown. Welcome to WarpDrive! This is the second tutorial on WarpDrive, a PyCUDA-based framework for extremely parallelized multi-agent reinforcement learning (RL) on a single graphics processing unit (GPU). At this stage, we assume you have read our [first tutorial](https://www.github.com/salesforce/warp-drive/blob/master/tutorials/tutorial-1-warp_drive_basics.ipynb) on WarpDrive basics. In this tutorial, we describe **CUDASampler**, a lightweight and fast action sampler based on the policy distribution across several RL agents and environment replicas. `CUDASampler` utilizes the GPU to parallelize operations and efficiently sample a large number of actions in parallel. Notably: 1. It reads the distribution on the GPU through Pytorch and samples actions exclusively on the GPU. There is no data transfer. 2. It maximizes parallelism down to the individual thread level, i.e., each agent in each environment has its own random seed and independent random sampling process. 3. It runs much faster than most GPU samplers. For example, it is significantly faster than Pytorch. Dependencies You can install the warp_drive package using - the pip package manager, OR - by cloning the warp_drive package and installing the requirements. On Colab, we will do the latter.
###Code
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
! git clone https://github.com/salesforce/warp-drive.git
% cd warp-drive
! pip install -e .
else:
! pip install rl_warp_drive
###Output
_____no_output_____
###Markdown
Initialize CUDASampler
###Code
import torch
import numpy as np
from warp_drive.managers.function_manager import CUDAFunctionManager, CUDASampler
from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.utils.constants import Constants
from warp_drive.utils.data_feed import DataFeed
from warp_drive.utils.common import get_project_root
_CUBIN_FILEPATH = f"{get_project_root()}/warp_drive/cuda_bin"
_ACTIONS = Constants.ACTIONS
###Output
_____no_output_____
###Markdown
We first initialize the **CUDADataManager** and **CUDAFunctionManager**. To illustrate the sampler, we load a pre-compiled binary file called "test_build.fatbin". This binary is compiled with the inclusion of the auxiliary files in `warp_drive/cuda_includes/core`, which provide several CUDA core services of WarpDrive, including the backend source code for `CUDASampleController`. To make "test_build.fatbin" available, you need to go to `warp_drive/cuda_includes` and compile this test binary by calling `make compile-test`. For this notebook demonstration, we already provide a pre-compiled binary in the bin folder. Finally, we initialize **CUDASampler** and assign the random seed. `CUDASampler` keeps independent randomness across all threads and blocks. Notice that `CUDASampler` requires `CUDAFunctionManager` because `CUDAFunctionManager` manages all the CUDA function pointers, including the one to the sampler. Also notice that this test binary uses 2 environment replicas and 5 agents.
###Code
cuda_data_manager = CUDADataManager(num_agents=5, episode_length=10, num_envs=2)
cuda_function_manager = CUDAFunctionManager(num_agents=cuda_data_manager.meta_info("n_agents"),
num_envs=cuda_data_manager.meta_info("n_envs"))
cuda_function_manager.load_cuda_from_binary_file(f"{_CUBIN_FILEPATH}/test_build.fatbin", default_functions_included=True)
cuda_sampler = CUDASampler(function_manager=cuda_function_manager)
cuda_sampler.init_random(seed=None)
###Output
_____no_output_____
###Markdown
Sampling Actions Placeholder Now, we feed the **actions_a** placeholder into the GPU. It has the shape `(n_envs=2, n_agents=5)` as expected. We also make it accessible to Pytorch, because during RL training, actions will be fed into the Pytorch trainer directly.
###Code
data_feed = DataFeed()
data_feed.add_data(name=f"{_ACTIONS}_a", data=[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
cuda_data_manager.push_data_to_device(data_feed, torch_accessible=True)
assert cuda_data_manager.is_data_on_device_via_torch(f"{_ACTIONS}_a")
###Output
_____no_output_____
###Markdown
Action Sampled Distribution We define an action **distribution** here. During training, this distribution would be provided by the policy model implemented in Pytorch. The distribution has the shape `(n_envs, n_agents, n_actions)`. The last dimension `n_actions` defines the size of the action space for a particular *discrete* action. For example, if we have up, down, left, right and no-op, `n_actions=5`. **n_actions** needs to be registered with the sampler so that it can pre-allocate global memory space on the GPU to speed up action sampling. This is done by calling `sampler.register_actions()`. In this tutorial, we check whether the sampled actions follow the given distribution. For example, the distribution [0.333, 0.333, 0.333] below means that the 1st agent has 3 possible actions, each with equal probability.
###Code
cuda_sampler.register_actions(cuda_data_manager, action_name=f"{_ACTIONS}_a", num_actions=3)
distribution = np.array([[[0.333, 0.333, 0.333], [0.2, 0.5, 0.3], [0.95, 0.02, 0.03], [0.02, 0.95, 0.03], [0.02, 0.03, 0.95]],
[[0.1, 0.7, 0.2], [0.7, 0.2, 0.1], [0.5, 0.5, 0.0], [0.0, 0.5, 0.5], [0.5, 0.0, 0.5]]])
distribution = torch.from_numpy(distribution).float().cuda()
# Run 10000 times to collect statistics
actions_batch = torch.from_numpy(np.empty((10000, 2, 5), dtype=np.int32)).cuda()
for i in range(10000):
cuda_sampler.sample(cuda_data_manager,
distribution,
action_name=f"{_ACTIONS}_a")
actions_batch[i] = cuda_data_manager.data_on_device_via_torch(f"{_ACTIONS}_a")
actions_batch_host = actions_batch.cpu().numpy()
actions_env_0 = actions_batch_host[:, 0]
actions_env_1 = actions_batch_host[:, 1]
print("Sampled actions distribution versus the given distribution (in bracket) for env 0: \n")
for agent_id in range(5):
print(f"Sampled action distribution for agent_id: {agent_id}:\n"
f"{(actions_env_0[:, agent_id] == 0).sum() / 10000.0}({distribution[0, agent_id, 0]}), \n"
f"{(actions_env_0[:, agent_id] == 1).sum() / 10000.0}({distribution[0, agent_id, 1]}), \n"
f"{(actions_env_0[:, agent_id] == 2).sum() / 10000.0}({distribution[0, agent_id, 2]}) \n")
print("Sampled actions distribution versus the given distribution (in bracket) for env 1: ")
for agent_id in range(5):
print(f"Sampled action distribution for agent_id: {agent_id}:\n"
f"{(actions_env_1[:, agent_id] == 0).sum() / 10000.0}({distribution[1, agent_id, 0]}), \n"
f"{(actions_env_1[:, agent_id] == 1).sum() / 10000.0}({distribution[1, agent_id, 1]}), \n"
f"{(actions_env_1[:, agent_id] == 2).sum() / 10000.0}({distribution[1, agent_id, 2]}) \n")
###Output
_____no_output_____
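###Markdown
The element-wise prints above can also be condensed with `np.bincount`. The sketch below is only an illustration (it is not part of the original tutorial, and the variable names `emp` and `env` are introduced just for this cell); it tabulates the empirical action frequencies per agent for each env, which can then be compared against `distribution` at a glance.
###Code
# A minimal sketch (illustration only): empirical action frequencies per agent,
# computed with np.bincount from the batch collected above.
for env in range(2):
    emp = np.stack(
        [np.bincount(actions_batch_host[:, env, agent], minlength=3) / 10000.0
         for agent in range(5)]
    )
    print(f"env {env} empirical frequencies per agent:\n{emp}")
###Output
_____no_output_____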
###Markdown
Action Randomness Across Threads Another important validation is whether the sampler provides independent randomness across different agents and environment replicas. Given the same policy model for all the agents and environment replicas, we can check whether the sampled actions are independently distributed. Here, we assign all agents across all envs the same distribution [0.25, 0.25, 0.25, 0.25]. This is equivalent to a uniform action distribution among all actions [0,1,2,3], across 5 agents and 2 envs. Then we check the standard deviation across the agents.
###Code
data_feed = DataFeed()
data_feed.add_data(name=f"{_ACTIONS}_b", data=[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
cuda_data_manager.push_data_to_device(data_feed, torch_accessible=True)
assert cuda_data_manager.is_data_on_device_via_torch(f"{_ACTIONS}_b")
cuda_sampler.register_actions(cuda_data_manager, action_name=f"{_ACTIONS}_b", num_actions=4)
distribution = np.array([[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]],
[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]]])
distribution = torch.from_numpy(distribution).float().cuda()
# Run 10000 times to collect statistics.
actions_batch = torch.from_numpy(np.empty((10000, 2, 5), dtype=np.int32)).cuda()
for i in range(10000):
cuda_sampler.sample(cuda_data_manager,
distribution,
action_name=f"{_ACTIONS}_b")
actions_batch[i] = cuda_data_manager.data_on_device_via_torch(f"{_ACTIONS}_b")
actions_batch_host = actions_batch.cpu().numpy()
actions_batch_host
actions_batch_host.std(axis=2).mean(axis=0)
###Output
_____no_output_____
###Markdown
To check the independence of randomness among all threads, we can compare it with a Numpy implementation. Here we use `numpy.random.choice(4, 5)` to repeat the same process for a uniform action distribution among all actions [0,1,2,3], 5 agents and 2 envs. We should see that the variation of the Numpy output is very close to that of our sampler.
###Code
actions_batch_numpy = np.empty((10000, 2, 5), dtype=np.int32)
for i in range(10000):
actions_batch_numpy[i,0,:] = np.random.choice(4, 5)
actions_batch_numpy[i,1,:] = np.random.choice(4, 5)
actions_batch_numpy.std(axis=2).mean(axis=0)
###Output
_____no_output_____
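###Markdown
For reference, both standard deviations above can be compared against the analytic value; a minimal sketch follows. For a uniform draw over the four actions {0, 1, 2, 3}, the population standard deviation is sqrt(1.25), about 1.118; the per-row sample std over only 5 agents is a noisy (and slightly biased) estimate of this, but its mean should come out similar for the CUDA sampler and the Numpy baseline.
###Code
# A minimal sketch: analytic standard deviation of a uniform draw over {0,1,2,3}.
vals = np.arange(4)
print(np.sqrt(np.mean(vals**2) - np.mean(vals)**2))  # ~1.118
###Output
_____no_output_____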
###Markdown
Running Speed The total time for sampling includes receiving a new distribution and using it to sample. Comparing our sampler with the [torch.Categorical sampler](https://pytorch.org/docs/stable/distributions.html), we reach a **7-8X** speed-up for the distribution above. *Note: our sampler runs in parallel across threads, so this speed-up is almost constant when scaling up the number of agents or environment replicas, i.e., increasing the number of used threads.*
###Code
from torch.distributions import Categorical
distribution = np.array([[[0.333, 0.333, 0.333], [0.2, 0.5, 0.3], [0.95, 0.02, 0.03], [0.02, 0.95, 0.03], [0.02, 0.03, 0.95]],
[[0.1, 0.7, 0.2], [0.7, 0.2, 0.1], [0.5, 0.5, 0.0], [0.0, 0.5, 0.5], [0.5, 0.0, 0.5]]])
distribution = torch.from_numpy(distribution).float().cuda()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(1000):
cuda_sampler.sample(cuda_data_manager, distribution, action_name=f"{_ACTIONS}_a")
end_event.record()
torch.cuda.synchronize()
print(f"time elapsed: {start_event.elapsed_time(end_event)} ms")
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(1000):
Categorical(distribution).sample()
end_event.record()
torch.cuda.synchronize()
print(f"time elapsed: {start_event.elapsed_time(end_event)} ms")
###Output
_____no_output_____ |
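###Markdown
To reduce the two timings above to a single number, the same CUDA-event pattern can be wrapped in a small helper. The sketch below is only an illustration and assumes the setup from the previous cells (cuda_sampler, cuda_data_manager, distribution and Categorical are already defined there); the helper name time_cuda and the variables t_warp and t_torch are introduced just for this cell.
###Code
# A minimal sketch: time a callable with CUDA events and report the speed-up ratio.
def time_cuda(fn, iters=1000):
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for _ in range(iters):
        fn()
    end.record()
    torch.cuda.synchronize()
    return start.elapsed_time(end)  # milliseconds

t_warp = time_cuda(
    lambda: cuda_sampler.sample(cuda_data_manager, distribution, action_name=f"{_ACTIONS}_a")
)
t_torch = time_cuda(lambda: Categorical(distribution).sample())
print(f"speed-up: {t_torch / t_warp:.1f}x")
###Output
_____no_output_____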
case8_challenge_00_pix2wcs.ipynb | ###Markdown
Case8-challenge00_pix2wcs Modified version from Case7-0challengef by T. Kamizuka. In this note, we estimate the field parameters and distortion parameters from the observed positions on the focal plane in the overlapped plates. We also use reference stars (Gaia stars) whose sky coordinates are known in a certain accuracy. The SIP-convention distortion is considered in this note. Preparation First, we load the data from https://github.com/xr0038/jasmine_warpfield/tree/master/challenge/case8.
###Code
import astropy.io.ascii as asc
import astropy.units as u
objects = asc.read('/Users/dkawata/work/obs/projs/JASMINE-Mission/analysis-testing-e2e/jasmine_warpfield/challenge/case8/case8_challenge_00.txt') #consists of x (um), y (um), catalog_id, ra (deg), dec (deg), and field.
pointings = asc.read('/Users/dkawata/work/obs/projs/JASMINE-Mission/analysis-testing-e2e/jasmine_warpfield/challenge/case8/case8_challenge_00_pointing.txt') # consists of field, ra (deg), dec (deg), and pa (deg).
###Output
_____no_output_____
###Markdown
We can convert the units of x and y from um to pix by assuming a pixel size of 15 um. However, we will use um units for the detector coordinates. The input data are created with the Sip definition of crpix=[0,0] and origin=1, which maps the origin to [0 um, 0 um].
###Code
pix_size = 15.*u.um
# objects['x'] = (objects['x']/pix_size).si
# objects['y'] = (objects['y']/pix_size).si
# objects: x (px), y (px), catalog_id, ra (deg), dec (deg), and field.
# pointings: field, ra (deg), dec (deg), and pa (deg).
###Output
_____no_output_____
###Markdown
Then, we change the ids for easy handling.
###Code
from astropy.table import unique
import numpy as np
ids = unique(objects, keys='catalog_id')['catalog_id']
objects.add_column(-1, name='id')
for i in range(0, np.size(ids)):
pos = np.where(objects['catalog_id']==ids[i])
objects['id'][pos] = i
objects.remove_column('catalog_id')
objects.rename_column('id', 'catalog_id')
###Output
_____no_output_____
###Markdown
Here, we make some arrays for further analysis. One new array is true_radec, which stores the true ra/dec values. Duplicated information (rows for the same object) is removed, and the rows are sorted by object id. Another new array is observed_xy. It contains field ids, observed x/y positions on the focal plane, and catalog ids. We also add ra_est and dec_est columns to store the estimated sky positions.
###Code
true_radec = objects['catalog_id', 'ra', 'dec'].copy()
true_radec.sort('catalog_id')
true_radec = unique(true_radec, keys='catalog_id') # consists of catalog_id, ra (deg), and dec (deg).
observed_xy = objects['field', 'x', 'y', 'catalog_id', 'ra', 'dec'].copy()
# observed_xy.rename_column('ra', 'ra_est')
# observed_xy.rename_column('dec', 'dec_est')
observed_xy.add_column(observed_xy['ra'], name='ra_est')
observed_xy.add_column(observed_xy['dec'],name='dec_est')
# observed_xy will have field, x (px), y (px), catalog_id, and estimated ra/dec (deg).
# initializing ra_est and dec_est
observed_xy['ra_est'] = 0.0
observed_xy['dec_est'] = 0.0
###Output
_____no_output_____
###Markdown
In addition, we make another array which stores the field parameters: ra and dec (deg) of the pointing origin and the position angle, pa (deg). The plate scale, which converts um on the detector to deg on the sky, is assumed to be the same for all plates. At this stage, an approximate (initial guess) value is stored in the variable plate_scale.
###Code
field_params = pointings.copy() # field, ra (deg), dec (deg), and pa (deg).
true_field_params = field_params.copy()
# field_params['pa'] -= 240.0 # offset?
# plate_scale = 8.e-6*u.deg*(pix_size/u.um).si # in deg/pix
plate_scale = 8.e-6*u.deg/u.um
print(plate_scale)
###Output
8e-06 deg / um
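###Markdown
For intuition, the sketch below (illustration only) expresses this initial-guess plate scale per 15-um pixel in arcsec, using the pix_size defined above.
###Code
# A minimal sketch: initial-guess plate scale per 15-um pixel, in arcsec.
print((plate_scale * pix_size).to(u.arcsec))
###Output
_____no_output_____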
###Markdown
Let's check the object distribution on sky.
###Code
import matplotlib.pylab as plt
import numpy as np
color = ['red', 'blue', 'green', 'orange']
for i in range(0, np.max(field_params['field'])+1):
pos = np.where(objects['field']==i)
plt.scatter(objects['ra'][pos], objects['dec'][pos], marker='o', facecolor='None', edgecolor=color[i], s=10*i+10)
plt.xlabel('ra (deg)')
plt.ylabel('dec (deg)')
###Output
_____no_output_____
###Markdown
We can see that the data consist of four image plates (different colours indicating the objects observed by the different plates) and that the overlapped region has a size of about 1/4 of the FoV. We select the objects in the overlapped region for further analysis. Here, we select the region where all 4 plates overlap, but we could also use the regions covered by at least 2 plates.
###Code
true_radec_overlap = true_radec.copy()
observed_xy_overlap = observed_xy.copy()
for cid in true_radec['catalog_id']:
if np.count_nonzero(observed_xy['catalog_id']==cid)!=4:
# if np.count_nonzero(observed_xy['catalog_id']==cid)<=1:
pos = np.where(true_radec_overlap['catalog_id']==cid)[0]
true_radec_overlap.remove_rows(pos)
pos = np.where(observed_xy_overlap['catalog_id']==cid)[0]
observed_xy_overlap.remove_rows(pos)
print(' The number of overlapped unique stars =', len(true_radec_overlap))
print(' The total number of observations of these overlapped stars =', len(observed_xy_overlap))
###Output
The number of overlapped unique stars = 222
The total number of observations of these overlapped stars = 888
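###Markdown
As noted above, the selection could be relaxed to regions covered by at least 2 plates. The sketch below (illustration only; the variable counts is introduced just for this cell) counts how many stars fall on 1, 2, 3 or 4 plates, which shows how much larger the sample would be with a looser cut.
###Code
# A minimal sketch: number of stars observed by 1, 2, 3 or 4 plates.
counts = np.array([np.count_nonzero(observed_xy['catalog_id'] == cid)
                   for cid in true_radec['catalog_id']])
for n in range(1, 5):
    print(f'{n} plate(s): {np.count_nonzero(counts == n)} stars')
###Output
_____no_output_____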
###Markdown
Let's check the distribution of the selected unique objects.
###Code
plt.scatter(true_radec_overlap['ra'], true_radec_overlap['dec'], marker='o', facecolor='None', edgecolor='orange')
plt.xlabel('ra (deg)')
plt.ylabel('dec (deg)')
print(len(true_radec_overlap['ra']))
###Output
222
###Markdown
These objects will be used for the following analysis. We again modify the catalog id for easy handling.
###Code
ids = unique(true_radec_overlap, keys='catalog_id')['catalog_id']
true_radec_overlap.add_column(-1, name='id')
observed_xy_overlap.add_column(-1, name='id')
for i in range(0, np.size(ids)):
pos = np.where(true_radec_overlap['catalog_id']==ids[i])
true_radec_overlap['id'][pos] = i
pos = np.where(observed_xy_overlap['catalog_id']==ids[i])
observed_xy_overlap['id'][pos] = i
true_radec_overlap.remove_column('catalog_id')
true_radec_overlap.rename_column('id', 'catalog_id')
observed_xy_overlap.remove_column('catalog_id')
observed_xy_overlap.rename_column('id', 'catalog_id')
###Output
_____no_output_____
###Markdown
First guess of the positions First, we define a wcs constructor, including the SIP polynomial distortion convention; see https://irsa.ipac.caltech.edu/data/SPITZER/docs/files/spitzer/shupeADASS.pdf and https://docs.astropy.org/en/stable/api/astropy.wcs.Sip.html.
###Code
from astropy.wcs import WCS
from astropy.wcs import Sip
import astropy.units as u
def wcs(ra_ptg, dec_ptg, pa_ptg, scale, a=None, b=None, ap=None, bp=None):
w = WCS(naxis=2)
w.wcs.crpix=[0,0]
w.wcs.cdelt=np.array([-scale, scale])
w.wcs.crval=[ra_ptg, dec_ptg]
w.wcs.ctype=["RA---TAN-SIP", "DEC--TAN-SIP"]
w.wcs.pc=[[ np.cos(pa_ptg*u.deg), -np.sin(pa_ptg*u.deg)],
[np.sin(pa_ptg*u.deg), np.cos(pa_ptg*u.deg)]]
# if a is not None and b is not None:
w.sip = Sip(a, b, ap, bp, [0, 0])
return w
###Output
_____no_output_____
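###Markdown
As a quick sanity check of this constructor (illustration only; the pointing values are taken from field_params, no distortion is applied, and the names w_demo, c0 and c1 are introduced just for this cell), two detector positions 1000 um apart should map to sky positions separated by roughly plate_scale x 1000 um, i.e. about 29 arcsec for the assumed scale.
###Code
# A minimal sketch: check the wcs constructor with the first pointing.
from astropy.coordinates import SkyCoord
w_demo = wcs(field_params[0]['ra'], field_params[0]['dec'], field_params[0]['pa'],
             plate_scale.value)
sky = w_demo.all_pix2world([[0.0, 0.0], [1000.0, 0.0]], 0)  # detector positions in um
c0 = SkyCoord(sky[0, 0] * u.deg, sky[0, 1] * u.deg)
c1 = SkyCoord(sky[1, 0] * u.deg, sky[1, 1] * u.deg)
print(c0.separation(c1).to(u.arcsec))  # ~ plate_scale * 1000 um
###Output
_____no_output_____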
###Markdown
Then, we estimate the sky coordinates from the observed focal-plane positions and the (approximate) field parameters. Here, we do not include the distortion, but naively convert the pixel coordinates (x, y) to the sky coordinates, ($\alpha$, $\delta$) (ra_est, dec_est).
###Code
for i in range(0, np.size(field_params)):
fp = field_params[i]
w = wcs(fp['ra'], fp['dec'], fp['pa'], plate_scale.value)
pos = np.where(observed_xy_overlap['field']==fp['field'])
ret = w.all_pix2world(np.concatenate(([observed_xy_overlap[pos]['x']], [observed_xy_overlap[pos]['y']])).T, 0)
observed_xy_overlap['ra_est'][pos] = ret[:, 0]
observed_xy_overlap['dec_est'][pos] = ret[:, 1]
###Output
_____no_output_____
###Markdown
Let's check the true positions and estimated positions.
###Code
plt.scatter(true_radec_overlap['ra'], true_radec_overlap['dec'], marker='x', label='True')
plt.scatter(observed_xy_overlap['ra_est'], observed_xy_overlap['dec_est'], marker='+', label='Estimated')
print(' number of stars used =', len(observed_xy_overlap['ra_est']))
plt.xlabel('ra (deg)')
plt.ylabel('dec (deg)')
plt.legend()
###Output
number of stars used = 888
###Markdown
Test for distortion with A/B, using the following c and d.
###Code
c = np.zeros(shape=(3, 3))
d = np.zeros(shape=(3, 3))
c[0,2]=-2.34153374723336e-09
c[1,1]=1.5792128155073916e-08
c[1,2]=7.674347291529089e-15
c[2,0]=-4.694743859349522e-09
c[2,1]=5.4256004358596465e-15
c[2,2]=-4.6341769281246224e-21
d[0,2]=-1.913280244657798e-08
d[1,1]=-5.622875292409728e-09
d[1,2]=-1.0128311203344238e-14
d[2,0]=3.1424733259527392e-09
d[2,1]=-9.08024075521211e-15
d[2,2]=-1.4123037013352912e-20
###Output
_____no_output_____
###Markdown
We check that all_pix2world takes the SIP parameters A and B into account by comparing ($\alpha$, $\delta$) converted from the (x, y) pixel coordinates without distortion (observed_xy_overlap['ra_est'] and observed_xy_overlap['dec_est'] above) with ($\alpha$, $\delta$) converted from (x, y) with A and B (ra_dist and dec_dist below).
###Code
# print(observed_xy_overlap['ra_est'])
c *= 100.0
ra_dist = np.zeros_like(observed_xy_overlap['ra_est'])
dec_dist = np.zeros_like(observed_xy_overlap['dec_est'])
for i in range(0, np.size(field_params)):
fp = field_params[i]
w = wcs(fp['ra'], fp['dec'], fp['pa'], plate_scale.value, a=c, b=d)
pos = np.where(observed_xy_overlap['field']==fp['field'])
ret = w.all_pix2world(np.concatenate(([observed_xy_overlap[pos]['x']], [observed_xy_overlap[pos]['y']])).T, 0)
ra_dist[pos] = ret[:,0]
dec_dist[pos] = ret[:,1]
print(' diff ra=', ra_dist-observed_xy_overlap['ra_est'])
print(' diff dec=', dec_dist-observed_xy_overlap['dec_est'])
plt.scatter(ra_dist, dec_dist, marker='x', label='Distorted')
plt.scatter(observed_xy_overlap['ra_est'], observed_xy_overlap['dec_est'], marker='+', label='No distortion')
print(' number of stars used =', len(observed_xy_overlap['ra_est']))
plt.xlabel('ra (deg)')
plt.ylabel('dec (deg)')
plt.legend()
###Output
diff ra= ra_est
deg
-----------------------
1.2170394995791867e-05
5.816765053623385e-06
-9.162415494756715e-06
-1.7497795170129393e-05
-1.4185463442117907e-05
-1.119997426712871e-05
6.512692038995738e-06
5.819727562084154e-07
1.0868943661535013e-05
8.796283736955957e-07
...
-2.5157424943245132e-06
-8.514140176885121e-06
2.359488917136332e-06
4.017218486751517e-06
7.2278966172234504e-06
3.721560091207721e-05
1.5106658224794955e-05
3.1841070267546456e-05
4.521866071627301e-05
9.789179591734865e-05
Length = 888 rows
diff dec= dec_est
deg
-----------------------
1.631389871903366e-05
7.99703507325944e-06
-1.3330118619592213e-05
-2.6079412609902874e-05
-2.134303151279937e-05
-1.6830686099922332e-05
7.842507688593514e-06
8.140157774505496e-07
1.477228683555154e-05
1.222328322114663e-06
...
-3.754488030693892e-06
-1.2820045057537754e-05
2.7059028404607943e-06
5.122925756495533e-06
9.55290414594856e-06
5.3387304738805597e-05
2.18593604692785e-05
4.474184625280486e-05
6.40481996825315e-05
0.0001391540547892589
Length = 888 rows
number of stars used = 888
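###Markdown
To make the differences printed above easier to read, the sketch below (illustration only; dra_mas and ddec_mas are introduced just for this cell) converts their typical size to mas. This is the shift introduced by the deliberately scaled-up A/B coefficients.
###Code
# A minimal sketch: mean absolute shift induced by the scaled-up A/B coefficients.
dra_mas = (np.mean(np.abs(np.asarray(ra_dist) - np.asarray(observed_xy_overlap['ra_est']))) * u.deg).to(u.mas)
ddec_mas = (np.mean(np.abs(np.asarray(dec_dist) - np.asarray(observed_xy_overlap['dec_est']))) * u.deg).to(u.mas)
print(dra_mas, ddec_mas)
###Output
_____no_output_____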
###Markdown
Check that these stars cover a large enough region of the detector by looking at their (x, y) positions in the detector coordinates.
###Code
plt.scatter(objects['x'], objects['y'], marker='x', label='All', s=5)
print(' number of all stars=', len(objects['x']))
plt.scatter(observed_xy_overlap['x'], observed_xy_overlap['y'], marker='+', label='Overlap')
plt.xlabel('x (um)')
plt.ylabel('y (um)')
###Output
number of all stars= 2563
###Markdown
Here, there are four estimated positions (ignoring distortion), (observed_xy_overlap['ra_est'], observed_xy_overlap['dec_est']), in the sky coordinates for each unique object. We take their mean values as the first-guess positions and store them in the radec_est array.
###Code
from astropy.table import Table
radec_est = Table(names=['catalog_id', 'ra_est', 'dec_est'], \
dtype=['int64', 'float64', 'float64'])
# units=[None, u.deg, u.deg], \
# dtype=['int64', 'float64', 'float64'])
radec_est['ra_est'].unit = u.deg
radec_est['dec_est'].unit = u.deg
cat_ids = unique(observed_xy_overlap, 'catalog_id')['catalog_id']
for i in cat_ids:
pos = np.where(observed_xy_overlap['catalog_id'] == i)
ra = np.mean(observed_xy_overlap[pos]['ra_est'])*u.deg
dec = np.mean(observed_xy_overlap[pos]['dec_est'])*u.deg
radec_est.add_row([i, ra, dec])
# print('radec_est=', radec_est)
###Output
_____no_output_____
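###Markdown
Before fitting, it is useful to see how much the four per-plate estimates disagree for each star. The sketch below (illustration only; ra_scatter is introduced just for this cell) quotes the ra scatter in arcsec, ignoring the cos(dec) factor. This is the disagreement the least-squares adjustment below is meant to remove.
###Code
# A minimal sketch: per-star scatter of the four per-plate ra estimates (arcsec).
ra_scatter = []
for i in cat_ids:
    pos = np.where(observed_xy_overlap['catalog_id'] == i)
    ra_scatter.append(np.std(np.asarray(observed_xy_overlap[pos]['ra_est'])) * 3600.0)
print('median ra scatter (arcsec):', np.median(ra_scatter))
###Output
_____no_output_____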
###Markdown
Let's check the estimated positions.
###Code
plt.scatter(true_radec_overlap['ra'], true_radec_overlap['dec'], marker='x', label='True')
plt.scatter(radec_est['ra_est'], radec_est['dec_est'], marker='+', label='First guess')
plt.xlabel('ra (deg)')
plt.ylabel('dec (deg)')
plt.legend()
###Output
_____no_output_____
###Markdown
Parameter adjustment First, we define a function which calculates the x/y positions from the ra/dec values estimated above and the field/catalog ids.
###Code
def xy_calculator(observed_xy, field_params, plate_scale, ap, bp, radec_info):
# observed_xy: consists of field, x (px), y (px), catalog_id, ra_est (deg), and dec_est(deg).
# field_params: consists of field, ra (deg), dec (deg), and pa (deg).
# radec_info: consists of catalog_id, ra_est (deg), and dec_est (deg).
observed_xy_cp = observed_xy.copy()
observed_xy_cp.rename_column('x', 'x_est')
observed_xy_cp.rename_column('y', 'y_est')
observed_xy_cp['x_est'] = None
observed_xy_cp['y_est'] = None
observed_xy_cp['ra_est'] = None
observed_xy_cp['dec_est'] = None
for i in range(0, np.size(radec_info)):
pos = np.where(observed_xy_cp['catalog_id']==radec_info[i]['catalog_id'])
observed_xy_cp['ra_est'][pos] = radec_info[i]['ra_est']
observed_xy_cp['dec_est'][pos] = radec_info[i]['dec_est']
for i in range(0, np.size(field_params)):
fp = field_params[i]
w = wcs(fp['ra'], fp['dec'], fp['pa'], plate_scale, ap=ap, bp=bp)
pos = np.where(observed_xy_cp['field']==fp['field'])
radec0 = np.concatenate(([observed_xy_cp[pos]['ra_est']], [observed_xy_cp[pos]['dec_est']])).T
ret = w.sip_foc2pix(w.wcs_world2pix(radec0, 1)-w.wcs.crpix, 1)
observed_xy_cp['x_est'][pos] = ret[:, 0]
observed_xy_cp['y_est'][pos] = ret[:, 1]
return observed_xy_cp['x_est', 'y_est']
###Output
_____no_output_____
###Markdown
Next, we define a function to map from the (x, y) pixel coordinates to ($\alpha$, $\delta$), using the A/B SIP distortion parameters and wcs.all_pix2world, https://docs.astropy.org/en/stable/api/astropy.wcs.WCS.html#astropy.wcs.WCS.all_pix2world, with the input field parameters $\alpha_{\rm ptgs}$ (deg), $\delta_{\rm ptgs}$ (deg) and pa$_{\rm ptgs}$ (deg) of each field (plate) pointing. This conversion is described as follows; here, we follow the description at https://www.stsci.edu/itt/review/DrizzlePac/HTML/ch33.html Definitions: CRVAL1: $\alpha_{\rm ptgs}$, the right ascension at the pointing centre. CRVAL2: $\delta_{\rm ptgs}$, the declination at the pointing centre. CRPIX1: the x reference location of the image plate, corresponding to the pointing centre; we set CRPIX1=0. CRPIX2: the y reference location of the image plate, corresponding to the pointing centre; we set CRPIX2=0. wcs computes the sky coordinate, ($\alpha$, $\delta$), of a star at (x, y) on the detector as follows. With $u = x - {\rm CRPIX1}$ and $v = y - {\rm CRPIX2}$, the SIP polynomials give the corrected focal-plane coordinates $u' = u + \sum_{p,q} A_{pq} u^p v^q$ and $v' = v + \sum_{p,q} B_{pq} u^p v^q$. The intermediate world coordinates are then $\begin{pmatrix} \xi \\ \eta \end{pmatrix} = \begin{pmatrix} -s & 0 \\ 0 & s \end{pmatrix} \begin{pmatrix} \cos({\rm pa_{ptgs}}) & -\sin({\rm pa_{ptgs}}) \\ \sin({\rm pa_{ptgs}}) & \cos({\rm pa_{ptgs}}) \end{pmatrix} \begin{pmatrix} u' \\ v' \end{pmatrix}$, where $s$ is the plate scale (cdelt=[-s, s] and pc is the rotation matrix in the wcs constructor above). Finally, ($\alpha$, $\delta$) are obtained from $(\xi, \eta)$ by the inverse TAN (gnomonic) projection about (CRVAL1, CRVAL2).
###Code
def radec_calculator_ab(observed_xy, field_params, plate_scale, a, b):
# observed_xy: consists of field, x (px), y (px), catalog_id, ra_est (deg), and dec_est(deg).
# field_params: consists of field, ra (deg), dec (deg), and pa (deg).
observed_xy_cp = observed_xy.copy()
# observed_xy_cp.rename_column('x', 'x_est')
# observed_xy_cp.rename_column('y', 'y_est')
# observed_xy_cp['x_est'] = None
# observed_xy_cp['y_est'] = None
observed_xy_cp['ra_est'] = None
observed_xy_cp['dec_est'] = None
for i in range(0, np.size(field_params)):
fp = field_params[i]
w = wcs(fp['ra'], fp['dec'], fp['pa'], plate_scale, a=a, b=b)
pos = np.where(observed_xy_cp['field']==fp['field'])
pix0 = np.concatenate(([observed_xy_cp[pos]['x']], [observed_xy_cp[pos]['y']])).T
ret = w.all_pix2world(pix0, 1)
# ret = w.sip_pix2foc(w.wcs_pix2world(pix0, 1)-w.wcs.crval, 1)
observed_xy_cp['ra_est'][pos] = ret[:, 0]
observed_xy_cp['dec_est'][pos] = ret[:, 1]
return observed_xy_cp['ra_est', 'dec_est']
###Output
_____no_output_____
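###Markdown
The sketch below (illustration only, using the first pointing; the names pa, s, pc and cd are introduced just for this cell) spells out the linear part of the conversion described above: the CD matrix is diag(cdelt) @ PC, i.e. the plate-scale scaling combined with the position-angle rotation. The full conversion, including the SIP polynomials and the TAN de-projection, is what radec_calculator_ab delegates to astropy.
###Code
# A minimal sketch: the CD matrix (deg per um) for the first pointing.
pa = np.deg2rad(field_params[0]['pa'])
s = plate_scale.value
pc = np.array([[np.cos(pa), -np.sin(pa)],
               [np.sin(pa),  np.cos(pa)]])
cd = np.diag([-s, s]) @ pc  # applied to the SIP-corrected (u', v') before the TAN de-projection
print(cd)
###Output
_____no_output_____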
###Markdown
Using scipy.optimize least_squares, assuming that the pointing sky coordinates, RA and DEC, are accurately known. We first define the model function to be solved with least squares.
###Code
# def model_func(params, n_fields, dim_sip, observed_xy):
def model_func(params, ra_ptgs, dec_ptgs, n_fields, dim_sip, observed_xy):
# params = (pa_ptgs..., scale, a..., b...); ra_ptgs and dec_ptgs are passed as fixed arguments here
pa_ptgs, scale, a, b =\
np.split(params, [n_fields, n_fields+1,\
n_fields+1+(dim_sip+1)**2])
# ra_ptgs, dec_ptgs, pa_ptgs, scale, a, b =\
# np.split(params, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields+1,\
# 3*n_fields+1+(dim_sip+1)**2])
field_params = Table(data=[ra_ptgs, dec_ptgs, pa_ptgs, -np.ones(shape=(np.size(ra_ptgs)))],\
names=['ra', 'dec', 'pa', 'field'],\
dtype=['float64', 'float64', 'float64', 'int64'])
# names=['ra', 'dec', 'pa', 'field'],\
# units=[u.deg, u.deg, u.deg, None],\
# dtype=['float64', 'float64', 'float64', 'int64'])
field_params['ra'].unit = u.deg
field_params['dec'].unit = u.deg
field_params['pa'].unit = u.deg
field_params['field'] = np.arange(0, np.size(field_params))
# use copy of observed_xy
observed_xy_cp = observed_xy.copy()
a_matrix = np.reshape(a, (dim_sip+1, dim_sip+1))
b_matrix = np.reshape(b, (dim_sip+1, dim_sip+1))
# mns = np.concatenate(((0, 1), np.arange(dim_sip+1, 2*(dim_sip)+1)))
# for mn in mns:
# for m in range(np.max([0, mn-dim_sip]), np.min([mn+1, dim_sip+1])):
# n = mn - m
# ap_matrix[m, n] = 0
# bp_matrix[m, n] = 0
# a_matrix[0, 0] = 0.0
# a_matrix[0, 1] = 0.0
# a_matrix[1, 0] = 0.0
# b_matrix[0, 0] = 0.0
# b_matrix[0, 1] = 0.0
# b_matrix[1, 0] = 0.0
m, n = np.indices((dim_sip+1, dim_sip+1))
mn = m + n
a_matrix = a_matrix * (1.e-3**mn)
b_matrix = b_matrix * (1.e-3**mn)
# compute ra/dec from x/y with the parameters.
ret = radec_calculator_ab(observed_xy_cp, field_params, scale[0], \
a_matrix, b_matrix)
observed_xy_cp['ra_est'] = ret['ra_est']
observed_xy_cp['dec_est'] = ret['dec_est']
# compute the mean ra/dec for unique stars
cat_ids = unique(observed_xy_cp, 'catalog_id')['catalog_id']
ra_mean = np.zeros_like(observed_xy_cp['ra_est'])
dec_mean = np.zeros_like(observed_xy_cp['ra_est'])
for i in cat_ids:
pos = np.where(observed_xy_cp['catalog_id'] == i)
ra_mean[pos] = np.mean(observed_xy_cp[pos]['ra_est'])*u.deg
dec_mean[pos] = np.mean(observed_xy_cp[pos]['dec_est'])*u.deg
radec_est = np.concatenate((observed_xy_cp['ra_est'], observed_xy_cp['dec_est']))
radec_est_mean = np.concatenate((ra_mean, dec_mean))
residuals = radec_est - radec_est_mean
return residuals
###Output
_____no_output_____
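###Markdown
For bookkeeping, the sketch below (illustration only; n_fields_demo and dim_sip_demo are placeholder names for the values used later, 4 pointings and dim_sip = 4) counts the free parameters that model_func unpacks with np.split: one position angle per field, one plate scale, and two (dim_sip+1) x (dim_sip+1) blocks of SIP coefficients.
###Code
# A minimal sketch: expected length of the parameter vector for model_func.
n_fields_demo, dim_sip_demo = 4, 4
print(n_fields_demo + 1 + 2 * (dim_sip_demo + 1)**2)  # 55 free parameters
###Output
_____no_output_____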
###Markdown
Next, we execute the least-squares calculation to derive the field parameters and the sky positions of the objects in the overlapped region.
###Code
from scipy.optimize import least_squares
import time
dim_sip = 4
a = np.zeros(shape=(dim_sip+1, dim_sip+1))
b = np.zeros(shape=(dim_sip+1, dim_sip+1))
# constructing a_init (initial parameter set).
# a_init = np.array(np.concatenate((field_params['ra'], field_params['dec'], field_params['pa'], \
# [plate_scale.value], a.flatten(), b.flatten()))) # This must be an ndarray.
a_init = np.array(np.concatenate((field_params['pa'], \
[plate_scale.value], a.flatten(), b.flatten()))) # This must be an ndarray.
print(' # of fitting parameters =', len(a_init))
# constraining ra/dec values in 'observed' between -180 and 180 deg.
# measured = np.concatenate((observed_xy_overlap['x'], observed_xy_overlap['y']))
# print(' # of data points =', len(measured))
#pos = np.where(measured>180.)
#measured[pos] -= 360.
#pos = np.where(measured<-180.)
#measured[pos] += 360.
start = time.time()
# result = least_squares(model_func, a_init, loss='linear', args=(np.size(field_params), \
# dim_sip, observed_xy_overlap), \
# verbose=2)
result = least_squares(model_func, a_init, loss='linear', args=(field_params['ra'], \
field_params['dec'], np.size(field_params), dim_sip, observed_xy_overlap), \
verbose=2)
print(' time=',time.time()-start)
## pa should be a positive value between 0 and 360.
#if result[3] < 0:
# result[3] = -result[3]
# result[2] = result[2] + 180.0
#
#if result[2] > 360.0 or result[2] < 0.0:
# result[2] = result[2]%360.0
###Output
# of fitting parameters = 55
Iteration Total nfev Cost Cost reduction Step norm Optimality
0 1 1.6628e-03 2.23e+04
1 2 1.5588e-10 1.66e-03 6.03e+02 4.68e-01
2 3 1.0926e-10 4.66e-11 1.21e+03 6.51e-03
3 4 1.0914e-10 1.21e-13 2.41e+03 1.06e-03
4 5 1.0914e-10 7.30e-15 2.41e+03 1.34e-03
5 6 1.0903e-10 1.08e-13 6.03e+02 6.90e-03
6 7 1.0895e-10 7.95e-14 6.03e+02 2.78e-03
7 20 1.0895e-10 1.00e-15 3.59e-05 1.19e-05
`xtol` termination condition is satisfied.
Function evaluations 20, initial cost 1.6628e-03, final cost 1.0895e-10, first-order optimality 1.19e-05.
time= 29.973267793655396
###Markdown
Checking results Preparation
###Code
n_fields = np.size(field_params)
n_objects = np.size(radec_est)
true_ra_ptgs = true_field_params['ra'].data
true_dec_ptgs = true_field_params['dec'].data
true_pa_ptgs = true_field_params['pa'].data
# ra_ptgs, dec_ptgs, pa_ptgs, scale, a, b =\
# np.split(result.x, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields+1,\
# 3*n_fields+1+(dim_sip+1)**2])
pa_ptgs, scale, a, b =\
np.split(result.x, [n_fields, n_fields+1,\
n_fields+1+(dim_sip+1)**2])
ra_ptgs = field_params['ra'].data
dec_ptgs = field_params['dec'].data
a_matrix = np.reshape(a, (dim_sip+1, dim_sip+1))
b_matrix = np.reshape(b, (dim_sip+1, dim_sip+1))
# A/B scaling
m, n = np.indices((dim_sip+1, dim_sip+1))
mn = m + n
a_matrix = a_matrix * (1.e-3**mn)
b_matrix = b_matrix * (1.e-3**mn)
fit_field_params = Table(data=[ra_ptgs, dec_ptgs, pa_ptgs, -np.ones(shape=(np.size(ra_ptgs)))],\
names=['ra', 'dec', 'pa', 'field'],\
dtype=['float64', 'float64', 'float64', 'int64'])
fit_field_params['ra'].unit = u.deg
fit_field_params['dec'].unit = u.deg
fit_field_params['pa'].unit = u.deg
fit_field_params['field'] = np.arange(0, np.size(field_params))
###Output
_____no_output_____
###Markdown
Pointings
###Code
print(' pointing centre (fit) ra, dec (deg) =', ra_ptgs, dec_ptgs)
print(' pointing centre (true) ra, dec (deg) =', true_ra_ptgs, true_dec_ptgs)
print(' difference ra, dec (deg) =', ra_ptgs-true_ra_ptgs, dec_ptgs-true_dec_ptgs)
###Output
pointing centre (fit) ra, dec (deg) = [265.6202439 265.70081783 265.4894155 265.56770499] [-28.85396419 -28.74323799 -28.78375368 -28.67010405]
pointing centre (true) ra, dec (deg) = [265.6202439 265.70081783 265.4894155 265.56770499] [-28.85396419 -28.74323799 -28.78375368 -28.67010405]
difference ra, dec (deg) = [0. 0. 0. 0.] [0. 0. 0. 0.]
###Markdown
Pointings position angles
###Code
print(' position angle (fit) (deg) =', pa_ptgs)
print(' position angle (true) (deg) =', true_pa_ptgs)
print(' difference =', pa_ptgs-true_pa_ptgs)
###Output
position angle (fit) (deg) = [302.02397735 301.17977836 300.93658294 301.57982414]
position angle (true) (deg) = [302.02408829 301.17958541 300.93717604 301.58002573]
difference = [-0.00011094 0.00019295 -0.00059309 -0.00020159]
###Markdown
Scale (deg/um)
###Code
scale
print(' true scale =',(1e-6/7.3/np.pi*180.0)*u.deg/u.um)
# print(' true scale =',(1e-6/7.3/np.pi*180.0)*u.deg*(pix_size/u.um).si)
###Output
true scale = 7.848736919600318e-06 deg / um
###Markdown
A/B
###Code
print(' derived A/B matrices = ', a_matrix, b_matrix)
###Output
derived A/B matrices = [[-3.08142413e-02 5.86396811e-05 -3.24923770e-09 -1.62454741e-14
-7.17913522e-20]
[ 3.92129393e-05 -1.23610090e-09 1.22988952e-13 -1.20845957e-18
-2.00471211e-12]
[-1.05329711e-08 -4.88239084e-14 9.49772199e-19 1.68062691e-13
-5.07717860e-16]
[ 1.09624088e-13 3.59234027e-19 -1.14457382e-12 -1.34838360e-15
9.84917258e-19]
[ 1.07780798e-18 6.39667921e-13 1.53327770e-15 -7.73129177e-19
-1.17286219e-21]] [[ 3.05407744e-02 -1.03095459e-04 -4.51956415e-09 1.59727926e-13
2.82748262e-19]
[ 7.68952649e-05 -7.38105269e-09 -3.26028271e-14 8.14731120e-19
8.05542255e-13]
[-1.66808698e-09 1.42945978e-13 3.14251002e-19 -2.64759938e-13
1.10409059e-16]
[-4.59848682e-15 -2.71879966e-19 4.32185387e-13 1.07779243e-15
1.12309000e-18]
[ 7.81723600e-20 1.21900390e-12 8.53264469e-16 8.58247366e-19
3.18202077e-21]]
###Markdown
Object positions
###Code
print(' field params=', fit_field_params)
radec_objs = radec_calculator_ab(observed_xy_overlap, fit_field_params, scale[0], a_matrix, b_matrix)
plt.scatter(true_radec_overlap['ra'], true_radec_overlap['dec'], marker='x', label='True')
plt.scatter(radec_est['ra_est'], radec_est['dec_est'], marker='+', label='Initial guess')
plt.scatter(radec_objs['ra_est'], radec_objs['dec_est'], marker='.', label='Final estimation')
plt.xlabel('ra (deg)')
plt.ylabel('dec (deg)')
plt.title('Object positions')
plt.legend()
###Output
field params= ra dec pa field
deg deg deg
----------------- ------------------- ------------------ -----
265.6202439021891 -28.853964194125034 302.0239773521093 0
265.7008178261919 -28.7432379906527 301.17977835520514 1
265.4894154993913 -28.78375368278103 300.93658294042217 2
265.5677049936395 -28.670104050957786 301.5798241359601 3
###Markdown
Position difference
###Code
from astropy.coordinates import SkyCoord
distlist = []
print(np.shape(radec_objs))
for i in range(0, np.size(radec_objs)):
c1 = SkyCoord(radec_objs['ra_est'][i]*u.deg, radec_objs['dec_est'][i]*u.deg)
c2 = SkyCoord(observed_xy_overlap['ra'][i]*u.deg, observed_xy_overlap['dec'][i]*u.deg)
distlist.append(c1.separation(c2).arcsec)
distlist = np.array(distlist)
#plt.hist(np.log10(distlist))
plt.hist(distlist)
plt.xlabel("Residual (arcsec)")
plt.ylabel("Number")
dra = ((radec_objs['ra_est']-observed_xy_overlap['ra']).data)*u.deg
ddec = ((radec_objs['dec_est']-observed_xy_overlap['dec']).data)*u.deg
dra_arcsec = dra.to_value(u.arcsec)
ddec_arcsec = ddec.to_value(u.arcsec)
plt.scatter(dra_arcsec, ddec_arcsec, marker='x')
plt.xlabel('dRA (arcsec)')
plt.ylabel('dDEC (arcsec)')
#plt.xlim([-0,8, 0.0])
#plt.ylim([-0.8, 0.0])
###Output
_____no_output_____
###Markdown
Not fixing the pointing RA, DEC, but using the reference stars. First, we define the model function that evaluates, for the i-th star, the differences between the sky coordinates (ra, dec)_i estimated from the individual plates (the j-th plate provides the detector coordinates (x, y)_ij) and their mean, together with the residuals between the estimated position of the k-th reference star and its known (ra, dec)_k from other observations, e.g. Gaia.
###Code
def model_wrefs_func(params, n_fields, dim_sip, observed_xy, radec_refstars):
# params = (ra_ptgs, dec_ptgs, pa_ptg..., scale, a..., b...)
ra_ptgs, dec_ptgs, pa_ptgs, scale, a, b =\
np.split(params, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields+1,\
3*n_fields+1+(dim_sip+1)**2])
field_params = Table(data=[ra_ptgs, dec_ptgs, pa_ptgs, -np.ones(shape=(np.size(ra_ptgs)))],\
names=['ra', 'dec', 'pa', 'field'],\
dtype=['float64', 'float64', 'float64', 'int64'])
# names=['ra', 'dec', 'pa', 'field'],\
# units=[u.deg, u.deg, u.deg, None],\
# dtype=['float64', 'float64', 'float64', 'int64'])
field_params['ra'].unit = u.deg
field_params['dec'].unit = u.deg
field_params['pa'].unit = u.deg
field_params['field'] = np.arange(0, np.size(field_params))
# use copy of observed_xy
observed_xy_cp = observed_xy.copy()
a_matrix = np.reshape(a, (dim_sip+1, dim_sip+1))
b_matrix = np.reshape(b, (dim_sip+1, dim_sip+1))
# mns = np.concatenate(((0, 1), np.arange(dim_sip+1, 2*(dim_sip)+1)))
# for mn in mns:
# for m in range(np.max([0, mn-dim_sip]), np.min([mn+1, dim_sip+1])):
# n = mn - m
# ap_matrix[m, n] = 0
# bp_matrix[m, n] = 0
# a_matrix[0, 0] = 0.0
# a_matrix[0, 1] = 0.0
# a_matrix[1, 0] = 0.0
# b_matrix[0, 0] = 0.0
# b_matrix[0, 1] = 0.0
# b_matrix[1, 0] = 0.0
# normalisation.
m, n = np.indices((dim_sip+1, dim_sip+1))
mn = m + n
a_matrix = a_matrix * (1.e-3**mn)
b_matrix = b_matrix * (1.e-3**mn)
# compute ra/dec from x/y with the parameters.
ret = radec_calculator_ab(observed_xy_cp, field_params, scale[0], \
a_matrix, b_matrix)
observed_xy_cp['ra_est'] = ret['ra_est']
observed_xy_cp['dec_est'] = ret['dec_est']
# compute the mean ra/dec for unique stars
cat_ids = unique(observed_xy_cp, 'catalog_id')['catalog_id']
ra_mean = np.zeros_like(observed_xy_cp['ra_est'])
dec_mean = np.zeros_like(observed_xy_cp['ra_est'])
for i in cat_ids:
pos = np.where(observed_xy_cp['catalog_id'] == i)
ra_mean[pos] = np.mean(observed_xy_cp[pos]['ra_est'])*u.deg
dec_mean[pos] = np.mean(observed_xy_cp[pos]['dec_est'])*u.deg
# reference stars' measured mean ra, dec to be compared
# with the ra, dec of reference stars.
radec_est_refstars = radec_refstars.copy()
radec_est_refstars.rename_column('ra', 'ra_est')
radec_est_refstars.rename_column('dec', 'dec_est')
for i,id in enumerate(radec_refstars['catalog_id']):
# print('i, id=', i, id)
# print(ra_mean[observed_xy_cp['catalog_id'] == id][0])
radec_est_refstars[i]['ra_est'] = ra_mean[observed_xy_cp['catalog_id'] == id][0]
radec_est_refstars[i]['dec_est'] = dec_mean[observed_xy_cp['catalog_id'] == id][0]
radec_est = np.concatenate((observed_xy_cp['ra_est'], observed_xy_cp['dec_est'], \
radec_refstars['ra'], radec_refstars['dec']))
radec_est_mean = np.concatenate((ra_mean, dec_mean, radec_est_refstars['ra_est'], \
radec_est_refstars['dec_est']))
residuals = radec_est - radec_est_mean
return residuals
###Output
_____no_output_____
###Markdown
Pick the reference stars from true_radec_overlap, i.e. from the overlapped stars.
###Code
# print(' true_radec_overlap =', true_radec_overlap)
print(' len =', len(true_radec_overlap))
# number of reference stars
n_refstars = 10
pos = np.random.choice(len(true_radec_overlap), size=n_refstars, replace=False)
radec_refstars = true_radec_overlap[pos]
print(radec_refstars)
###Output
len = 222
ra dec catalog_id
deg deg
------------------ ------------------- ----------
265.6605763504146 -28.729564945859636 159
265.5693192785384 -28.82266494434485 86
265.59348590874885 -28.78722659354972 135
265.5412371867324 -28.701855330665776 179
265.66493934481815 -28.697143318169438 167
265.5948456494816 -28.779637387219932 136
265.55281054576335 -28.76578599577947 116
265.6721736741642 -28.804517962312964 35
265.6389435993766 -28.810982620638814 40
265.54644458107134 -28.765696703260986 115
###Markdown
Now, let's run least_squares and get the distortion parameters with the reference stars' constraints.
###Code
from scipy.optimize import least_squares
import time
dim_sip = 4
a = np.zeros(shape=(dim_sip+1, dim_sip+1))
b = np.zeros(shape=(dim_sip+1, dim_sip+1))
# constructing a_init (initial parameter set).
a_init = np.array(np.concatenate((field_params['ra'], field_params['dec'], \
field_params['pa'], \
[plate_scale.value], a.flatten(), b.flatten()))) # This must be an ndarray.
# a_init = np.array(np.concatenate((field_params['pa'], \
# [plate_scale.value], a.flatten(), b.flatten()))) # This must be an ndarray.
print(' # of fitting parameters =', len(a_init))
print(' size of reference stars =', np.size(radec_refstars['catalog_id']))
start = time.time()
result = least_squares(model_wrefs_func, a_init, loss='linear', args= \
(np.size(field_params), dim_sip, observed_xy_overlap, \
radec_refstars), verbose=2)
print(' time=',time.time()-start)
###Output
# of fitting parameters = 63
size of reference stars = 10
Iteration Total nfev Cost Cost reduction Step norm Optimality
0 1 1.6704e-03 2.24e+04
1 2 6.8229e-11 1.67e-03 8.06e+02 6.66e-01
2 3 1.7699e-11 5.05e-11 1.61e+03 1.17e-02
3 4 1.7036e-11 6.63e-13 3.22e+03 8.67e-03
4 5 1.6992e-11 4.32e-14 6.44e+03 3.38e-03
5 6 1.6991e-11 1.78e-15 1.29e+04 5.89e-04
6 7 1.6989e-11 1.83e-15 3.22e+03 1.37e-03
7 19 1.6989e-11 0.00e+00 0.00e+00 1.37e-03
`xtol` termination condition is satisfied.
Function evaluations 19, initial cost 1.6704e-03, final cost 1.6989e-11, first-order optimality 1.37e-03.
time= 30.635910034179688
###Markdown
Checking results Preparation
###Code
n_fields = np.size(field_params)
n_objects = np.size(radec_est)
true_ra_ptgs = true_field_params['ra'].data
true_dec_ptgs = true_field_params['dec'].data
true_pa_ptgs = true_field_params['pa'].data
ra_ptgs, dec_ptgs, pa_ptgs, scale, a, b =\
np.split(result.x, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields+1,\
3*n_fields+1+(dim_sip+1)**2])
# pa_ptgs, scale, a, b =\
# np.split(result.x, [n_fields, n_fields+1,\
# n_fields+1+(dim_sip+1)**2])
#ra_ptgs = field_params['ra'].data
# dec_ptgs = field_params['dec'].data
a_matrix = np.reshape(a, (dim_sip+1, dim_sip+1))
b_matrix = np.reshape(b, (dim_sip+1, dim_sip+1))
# A/B scaling
m, n = np.indices((dim_sip+1, dim_sip+1))
mn = m + n
a_matrix = a_matrix * (1.e-3**mn)
b_matrix = b_matrix * (1.e-3**mn)
fit_field_params = Table(data=[ra_ptgs, dec_ptgs, pa_ptgs, -np.ones(shape=(np.size(ra_ptgs)))],\
names=['ra', 'dec', 'pa', 'field'],\
dtype=['float64', 'float64', 'float64', 'int64'])
fit_field_params['ra'].unit = u.deg
fit_field_params['dec'].unit = u.deg
fit_field_params['pa'].unit = u.deg
fit_field_params['field'] = np.arange(0, np.size(field_params))
###Output
_____no_output_____
###Markdown
Pointings RA, DEC, position angle and scale
###Code
print(' pointing centre (fit) ra, dec (deg) =', ra_ptgs, dec_ptgs)
print(' pointing centre (true) ra, dec (deg) =', true_ra_ptgs, true_dec_ptgs)
print(' difference ra, dec (deg) =', ra_ptgs-true_ra_ptgs, dec_ptgs-true_dec_ptgs)
print(' position angle (fit) (deg) =', pa_ptgs)
print(' position angle (true) (deg) =', true_pa_ptgs)
print(' difference =', pa_ptgs-true_pa_ptgs)
print(' scale (fit, true) =', scale, (1e-6/7.3/np.pi*180.0)*u.deg/u.um)
print(' difference =', scale-(1e-6/7.3/np.pi*180.0))
###Output
pointing centre (fit) ra, dec (deg) = [265.62026497 265.7008384 265.48943318 265.56772513] [-28.85395174 -28.74322491 -28.7837433 -28.67009143]
pointing centre (true) ra, dec (deg) = [265.6202439 265.70081783 265.4894155 265.56770499] [-28.85396419 -28.74323799 -28.78375368 -28.67010405]
difference ra, dec (deg) = [2.10688110e-05 2.05711352e-05 1.76785986e-05 2.01409117e-05] [1.24552959e-05 1.30849269e-05 1.03781122e-05 1.26225584e-05]
position angle (fit) (deg) = [302.02342055 301.18011818 300.93655753 301.58068317]
position angle (true) (deg) = [302.02408829 301.17958541 300.93717604 301.58002573]
difference = [-0.00066774 0.00053278 -0.0006185 0.00065744]
scale (fit, true) = [7.85057877e-06] 7.848736919600318e-06 deg / um
difference = [1.84184838e-09]
###Markdown
Object positions
###Code
radec_objs = radec_calculator_ab(observed_xy_overlap, fit_field_params, scale[0], a_matrix, b_matrix)
plt.scatter(true_radec_overlap['ra'], true_radec_overlap['dec'], marker='x', label='True')
plt.scatter(radec_est['ra_est'], radec_est['dec_est'], marker='+', label='Initial guess')
plt.scatter(radec_objs['ra_est'], radec_objs['dec_est'], marker='.', label='Final estimation')
plt.scatter(radec_refstars['ra'], radec_refstars['dec'], marker='o', \
label='Reference stars')
plt.xlabel('ra (deg)')
plt.ylabel('dec (deg)')
plt.title('Object positions')
plt.legend()
###Output
_____no_output_____
###Markdown
Position differences
###Code
from astropy.coordinates import SkyCoord
distlist = []
print(np.shape(radec_objs))
for i in range(0, np.size(radec_objs)):
c1 = SkyCoord(radec_objs['ra_est'][i]*u.deg, radec_objs['dec_est'][i]*u.deg)
c2 = SkyCoord(observed_xy_overlap['ra'][i]*u.deg, observed_xy_overlap['dec'][i]*u.deg)
distlist.append(c1.separation(c2).arcsec)
distlist = np.array(distlist)
#plt.hist(np.log10(distlist))
plt.hist(distlist)
plt.xlabel("Residual (arcsec)")
plt.ylabel("Number")
dra = ((radec_objs['ra_est']-observed_xy_overlap['ra']).data)*u.deg
ddec = ((radec_objs['dec_est']-observed_xy_overlap['dec']).data)*u.deg
dra_arcsec = dra.to_value(u.arcsec)
ddec_arcsec = ddec.to_value(u.arcsec)
plt.scatter(dra_arcsec, ddec_arcsec, marker='x')
plt.xlabel('dRA (arcsec)')
plt.ylabel('dDEC (arcsec)')
#plt.xlim([-0,8, 0.0])
#plt.ylim([-0.8, 0.0])
###Output
_____no_output_____
###Markdown
Apply the field parameters to all the objects.
###Code
print(' total # of stars =', len(observed_xy))
radec_allobjs = radec_calculator_ab(observed_xy, fit_field_params, \
scale[0], a_matrix, b_matrix)
plt.scatter(observed_xy['ra'], observed_xy['dec'], marker='x', label='True')
plt.scatter(radec_allobjs['ra_est'], radec_allobjs['dec_est'], marker='.', label='Final estimation')
plt.scatter(radec_refstars['ra'], radec_refstars['dec'], marker='o', \
label='Reference stars')
plt.xlabel('ra (deg)')
plt.ylabel('dec (deg)')
plt.title('Object positions')
plt.legend()
distlist = []
print(np.shape(radec_allobjs))
for i in range(0, np.size(radec_allobjs)):
c1 = SkyCoord(radec_allobjs['ra_est'][i]*u.deg, radec_allobjs['dec_est'][i]*u.deg)
c2 = SkyCoord(observed_xy['ra'][i]*u.deg, observed_xy['dec'][i]*u.deg)
distlist.append(c1.separation(c2).arcsec)
distlist = np.array(distlist)
#plt.hist(np.log10(distlist))
plt.hist(distlist)
plt.xlabel("Residual (arcsec)")
plt.ylabel("Number")
dra = ((radec_allobjs['ra_est']-observed_xy['ra']).data)*u.deg
ddec = ((radec_allobjs['dec_est']-observed_xy['dec']).data)*u.deg
dra_arcsec = dra.to_value(u.arcsec)
ddec_arcsec = ddec.to_value(u.arcsec)
plt.scatter(dra_arcsec, ddec_arcsec, marker='x')
plt.xlabel('dRA (arcsec)')
plt.ylabel('dDEC (arcsec)')
#plt.xlim([-0,8, 0.0])
#plt.ylim([-0.8, 0.0])
###Output
_____no_output_____
###Markdown
With observational errors. We add the observational errors for both the JASMINE observations and the reference stars (Gaia stars). We first add the positional error + displacement to observed_xy_overlap. Then, later, we add the noise to observed_xy (all observations). The displacement for the same observation of a star should be the same between observed_xy and observed_xy_overlap; however, for simplicity of the setup, we use independent ones for now.
###Code
# JASMINE pixel position uncertainty, let's set to 1/300 pix
pix_size = 15.*u.um
xy_error_jasmine = (1.0/300)*pix_size
print(' JASMINE pix error (um) =', xy_error_jasmine)
# Reference stars ra, dec error, let's set to 0.2 mas
radec_error_refstars = (0.2*u.mas).to(u.deg)
print(' Reference stars error (deg) =', radec_error_refstars)
# add errors to JASMINE pix position
# for overlap stars
observed_xy_overlap.rename_column('x', 'x0')
observed_xy_overlap.rename_column('y', 'y0')
observed_xy_overlap.add_column(observed_xy_overlap['x0'], name='x')
observed_xy_overlap.add_column(observed_xy_overlap['y0'], name='y')
observed_xy_overlap['x'] = np.random.normal(observed_xy_overlap['x0'], xy_error_jasmine)
observed_xy_overlap['y'] = np.random.normal(observed_xy_overlap['y0'], xy_error_jasmine)
# store the noise
observed_xy_overlap.add_column(observed_xy_overlap['x'], name='xy_err')
observed_xy_overlap['xy_err'] = xy_error_jasmine
# for all stars
observed_xy.rename_column('x', 'x0')
observed_xy.rename_column('y', 'y0')
observed_xy.add_column(observed_xy['x0'], name='x')
observed_xy.add_column(observed_xy['y0'], name='y')
observed_xy['x'] = np.random.normal(observed_xy['x0'], xy_error_jasmine)
observed_xy['y'] = np.random.normal(observed_xy['y0'], xy_error_jasmine)
observed_xy.add_column(observed_xy['x'], name='xy_err')
observed_xy['xy_err'] = xy_error_jasmine
# add errors to reference stars
radec_refstars.rename_column('ra', 'ra0')
radec_refstars.rename_column('dec', 'dec0')
radec_refstars.add_column(radec_refstars['ra0'], name='ra')
radec_refstars.add_column(radec_refstars['dec0'], name='dec')
# print(' ra before noise =', radec_refstars['ra'])
radec_refstars['ra'] = np.random.normal(radec_refstars['ra0'], radec_error_refstars)
radec_refstars['dec'] = np.random.normal(radec_refstars['dec0'], radec_error_refstars)
# print(' ra w/added noise =', radec_refstars['ra'].to_value(u.mas))
# store the noise
radec_refstars.add_column(radec_refstars['ra'], name='radec_err')
radec_refstars['radec_err'] = radec_error_refstars
def model_wrefs_werr_func(params, n_fields, dim_sip, observed_xy, radec_refstars):
# params = (ra_ptgs, dec_ptgs, pa_ptg..., scale, a..., b...)
ra_ptgs, dec_ptgs, pa_ptgs, scale, a, b =\
np.split(params, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields+1,\
3*n_fields+1+(dim_sip+1)**2])
field_params = Table(data=[ra_ptgs, dec_ptgs, pa_ptgs, -np.ones(shape=(np.size(ra_ptgs)))],\
names=['ra', 'dec', 'pa', 'field'],\
dtype=['float64', 'float64', 'float64', 'int64'])
# names=['ra', 'dec', 'pa', 'field'],\
# units=[u.deg, u.deg, u.deg, None],\
# dtype=['float64', 'float64', 'float64', 'int64'])
field_params['ra'].unit = u.deg
field_params['dec'].unit = u.deg
field_params['pa'].unit = u.deg
field_params['field'] = np.arange(0, np.size(field_params))
# use copy of observed_xy
observed_xy_cp = observed_xy.copy()
a_matrix = np.reshape(a, (dim_sip+1, dim_sip+1))
b_matrix = np.reshape(b, (dim_sip+1, dim_sip+1))
# mns = np.concatenate(((0, 1), np.arange(dim_sip+1, 2*(dim_sip)+1)))
# for mn in mns:
# for m in range(np.max([0, mn-dim_sip]), np.min([mn+1, dim_sip+1])):
# n = mn - m
# ap_matrix[m, n] = 0
# bp_matrix[m, n] = 0
# a_matrix[0, 0] = 0.0
# a_matrix[0, 1] = 0.0
# a_matrix[1, 0] = 0.0
# b_matrix[0, 0] = 0.0
# b_matrix[0, 1] = 0.0
# b_matrix[1, 0] = 0.0
# normalisation.
m, n = np.indices((dim_sip+1, dim_sip+1))
mn = m + n
a_matrix = a_matrix * (1.e-3**mn)
b_matrix = b_matrix * (1.e-3**mn)
# compute ra/dec from x/y with the parameters.
ret = radec_calculator_ab(observed_xy_cp, field_params, scale[0], \
a_matrix, b_matrix)
observed_xy_cp['ra_est'] = ret['ra_est']
observed_xy_cp['dec_est'] = ret['dec_est']
# compute the mean ra/dec for unique stars
cat_ids = unique(observed_xy_cp, 'catalog_id')['catalog_id']
ra_mean = np.zeros_like(observed_xy_cp['ra_est'])
dec_mean = np.zeros_like(observed_xy_cp['dec_est'])
# compute weights from error in xy (um) -> radec (deg)
w_observed_xy = 1.0/(observed_xy_cp['xy_err']*scale[0])
for i in cat_ids:
pos = np.where(observed_xy_cp['catalog_id'] == i)
ra_mean[pos] = np.average(observed_xy_cp[pos]['ra_est'], \
weights=w_observed_xy[pos])*u.deg
dec_mean[pos] = np.average(observed_xy_cp[pos]['dec_est'], \
weights=w_observed_xy[pos])*u.deg
# reference stars' measured mean ra, dec to be compared
# with the ra, dec of reference stars.
radec_est_refstars = radec_refstars.copy()
radec_est_refstars.rename_column('ra', 'ra_est')
radec_est_refstars.rename_column('dec', 'dec_est')
# compute weights for reference stars
w_refstars = 1.0/(radec_refstars['radec_err'])
for i,id in enumerate(radec_refstars['catalog_id']):
# print('i, id=', i, id)
# print(ra_mean[observed_xy_cp['catalog_id'] == id][0])
radec_est_refstars[i]['ra_est'] = ra_mean[observed_xy_cp['catalog_id'] == id][0]
radec_est_refstars[i]['dec_est'] = dec_mean[observed_xy_cp['catalog_id'] == id][0]
radec_est = np.concatenate((observed_xy_cp['ra_est'], observed_xy_cp['dec_est'], \
radec_refstars['ra'], radec_refstars['dec']))
radec_est_mean = np.concatenate((ra_mean, dec_mean, radec_est_refstars['ra_est'], \
radec_est_refstars['dec_est']))
w_all = np.concatenate((w_observed_xy, w_observed_xy, w_refstars, w_refstars))
residuals = w_all*(radec_est - radec_est_mean)
return residuals
###Output
_____no_output_____
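###Markdown
The weights above are simply the inverse of the assumed errors. The sketch below (illustration only; it uses the initial-guess plate scale, which is an assumption at this stage) puts the two error scales on a common footing by expressing both on the sky.
###Code
# A minimal sketch: compare the two error scales entering the weights, in mas.
print('JASMINE single-observation error on sky:',
      (xy_error_jasmine * plate_scale).to(u.mas))
print('Reference-star error on sky:', radec_error_refstars.to(u.mas))
###Output
_____no_output_____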
###Markdown
Let's run least squares.
###Code
from scipy.optimize import least_squares
from scipy.optimize import leastsq
import time
dim_sip = 4
a = np.zeros(shape=(dim_sip+1, dim_sip+1))
b = np.zeros(shape=(dim_sip+1, dim_sip+1))
# constructing a_init (initial parameter set).
a_init = np.array(np.concatenate((field_params['ra'], field_params['dec'], \
field_params['pa'], \
[plate_scale.value], a.flatten(), b.flatten()))) # This must be an ndarray.
# a_init = np.array(np.concatenate((field_params['pa'], \
# [plate_scale.value], a.flatten(), b.flatten()))) # This must be an ndarray.
print(' # of fitting parameters =', len(a_init))
print(' size of reference stars =', np.size(radec_refstars['catalog_id']))
start = time.time()
result = least_squares(model_wrefs_werr_func, a_init, loss='linear', args= \
(np.size(field_params), dim_sip, observed_xy_overlap, \
radec_refstars), verbose=2)
# result = least_squares(model_wrefs_werr_func, a_init, args= \
# (np.size(field_params), dim_sip, observed_xy_overlap, \
# radec_refstars))
print(' time=',time.time()-start)
###Output
# of fitting parameters = 63
size of reference stars = 10
Iteration Total nfev Cost Cost reduction Step norm Optimality
0 1 1.2860e+10 1.70e+17
1 2 4.5420e+06 1.29e+10 8.06e+02 2.97e+15
2 3 1.0232e+03 4.54e+06 1.61e+03 4.68e+11
3 4 1.0229e+03 2.41e-01 3.22e+03 3.30e+10
4 5 1.0229e+03 7.08e-02 8.06e+02 2.25e+11
5 13 1.0226e+03 2.41e-01 1.23e-02 2.83e+11
6 14 1.0223e+03 3.58e-01 3.07e-03 1.91e+10
7 16 1.0222e+03 4.71e-02 1.92e-04 1.62e+09
8 17 1.0222e+03 1.06e-02 4.80e-05 4.59e+08
9 18 1.0222e+03 0.00e+00 0.00e+00 4.59e+08
`xtol` termination condition is satisfied.
Function evaluations 18, initial cost 1.2860e+10, final cost 1.0222e+03, first-order optimality 4.59e+08.
time= 58.53564429283142
###Markdown
Checking results Extract the results.
###Code
n_fields = np.size(field_params)
n_objects = np.size(radec_est)
true_ra_ptgs = true_field_params['ra'].data
true_dec_ptgs = true_field_params['dec'].data
true_pa_ptgs = true_field_params['pa'].data
ra_ptgs, dec_ptgs, pa_ptgs, scale, a, b =\
np.split(result.x, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields+1,\
3*n_fields+1+(dim_sip+1)**2])
# pa_ptgs, scale, a, b =\
# np.split(result.x, [n_fields, n_fields+1,\
# n_fields+1+(dim_sip+1)**2])
#ra_ptgs = field_params['ra'].data
# dec_ptgs = field_params['dec'].data
print(' a and b matrices before scaling=', a, b)
a_matrix = np.reshape(a, (dim_sip+1, dim_sip+1))
b_matrix = np.reshape(b, (dim_sip+1, dim_sip+1))
# A/B scaling
m, n = np.indices((dim_sip+1, dim_sip+1))
mn = m + n
a_matrix = a_matrix * (1.e-3**mn)
b_matrix = b_matrix * (1.e-3**mn)
fit_field_params = Table(data=[ra_ptgs, dec_ptgs, pa_ptgs, -np.ones(shape=(np.size(ra_ptgs)))],\
names=['ra', 'dec', 'pa', 'field'],\
dtype=['float64', 'float64', 'float64', 'int64'])
fit_field_params['ra'].unit = u.deg
fit_field_params['dec'].unit = u.deg
fit_field_params['pa'].unit = u.deg
fit_field_params['field'] = np.arange(0, np.size(field_params))
###Output
a and b matrices before scaling= [-1.32081349e-02 5.59602289e-02 -3.82992723e-03 -1.34820667e-05
1.40624142e-06 1.67301441e-02 -1.69927221e-03 1.31464840e-04
5.47394747e-07 -5.74702932e+02 -1.03675418e-02 -4.95415952e-05
1.10349226e-06 5.47689003e+02 1.22561180e+03 1.10891820e-04
5.81649341e-07 2.98741443e+02 -1.39298912e+03 6.00082429e+02
2.96583292e-07 2.17562554e+02 -1.39294831e+02 -1.26835339e+03
1.47438634e+03] [-4.21882329e-02 -1.24526216e-01 -4.26370614e-03 1.54770886e-04
-7.98599435e-07 9.60302942e-02 -7.60091171e-03 -3.35100050e-05
1.10237021e-06 3.44644503e+02 -2.20928029e-03 1.42337700e-04
4.72617670e-07 -5.80732067e+01 2.02971970e+02 -4.16232682e-06
3.18372811e-07 1.10431395e+03 9.04831211e+02 -2.01552832e+03
6.24772943e-07 -9.69782999e+02 -7.29527638e+01 2.55017708e+03
3.17035709e+02]
###Markdown
Evaluate fitting. We follow https://www.fixes.pub/program/444521.html.
###Code
from scipy import linalg, optimize
chi2dof= np.sum(result.fun**2)/(result.fun.size -result.x.size)
print(' Xi^2/dof =', chi2dof)
J= result.jac
print(' shape of J =', np.shape(J))
# this does not work.
# cov= np.linalg.inv(J.T.dot(J))
# var= np.sqrt(np.diagonal(cov))
# print(' parameter variances =', var)
U, s, Vh= linalg.svd(result.jac, full_matrices=False)
tol= np.finfo(float).eps*s[0]*max(result.jac.shape)
w= s > tol
cov= (Vh[w].T/s[w]**2) @ Vh[w] # robust covariance matrix
cov *= chi2dof
perr= np.sqrt(np.diag(cov)) # 1sigma uncertainty on fitted parameters
# extract errors
ra_ptgs_err, dec_ptgs_err, pa_ptgs_err, scale_err, a_err, b_err =\
np.split(perr, [n_fields, 2*n_fields, 3*n_fields, 3*n_fields+1,\
3*n_fields+1+(dim_sip+1)**2])
# A/B scaling
a_err_matrix = np.reshape(a_err, (dim_sip+1, dim_sip+1))
b_err_matrix = np.reshape(b_err, (dim_sip+1, dim_sip+1))
# A/B scaling
m, n = np.indices((dim_sip+1, dim_sip+1))
mn = m + n
a_err_matrix = a_err_matrix * (1.e-3**mn)
b_err_matrix = b_err_matrix * (1.e-3**mn)
print(' parameter values =', ra_ptgs, dec_ptgs, pa_ptgs, scale, a_matrix, b_matrix)
print(' parameter variances =', ra_ptgs_err, dec_ptgs_err, pa_ptgs_err, scale_err, \
a_err_matrix, b_err_matrix)
###Output
Xi^2/dof = 1.1797020385256651
shape of J = (1796, 63)
parameter values = [265.62026452 265.70083811 265.48943257 265.5677245 ] [-28.8539516 -28.74322535 -28.7837429 -28.67009161] [302.02250864 301.17908395 300.93544541 301.57939874] [7.85083934e-06] [[-1.32081349e-02 5.59602289e-05 -3.82992723e-09 -1.34820667e-14
1.40624142e-18]
[ 1.67301441e-05 -1.69927221e-09 1.31464840e-13 5.47394747e-19
-5.74702932e-13]
[-1.03675418e-08 -4.95415952e-14 1.10349226e-18 5.47689003e-13
1.22561180e-15]
[ 1.10891820e-13 5.81649341e-19 2.98741443e-13 -1.39298912e-15
6.00082429e-19]
[ 2.96583292e-19 2.17562554e-13 -1.39294831e-16 -1.26835339e-18
1.47438634e-21]] [[-4.21882329e-02 -1.24526216e-04 -4.26370614e-09 1.54770886e-13
-7.98599435e-19]
[ 9.60302942e-05 -7.60091171e-09 -3.35100050e-14 1.10237021e-18
3.44644503e-13]
[-2.20928029e-09 1.42337700e-13 4.72617670e-19 -5.80732067e-14
2.02971970e-16]
[-4.16232682e-15 3.18372811e-19 1.10431395e-12 9.04831211e-16
-2.01552832e-18]
[ 6.24772943e-19 -9.69782999e-13 -7.29527638e-17 2.55017708e-18
3.17035709e-22]]
parameter variances = [7.82492025e-08 7.42759724e-08 8.50553754e-08 9.03189850e-08] [7.98223107e-08 8.25452631e-08 9.09422110e-08 8.29752733e-08] [0.00027622 0.00027445 0.00027808 0.00027606] [3.09164693e-11] [[4.82798640e-03 4.74920051e-06 7.00165144e-11 1.78235882e-15
1.85015735e-19]
[3.98689269e-06 6.43840461e-11 1.86828992e-15 1.67324652e-19
7.07266665e-28]
[6.64086122e-11 1.47160368e-15 1.28534502e-19 1.46167089e-28
7.74049145e-31]
[1.72773963e-15 1.53015882e-19 8.50839369e-28 1.02394173e-30
9.57249521e-34]
[1.98232197e-19 1.28603057e-30 8.11067626e-35 4.80611660e-37
5.81873917e-40]] [[4.89259656e-03 3.93570608e-06 5.91552113e-11 1.66743170e-15
1.72403194e-19]
[4.75667248e-06 6.42180211e-11 1.39364415e-15 1.57139542e-19
0.00000000e+00]
[7.03729127e-11 1.82095764e-15 1.21082277e-19 0.00000000e+00
0.00000000e+00]
[1.63400718e-15 1.44239818e-19 0.00000000e+00 0.00000000e+00
0.00000000e+00]
[1.90557820e-19 0.00000000e+00 0.00000000e+00 0.00000000e+00
0.00000000e+00]]
###Markdown
Pointings RA, DEC, position angle and scale
###Code
print(' pointing centre (fit) ra, dec (deg) =', ra_ptgs, dec_ptgs)
print(' pointing centre (true) ra, dec (deg) =', true_ra_ptgs, true_dec_ptgs)
print(' difference ra, dec (deg) =', ra_ptgs-true_ra_ptgs, dec_ptgs-true_dec_ptgs)
print(' uncertainty ra, dec pointings =', ra_ptgs_err, dec_ptgs_err)
print(' position angle (fit) (deg) =', pa_ptgs)
print(' position angle (true) (deg) =', true_pa_ptgs)
print(' difference =', pa_ptgs-true_pa_ptgs)
print(' uncertainty =', pa_ptgs_err)
print(' scale (fit, true) =', scale, (1e-6/7.3/np.pi*180.0)*u.deg/u.um)
print(' difference =', scale-(1e-6/7.3/np.pi*180.0))
print(' uncertainty =', scale_err)
###Output
scale (fit, true) = [7.85083934e-06] 7.848736919600318e-06 deg / um
difference = [2.10242486e-09]
uncertainty = [3.09164693e-11]
###Markdown
Object positions
###Code
radec_objs = radec_calculator_ab(observed_xy_overlap, fit_field_params, scale[0], a_matrix, b_matrix)
plt.scatter(true_radec_overlap['ra'], true_radec_overlap['dec'], marker='x', label='True')
plt.scatter(radec_est['ra_est'], radec_est['dec_est'], marker='+', label='Initial guess')
plt.scatter(radec_objs['ra_est'], radec_objs['dec_est'], marker='.', label='Final estimation')
plt.scatter(radec_refstars['ra0'], radec_refstars['dec0'], marker='o', \
label='Reference stars')
plt.xlabel('ra (deg)')
plt.ylabel('dec (deg)')
plt.title('Object positions')
plt.legend()
distlist = []
print(np.shape(radec_objs))
for i in range(0, np.size(radec_objs)):
c1 = SkyCoord(radec_objs['ra_est'][i]*u.deg, radec_objs['dec_est'][i]*u.deg)
c2 = SkyCoord(observed_xy_overlap['ra'][i]*u.deg, observed_xy_overlap['dec'][i]*u.deg)
distlist.append(c1.separation(c2).arcsec)
distlist = np.array(distlist)
#plt.hist(np.log10(distlist))
plt.hist(distlist)
plt.xlabel("Residual (arcsec)")
plt.ylabel("Number")
dra = ((radec_objs['ra_est']-observed_xy_overlap['ra']).data)*u.deg
ddec = ((radec_objs['dec_est']-observed_xy_overlap['dec']).data)*u.deg
dra_arcsec = dra.to_value(u.arcsec)
ddec_arcsec = ddec.to_value(u.arcsec)
plt.scatter(dra_arcsec, ddec_arcsec, marker='x')
plt.xlabel('dRA (arcsec)')
plt.ylabel('dDEC (arcsec)')
###Output
_____no_output_____
###Markdown
Apply the solution to all the data, taking into account the uncertainties of the star positions and of the fitted parameters. We shall run a Monte Carlo simulation by randomly displacing the star positions and the distortion parameters.
###Code
n_mc = 100
n_stars = len(observed_xy)
print(' total # of stars =', n_stars)
ra_allobjs_samp = np.empty((n_stars, n_mc))
dec_allobjs_samp = np.empty((n_stars, n_mc))
observed_xy_try = observed_xy.copy()
# flattened a, b matrices and their uncertainties
a_flat = a_matrix.flatten()
b_flat = b_matrix.flatten()
a_err = a_err_matrix.flatten()
b_err = b_err_matrix.flatten()
for i in range(n_mc):
# displace observed_xy positions
observed_xy_try['x'] = np.random.normal(observed_xy['x'], observed_xy['xy_err'])
observed_xy_try['y'] = np.random.normal(observed_xy['y'], observed_xy['xy_err'])
# displace the parameters
ra_ptgs_try = np.random.normal(ra_ptgs, ra_ptgs_err)
dec_ptgs_try = np.random.normal(dec_ptgs, dec_ptgs_err)
pa_ptgs_try = np.random.normal(pa_ptgs, pa_ptgs_err)
scale_try = np.random.normal(scale, scale_err)
a_try = np.random.normal(a_flat, a_err)
b_try = np.random.normal(b_flat, b_err)
a_matrix_try = np.reshape(a_try, (dim_sip+1, dim_sip+1))
b_matrix_try = np.reshape(b_try, (dim_sip+1, dim_sip+1))
fit_field_params_try = Table(data=[ra_ptgs_try, dec_ptgs_try, pa_ptgs_try, \
-np.ones(shape=(np.size(ra_ptgs)))],\
names=['ra', 'dec', 'pa', 'field'],\
dtype=['float64', 'float64', 'float64', 'int64'])
fit_field_params_try['ra'].unit = u.deg
fit_field_params_try['dec'].unit = u.deg
fit_field_params_try['pa'].unit = u.deg
fit_field_params_try['field'] = np.arange(0, np.size(field_params))
radec_allobjs_try = radec_calculator_ab(observed_xy_try, fit_field_params_try, \
scale_try[0], a_matrix_try, b_matrix_try)
ra_allobjs_samp[:, i] = radec_allobjs_try['ra_est']
dec_allobjs_samp[:, i] = radec_allobjs_try['dec_est']
ra_allobjs_mean = np.mean(ra_allobjs_samp, axis=1)
ra_allobjs_std = np.std(ra_allobjs_samp, axis=1)
dec_allobjs_mean = np.mean(dec_allobjs_samp, axis=1)
dec_allobjs_std = np.std(dec_allobjs_samp, axis=1)
# error from the true value
ra_allobjs_err = ra_allobjs_mean-observed_xy['ra']
dec_allobjs_err = dec_allobjs_mean-observed_xy['dec']
plt.scatter(ra_allobjs_err, ra_allobjs_std, marker='x', label='RA')
plt.scatter(dec_allobjs_err, dec_allobjs_std, marker='.', label='DEC')
print(' RA mean standard deviation of measurements (arcsec) =', \
(np.mean(ra_allobjs_std)*u.deg).to_value(u.arcsec))
print(' RA standard deviation from the true values (arcsec) =',
(np.std(ra_allobjs_err)*u.deg).to_value(u.arcsec))
print(' DEC mean standard deviation of measurements (arcsec) =',
(np.mean(dec_allobjs_std)*u.deg).to_value(u.arcsec))
print(' DEC standard deviation from the true values (arcsec)=',
(np.std(dec_allobjs_err)*u.deg).to_value(u.arcsec))
plt.xlabel('deviation from the true radec (deg)')
plt.ylabel('standard deviation of measurement (deg)')
plt.title('Object positions')
plt.legend()
###Output
total # of stars = 2563
RA mean standard deviation of measurements (arcsec) = 0.003640542851837218
RA standard deviation from the true values (arcsec) = 0.009188813913378172
DEC mean standard deviation of measurements (arcsec) = 0.003174717949413465
DEC standard deviation from the true values (arcsec)= 0.007260206365501805
|
docs/examples/jupyter/Tonnage List API/Working with the TonnageListAPI.ipynb | ###Markdown
Working with the `TonnageListAPI` SetupInstall the Signal Ocean SDK:
###Code
!pip install signal-ocean
###Output
_____no_output_____
###Markdown
Set your subscription key, acquired here: [https://apis.signalocean.com/profile](https://apis.signalocean.com/profile)
###Code
signal_ocean_api_key = "" # replace with your subscription key
###Output
_____no_output_____
###Markdown
Retrieving a historical tonnage listFirst, we need to create an instance of the `TonnageListAPI`:
###Code
from signal_ocean import Connection
from signal_ocean.tonnage_list import TonnageListAPI
connection = Connection(signal_ocean_api_key)
api = TonnageListAPI(connection)
###Output
_____no_output_____
###Markdown
Then, we need to determine the parameters of the **historical tonnage list** (**HTL**). In order to fetch an HTL, we will need to specify:- a loading port,- a vessel class,- a time frame.Ports and vessel classes can be retrieved through the `get_ports` and `get_vessel_classes` methods:
###Code
api.get_vessel_classes()
###Output
_____no_output_____
###Markdown
Ports can be looked up by their name using the `PortFilter`:
###Code
from signal_ocean.tonnage_list import PortFilter
api.get_ports(PortFilter(name_like="rot"))
###Output
_____no_output_____
###Markdown
And so can vessel classes with the use of the `VesselClassFilter`:
###Code
from signal_ocean.tonnage_list import VesselClassFilter
api.get_vessel_classes(VesselClassFilter(name_like="MAX"))
###Output
_____no_output_____
###Markdown
Note that the search is case-insensitive and does not require specifying exact names.We want our HTL to contain Aframax vessels in Ceyhan, with a 6-day forward laycan end, over a recent date range (the code below uses the last 5 days):
###Code
from datetime import timedelta, date
vessel_class_filter = VesselClassFilter(name_like="aframax")
vessel_class = api.get_vessel_classes(vessel_class_filter)[0]
port_filter = PortFilter(name_like="ceyhan")
port = api.get_ports(port_filter)[0]
laycan_end_in_days = 6
today = date.today()
start_date = today - timedelta(days=5)
###Output
_____no_output_____
###Markdown
With the parameters above, we can now request an HTL:
###Code
from signal_ocean.tonnage_list import DateRange
htl = api.get_historical_tonnage_list(
port, vessel_class, laycan_end_in_days, DateRange(start_date, today)
)
###Output
_____no_output_____
###Markdown
The resulting historical tonnage list is a Python object that contains a collection of tonnage lists, each of which has a timestamp and a collection of vessel data. The tonnage lists are ordered by date in descending order:
###Code
yesterdays_tl = htl[1]
print("Date:", yesterdays_tl.date)
print("Vessel count:", len(yesterdays_tl.vessels))
print("Example vessel:", yesterdays_tl.vessels[0])
###Output
Date: 2021-10-12 12:00:00+00:00
Vessel count: 81
Example vessel: Vessel(imo=9486910, name='Beta', vessel_class='Aframax', ice_class=None, year_built=2010, deadweight=105319, length_overall=228.6, breadth_extreme=42, market_deployment='Contract', push_type='Not Pushed', open_port='Rotterdam', open_date=datetime.datetime(2019, 10, 15, 12, 37, 38, 306000, tzinfo=datetime.timezone.utc), operational_status='Laden', commercial_operator='Held Maritime Shipping', commercial_status='Available', eta=datetime.datetime(2019, 10, 27, 9, 0, tzinfo=datetime.timezone.utc), latest_ais=datetime.datetime(2019, 10, 13, 4, 47, 29, tzinfo=datetime.timezone.utc), subclass='Dirty', willing_to_switch_subclass=False, open_prediction_accuracy='Narrow Area', open_areas=(Area(name='Continent', location_taxonomy='Narrow Area'), Area(name='UK Continent', location_taxonomy='Wide Area'), Area(name='Netherlands', location_taxonomy='Country')), availability_port_type='Prediction', availability_date_type='Prediction')
###Markdown
The result can also be converted into a Pandas data frame:
###Code
data_frame = htl.to_data_frame()
data_frame
###Output
_____no_output_____
###Markdown
Example 1 - Plotting a supply trendThe data frame format makes it very easy to generate a supply trend plot.We'll generate a supply trend from the beginning of the year, but we'll also filter the vessel list by looking for vessels that:- are pushed,- have a market deployment type of "Relet" or "Spot",- their commercial status is available, cancelled or failed,- are crude oil tankers (their vessel subclass is "Dirty"),- their AIS information is no older than 5 days.Filtering can be achieved by creating an instance of a `VesselFilter` and passing it to the `get_historical_tonnage_list` method. A `VesselFilter` meeting the above criteria will look as follows:
###Code
from signal_ocean.tonnage_list import (
VesselFilter,
PushType,
MarketDeployment,
CommercialStatus,
VesselSubclass,
)
vessel_filter = VesselFilter(
push_types=[PushType.PUSHED],
market_deployments=[MarketDeployment.RELET, MarketDeployment.SPOT],
commercial_statuses=[
CommercialStatus.AVAILABLE,
CommercialStatus.CANCELLED,
CommercialStatus.FAILED,
],
vessel_subclass=VesselSubclass.DIRTY,
latest_ais_since=5,
)
###Output
_____no_output_____
###Markdown
Note the usage of the `PushType`, `MarketDeployment`, `CommercialStatus`, and `VesselSubclass`. These are enum-like classes that contain constants for all the possible values for a given `VesselFilter` parameter. To list the available values for any of the classes, just invoke `list()` on the class:
###Code
list(CommercialStatus)
###Output
_____no_output_____
###Markdown
You can use these values directly or use a corresponding class member:
###Code
CommercialStatus.ON_SUBS == 'On Subs'
###Output
_____no_output_____
###Markdown
Let's get the HTL for our filter:
###Code
beginning_of_year = date(today.year, 1, 1)
htl_for_supply_trend = api.get_historical_tonnage_list(
port,
vessel_class,
laycan_end_in_days,
DateRange(start_date, today),
vessel_filter=vessel_filter,
)
supply_trend_data_frame = htl_for_supply_trend.to_data_frame()
supply_trend_data_frame
###Output
_____no_output_____
###Markdown
Now, we can generate the plot:
###Code
from signal_ocean.tonnage_list import IndexLevel
supply_trend = supply_trend_data_frame.groupby(
IndexLevel.DATE, sort=True
).size()
plot = supply_trend.plot()
plot.set_ylabel("Vessel count")
plot
###Output
_____no_output_____
###Markdown
Example 2 - Generating an Excel sheetThe data frame can be easily saved as an Excel file by using Pandas's built-in `to_excel()` function.Before we do that, we need to remove all the time zone information from all timestamps in the data frame. This is because Excel does not support storing time zone information along with timestamps. However, Signal Ocean's SDK always provides time zone information to make all timestamp-based computation unambiguous.
###Code
from signal_ocean.tonnage_list import Column
without_time_zones = (
supply_trend_data_frame.reset_index()
.astype(
{
IndexLevel.DATE: "datetime64[ns]",
Column.OPEN_DATE: "datetime64[ns]",
Column.ETA: "datetime64[ns]",
Column.LATEST_AIS: "datetime64[ns]",
}
)
.set_index([IndexLevel.DATE, IndexLevel.IMO])
)
###Output
_____no_output_____
###Markdown
Now, we can generate the Excel file:
###Code
without_time_zones.to_excel('htl.xlsx')
###Output
_____no_output_____
###Markdown
Retrieving a live tonnage listRetrieving a live tonnage list is almost exactly the same as getting a historical one except, instead of using the `get_historical_tonnage_list` method, you use the `get_tonnage_list` method and you don't pass a `DateRange` as an argument. The `get_tonnage_list` method returns a single `TonnageList` that contains live vessel data.Because of this similarity, we can reuse the parameters we used for our HTL queries:
###Code
tonnage_list = api.get_tonnage_list(
port, vessel_class, laycan_end_in_days, vessel_filter
)
tonnage_list
###Output
_____no_output_____
###Markdown
We can also convert the resulting tonnage list to a data frame:
###Code
tonnage_list.to_data_frame()
###Output
_____no_output_____
###Markdown
Working with the `TonnageListAPI` Run this example in [Colab](https://colab.research.google.com/github/SignalOceanSdk/SignalSDK/blob/master/docs/examples/jupyter/Tonnage%20List%20API/Working%20with%20the%20TonnageListAPI.ipynb). SetupInstall the Signal Ocean SDK:
###Code
!pip install signal-ocean
###Output
_____no_output_____
###Markdown
Set your subscription key, acquired here: [https://apis.signalocean.com/profile](https://apis.signalocean.com/profile)
###Code
signal_ocean_api_key = "" # replace with your subscription key
###Output
_____no_output_____
###Markdown
Retrieving a historical tonnage listFirst, we need to create an instance of the `TonnageListAPI`:
###Code
from signal_ocean import Connection
from signal_ocean.tonnage_list import TonnageListAPI
connection = Connection(signal_ocean_api_key)
api = TonnageListAPI(connection)
###Output
_____no_output_____
###Markdown
Then, we need to determine the parameters of the **historical tonnage list** (**HTL**). In order to fetch an HTL, we will need to specify:- a loading port,- a vessel class,- a time frame.Ports and vessel classes can be retrieved through the `get_ports` and `get_vessel_classes` methods:
###Code
api.get_vessel_classes()
###Output
_____no_output_____
###Markdown
Ports can be looked up by their name using the `PortFilter`:
###Code
from signal_ocean.tonnage_list import PortFilter
api.get_ports(PortFilter(name_like="rot"))
###Output
_____no_output_____
###Markdown
And so can vessel classes with the use of the `VesselClassFilter`:
###Code
from signal_ocean.tonnage_list import VesselClassFilter
api.get_vessel_classes(VesselClassFilter(name_like="MAX"))
###Output
_____no_output_____
###Markdown
Note that the search is case-insensitive and does not require specifying exact names.We want our HTL to contain Aframax vessels in Ceyhan, with a 6-day forward laycan end, over a recent date range (the code below uses the last 5 days):
###Code
from datetime import timedelta, date
vessel_class_filter = VesselClassFilter(name_like="aframax")
vessel_class = api.get_vessel_classes(vessel_class_filter)[0]
port_filter = PortFilter(name_like="ceyhan")
port = api.get_ports(port_filter)[0]
laycan_end_in_days = 6
today = date.today()
start_date = today - timedelta(days=5)
###Output
_____no_output_____
###Markdown
With the parameters above, we can now request an HTL:
###Code
from signal_ocean.tonnage_list import DateRange
htl = api.get_historical_tonnage_list(
port, vessel_class, laycan_end_in_days, DateRange(start_date, today)
)
###Output
_____no_output_____
###Markdown
The resulting historical tonnage list is a Python object that contains a collection of tonnage lists, each of which has a timestamp and a collection of vessel data. The tonnage lists are ordered by date in descending order:
###Code
yesterdays_tl = htl[1]
print("Date:", yesterdays_tl.date)
print("Vessel count:", len(yesterdays_tl.vessels))
print("Example vessel:", yesterdays_tl.vessels[0])
###Output
Date: 2021-11-17 12:00:00+00:00
Vessel count: 76
Example vessel: Vessel(imo=8508292, name='Gunung Kemala', vessel_class='Aframax', ice_class=None, year_built=1986, deadweight=86962, length_overall=242.0, breadth_extreme=42, market_deployment='Program', push_type='Not Pushed', open_port='Balongan Terminal', open_date=datetime.datetime(2021, 2, 16, 2, 16, 32, 879000, tzinfo=datetime.timezone.utc), operational_status='Loading', commercial_operator='Pertamina', commercial_status='Available', eta=datetime.datetime(2021, 3, 9, 7, 0, tzinfo=datetime.timezone.utc), latest_ais=datetime.datetime(2021, 2, 9, 16, 4, 53, tzinfo=datetime.timezone.utc), subclass='Dirty', willing_to_switch_subclass=False, open_prediction_accuracy='Narrow Area', open_areas=(Area(name='Indonesia', location_taxonomy='Narrow Area'), Area(name='South East Asia', location_taxonomy='Wide Area'), Area(name='Indonesia', location_taxonomy='Country')), availability_port_type='Prediction', availability_date_type='Prediction')
###Markdown
The result can also be converted into a Pandas data frame:
###Code
data_frame = htl.to_data_frame()
data_frame
###Output
_____no_output_____
###Markdown
Example 1 - Plotting a supply trendThe data frame format makes it very easy to generate a supply trend plot.We'll generate a supply trend from the beginning of the year, but we'll also filter the vessel list by looking for vessels that:- are pushed,- have a market deployment type of "Relet" or "Spot",- their commercial status is available, cancelled or failed,- are crude oil tankers (their vessel subclass is "Dirty"),- their AIS information is no older than 5 days.Filtering can be achieved by creating an instance of a `VesselFilter` and passing it to the `get_historical_tonnage_list` method. A `VesselFilter` meeting the above criteria will look as follows:
###Code
from signal_ocean.tonnage_list import (
VesselFilter,
PushType,
MarketDeployment,
CommercialStatus,
VesselSubclass,
)
vessel_filter = VesselFilter(
push_types=[PushType.PUSHED],
market_deployments=[MarketDeployment.RELET, MarketDeployment.SPOT],
commercial_statuses=[
CommercialStatus.AVAILABLE,
CommercialStatus.CANCELLED,
CommercialStatus.FAILED,
],
vessel_subclass=VesselSubclass.DIRTY,
latest_ais_since=5,
)
###Output
_____no_output_____
###Markdown
Note the usage of the `PushType`, `MarketDeployment`, `CommercialStatus`, and `VesselSubclass`. These are enum-like classes that contain constants for all the possible values for a given `VesselFilter` parameter. To list the available values for any of the classes, just invoke `list()` on the class:
###Code
list(CommercialStatus)
###Output
_____no_output_____
###Markdown
You can use these values directly or use a corresponding class member:
###Code
CommercialStatus.ON_SUBS == 'On Subs'
###Output
_____no_output_____
###Markdown
Let's get the HTL for our filter:
###Code
beginning_of_year = date(today.year, 1, 1)
htl_for_supply_trend = api.get_historical_tonnage_list(
port,
vessel_class,
laycan_end_in_days,
DateRange(start_date, today),
vessel_filter=vessel_filter,
)
supply_trend_data_frame = htl_for_supply_trend.to_data_frame()
supply_trend_data_frame
###Output
_____no_output_____
###Markdown
Now, we can generate the plot:
###Code
from signal_ocean.tonnage_list import IndexLevel
supply_trend = supply_trend_data_frame.groupby(
IndexLevel.DATE, sort=True
).size()
plot = supply_trend.plot()
plot.set_ylabel("Vessel count")
plot.set_title("Ceyhan Aframax Laycan 6 days")
plot
###Output
_____no_output_____
###Markdown
Example 2 - Generating an Excel sheetThe data frame can be easily saved as an Excel file by using Pandas's built-in `to_excel()` function.Before we do that, we need to remove all the time zone information from all timestamps in the data frame. This is because Excel does not support storing time zone information along with timestamps. However, Signal Ocean's SDK always provides time zone information to make all timestamp-based computation unambiguous.
###Code
from signal_ocean.tonnage_list import Column
without_time_zones = (
supply_trend_data_frame.reset_index()
.astype(
{
IndexLevel.DATE: "datetime64[ns]",
Column.OPEN_DATE: "datetime64[ns]",
Column.ETA: "datetime64[ns]",
Column.LATEST_AIS: "datetime64[ns]",
}
)
.set_index([IndexLevel.DATE, IndexLevel.IMO])
)
###Output
C:\Users\PC863~1.ROM\AppData\Local\Temp/ipykernel_16416/191344502.py:4: FutureWarning: Using .astype to convert from timezone-aware dtype to timezone-naive dtype is deprecated and will raise in a future version. Use obj.tz_localize(None) or obj.tz_convert('UTC').tz_localize(None) instead
supply_trend_data_frame.reset_index()
###Markdown
Now, we can generate the Excel file:
###Code
without_time_zones.to_excel('Ceyhan_Afra_6days_history.xlsx')
###Output
_____no_output_____ |
Capston week1/3.ipynb | ###Markdown
**Space X Falcon 9 First Stage Landing Prediction** Lab 2: Data wrangling Estimated time needed: **60** minutes In this lab, we will perform some Exploratory Data Analysis (EDA) to find some patterns in the data and determine what would be the label for training supervised models.In the data set, there are several different cases where the booster did not land successfully. Sometimes a landing was attempted but failed due to an accident; for example, True Ocean means the mission outcome was successfully landed to a specific region of the ocean while False Ocean means the mission outcome was unsuccessfully landed to a specific region of the ocean. True RTLS means the mission outcome was successfully landed to a ground pad False RTLS means the mission outcome was unsuccessfully landed to a ground pad.True ASDS means the mission outcome was successfully landed on a drone ship False ASDS means the mission outcome was unsuccessfully landed on a drone ship.In this lab we will mainly convert those outcomes into Training Labels with `1` means the booster successfully landed `0` means it was unsuccessful. Falcon 9 first stage will land successfully  Several examples of an unsuccessful landing are shown here:  ObjectivesPerform exploratory Data Analysis and determine Training Labels* Exploratory Data Analysis* Determine Training Labels *** Import Libraries and Define Auxiliary Functions We will import the following libraries.
###Code
# Pandas is a software library written for the Python programming language for data manipulation and analysis.
import pandas as pd
#NumPy is a library for the Python programming language, adding support for large, multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays
import numpy as np
###Output
_____no_output_____
###Markdown
Data Analysis Load Space X dataset, from last section.
###Code
df=pd.read_csv("https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/datasets/dataset_part_1.csv")
df.head(10)
###Output
_____no_output_____
###Markdown
Identify and calculate the percentage of the missing values in each attribute
###Code
df.isnull().sum()/df.count()*100
###Output
_____no_output_____
###Markdown
Identify which columns are numerical and categorical:
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
TASK 1: Calculate the number of launches on each siteThe data contains several Space X launch facilities: Cape Canaveral Space Launch Complex 40 VAFB SLC 4E , Vandenberg Air Force Base Space Launch Complex 4E (SLC-4E), Kennedy Space Center Launch Complex 39A KSC LC 39A .The location of each Launch Is placed in the column LaunchSite Next, let's see the number of launches for each site.Use the method value_counts() on the column LaunchSite to determine the number of launches on each site:
###Code
# Apply value_counts() on column LaunchSite
df['LaunchSite'].value_counts()
###Output
_____no_output_____
###Markdown
Each launch aims to an dedicated orbit, and here are some common orbit types: * LEO: Low Earth orbit (LEO)is an Earth-centred orbit with an altitude of 2,000 km (1,200 mi) or less (approximately one-third of the radius of Earth),\[1] or with at least 11.25 periods per day (an orbital period of 128 minutes or less) and an eccentricity less than 0.25.\[2] Most of the manmade objects in outer space are in LEO \[1].* VLEO: Very Low Earth Orbits (VLEO) can be defined as the orbits with a mean altitude below 450 km. Operating in these orbits can provide a number of benefits to Earth observation spacecraft as the spacecraft operates closer to the observation\[2].* GTO A geosynchronous orbit is a high Earth orbit that allows satellites to match Earth's rotation. Located at 22,236 miles (35,786 kilometers) above Earth's equator, this position is a valuable spot for monitoring weather, communications and surveillance. Because the satellite orbits at the same speed that the Earth is turning, the satellite seems to stay in place over a single longitude, though it may drift north to south,” NASA wrote on its Earth Observatory website \[3] .* SSO (or SO): It is a Sun-synchronous orbit also called a heliosynchronous orbit is a nearly polar orbit around a planet, in which the satellite passes over any given point of the planet's surface at the same local mean solar time \[4] .* ES-L1 :At the Lagrange points the gravitational forces of the two large bodies cancel out in such a way that a small object placed in orbit there is in equilibrium relative to the center of mass of the large bodies. L1 is one such point between the sun and the earth \[5] .* HEO A highly elliptical orbit, is an elliptic orbit with high eccentricity, usually referring to one around Earth \[6].* ISS A modular space station (habitable artificial satellite) in low Earth orbit. It is a multinational collaborative project between five participating space agencies: NASA (United States), Roscosmos (Russia), JAXA (Japan), ESA (Europe), and CSA (Canada) \[7] * MEO Geocentric orbits ranging in altitude from 2,000 km (1,200 mi) to just below geosynchronous orbit at 35,786 kilometers (22,236 mi). Also known as an intermediate circular orbit. These are "most commonly at 20,200 kilometers (12,600 mi), or 20,650 kilometers (12,830 mi), with an orbital period of 12 hours \[8] * HEO Geocentric orbits above the altitude of geosynchronous orbit (35,786 km or 22,236 mi) \[9] * GEO It is a circular geosynchronous orbit 35,786 kilometres (22,236 miles) above Earth's equator and following the direction of Earth's rotation \[10] * PO It is one type of satellites in which a satellite passes above or nearly above both poles of the body being orbited (usually a planet such as the Earth \[11] some are shown in the following plot:  TASK 2: Calculate the number and occurrence of each orbit Use the method .value_counts() to determine the number and occurrence of each orbit in the column Orbit
###Code
# Apply value_counts on Orbit column
df['Orbit'].value_counts()
###Output
_____no_output_____
###Markdown
TASK 3: Calculate the number and occurence of mission outcome per orbit type Use the method .value_counts() on the column Outcome to determine the number of landing_outcomes.Then assign it to a variable landing_outcomes.
###Code
landing_outcomes = df['Outcome'].value_counts()
landing_outcomes
###Output
_____no_output_____
###Markdown
True Ocean means the mission outcome was successfully landed to a specific region of the ocean while False Ocean means the mission outcome was unsuccessfully landed to a specific region of the ocean. True RTLS means the mission outcome was successfully landed to a ground pad False RTLS means the mission outcome was unsuccessfully landed to a ground pad.True ASDS means the mission outcome was successfully landed to a drone ship False ASDS means the mission outcome was unsuccessfully landed to a drone ship. None ASDS and None None these represent a failure to land.
###Code
for i,outcome in enumerate(landing_outcomes.keys()):
print(i,outcome)
###Output
0 True ASDS
1 None None
2 True RTLS
3 False ASDS
4 True Ocean
5 None ASDS
6 False Ocean
7 False RTLS
###Markdown
We create a set of outcomes where the second stage did not land successfully:
###Code
bad_outcomes=set(landing_outcomes.keys()[[1,3,5,6,7]])
bad_outcomes
###Output
_____no_output_____
###Markdown
TASK 4: Create a landing outcome label from Outcome column Using the Outcome, create a list where the element is zero if the corresponding row in Outcome is in the set bad_outcome; otherwise, it's one. Then assign it to the variable landing_class:
###Code
# landing_class = 0 if bad_outcome
# landing_class = 1 otherwise
landing_class=df['Outcome'].map(lambda x : 0 if x in bad_outcomes else 1)
###Output
_____no_output_____
###Markdown
This variable will represent the classification variable that represents the outcome of each launch. If the value is zero, the first stage did not land successfully; one means the first stage landed Successfully
###Code
df['Class']=landing_class
df[['Class']].head(8)
df.head(5)
###Output
_____no_output_____
###Markdown
We can use the following line of code to determine the success rate:
###Code
df["Class"].mean()
###Output
_____no_output_____ |
.ipynb_checkpoints/Logistic-Regression-checkpoint.ipynb | ###Markdown
Download the MNIST data set and load it into the variables below
###Code
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
fig, axes= plt.subplots(1,4, figsize=(7,3))
for img, label, ax in zip(x_train[:4], y_train[:4], axes):
ax.set_title(label)
ax.imshow(img)
ax.axis('off')
plt.show()
###Output
_____no_output_____
###Markdown
We must flatten the images and scale the pixel values to the range 0 to 1
###Code
x_train = x_train.reshape(60000, 784) / 255
x_test = x_test.reshape(10000, 784) / 255
###Output
_____no_output_____
###Markdown
Create a one-hot array for the y-values; each label becomes an array of 10 elements
###Code
with tf.Session() as sesh:
y_train = sesh.run(tf.one_hot(y_train, 10))
y_test = sesh.run(tf.one_hot(y_test, 10))
y_train[:4]
# hyper parameters
learning_rate = 0.01
epochs = 50
# Divide the total number of pictures by the batch size to get num of batches
batch_size = 100
batches = int(x_train.shape[0] / batch_size)
###Output
_____no_output_____
###Markdown
Y is a 10 element list. x is a 784 element long list since we flattened it. w is a matrix of size 784 x 10. b is a 10 element matrix. y = wx + b Inputs: X is the "flattened / normalized" images, Y is the "one hot" labels
###Code
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(np.random.randn(784, 10).astype(np.float32))
B = tf.Variable(np.random.randn(10).astype(np.float32))
###Output
_____no_output_____
###Markdown
Softmax function converts all prediction scores to probabilities and makes the sum of the probabilities equal to 1.
###Code
pred = tf.nn.softmax(tf.add(tf.matmul(X,W), B))
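# A minimal sketch (assumption, not from the original notebook) of how the
# hyper-parameters defined above (learning_rate, epochs, batches, batch_size) could
# drive a TF1-style cross-entropy loss and gradient-descent training loop;
# cost, optimizer, x_batch, y_batch and c are illustrative names.
# cost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(pred + 1e-10), axis=1))
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# with tf.Session() as sesh:
#     sesh.run(tf.global_variables_initializer())
#     for epoch in range(epochs):
#         for i in range(batches):
#             x_batch = x_train[i * batch_size:(i + 1) * batch_size]
#             y_batch = y_train[i * batch_size:(i + 1) * batch_size]
#             _, c = sesh.run([optimizer, cost], feed_dict={X: x_batch, Y: y_batch})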
###Output
_____no_output_____ |
P04-HigherOrderMethods.ipynb | ###Markdown
Higher Order Methods====================The Heun Method is an example of a more general class of numerical solvers known as Runge-Kutta solvers. See the slides for details, but basically you can classify RK solvers by the way they step in time and how they weight the extrapolated results in the final answer.One particular choice of weights that is very popular is the RK4 table:Which leads to the RK4Step function: def RK4Step(s, t, derivs, dt): f1 = derivs(s, t)*dt f2 = derivs(s+f1/2.0, t+dt/2.0)*dt f3 = derivs(s+f2/2.0, t+dt/2.0)*dt f4 = derivs(s+f3, t+dt)*dt return s + (f1+2*f2+2*f3+f4)/6.0Symplectic Methods==================Some kinds of systems have properties that make even high order RK methods unsatisfactory. These are systems where Energy (and other quantities) are conserved. These are called "Hamiltonian" systems due to the extensive history of applying the Hamiltonian (Energy oriented) formalism to their solution. See the slides for more details of the Hamiltonian formalism. The RK algorithms focus on reducing truncation error, but do not respect any inherently conserved quantities. *Symplectic* methods are designed to exactly conserve these quantities at the possible expense of some truncation error. The simplest of these is the *SymplecticEuler* method (sometimes known as the *Cromer* method). A second order version is the *Verlet* method, or the "Leapfrog" method. def EulerStep(s, t, derivs, dt): return s + derivs(s,t)*dt def SymplecticEulerStep(s, t, derivs, dt): s1 = s + derivs(s,t,0)*dt q-step return s1 + derivs(s1,t,1)*dt p-step def VerletStep(s, t, derivs, dt): dth = dt/2.0 half of h s = s + derivs(s, t, 0)*dth changes only position s = s + derivs(s, t+dth, 1)*dt changes only velocity return s + derivs(s, t+dt, 0)*dth change only positionNotice that when the SymplecticEulerStep and VerletStep methods call derivs they add a third argument. This argument tells the derivs function whether the step is a "space step, or q-step" (pass a zero) or a "velocity step, or p-step" (pass a one). If the derivs function detects that a third argument has been passed it should return zero in the rate of change of the "other" part of the state (e.g., if it's a "space step" then the rate of change of the velocity (or momentum) should be zero). In this way the VerletStep function can carefully craft a sequence of steps that take care to conserve area in phase space (and energy in the long run, even though they may be less accurate in the short run).Below are some examples of EulerStep, SymplecticEulerStep, RK4Step and VerletStep applied to the simple harmonic oscillator.
###Code
%pylab inline
def EulerStep(s, t, derivs, dt):
"""
Step whichever way derivs says to go.
"""
return s + derivs(s,t)*dt
def SymplecticEulerStep(s, t, derivs, dt):
"""
Take two steps, one in only "q", one in only "p" direction.
"""
s1 = s + derivs(s,t,0)*dt # q-step (only change position)
return s1 + derivs(s1,t,1)*dt # p-step (only change momentum)
k=1.0
m=1.0
def derivs_sho(s, t, step=None):
"""
Simple harmonic oscillator derivs for symplectic and non-symplectic
The 'state' array (s) now has an ensemble of oscillators with slightly
different initial conditions. The first n elements are the positions
and the second half are the velocities.
"""
n=int(len(s)/2) # the first half of s is 'q', second half 'p'
v=s[n:]
if step==0:
return append(v, zeros(n)) # for q-steps, just update the position
# no need to compute 'a'
else:
x=s[:n] # only extract x and compute a if we need it.
a=-k*x/m
if step is None: # it must be an RK4 step
return append(v,a)
else: # velocity step
return append(zeros(n),a) # for p-steps, just update the velocity
rcParams['figure.figsize']=(7.,7.)
DO_SYMPLECTIC=False # Change "DO_SYMPLECTIC" to True or False to switch between
# SymplecticEuler and Euler "step" functions.
delta=0.1 # how far apart in phase space are the points
s0= array([1.0,0.0])
s1=s0 + array([delta, 0])
s2=s0 + array([delta,delta])
s3=s0 + array([0, delta]) # four points in phase space
t = 0.0
dt = pi/10
s = array(list(flatten(array([s0,s1,s2,s3,s0]).T))) # state of four objects -> [x1,x2,x3,x4,x5,v1,v2,v3,v4,v5]
# (fifth object is same as first to "close the box")
print("s(at t=0)=",s)
n = int(len(s)/2)
clf()
if DO_SYMPLECTIC:
title("Harmonic Oscillator Phase Space: Symplectic Euler")
wsize=1.4
else:
title("Harmonic Oscillator Phase Space: Euler")
wsize=2.5
axes().set_aspect('equal')
axis([-wsize,wsize,-wsize,wsize])
xlabel("position")
ylabel("momentum (or velocity since m=1)")
plot(s[:n], s[n:], 'r-',s[:n],s[n:],'b.')
while t<1.9*pi:
if DO_SYMPLECTIC:
s=SymplecticEulerStep(s, t, derivs_sho, dt)
else:
s=EulerStep(s, t, derivs_sho, dt)
t+=dt
plot(s[:n], s[n:], 'r-',s[:n],s[n:],'b.')
def VerletStep(s, t, derivs, dt):
dth = dt/2.0 # half of h
s = s + derivs(s, t, 0)*dth # changes only position
s = s + derivs(s, t+dth, 1)*dt # changes only velocity from s1
return s + derivs(s, t+dt, 0)*dth # change only position
def RK4Step(s, t, derivs, dt):
"""
Take a single RK4 step.
"""
dth=dt/2.0
f1 = derivs(s, t)
f2 = derivs(s+f1*dth, t+dth)
f3 = derivs(s+f2*dth, t+dth)
f4 = derivs(s+f3*dt, t+dt)
return s + (f1+2*f2+2*f3+f4)*dt/6.0
x=1.0
v=0.0
t=0.0
dt=0.3
tlist=[t] # times
erlist=[0.5] # RK4 Energies
evlist=[0.5] # Verlet Energies
sr=array([x,v]) # state (RK4)
sv=array([x,v]) # state (Verlet)
while t<3000*pi:
sr = RK4Step(sr, t, derivs_sho, dt) # take each "type" of step
sv = VerletStep(sv, t, derivs_sho, dt)
t += dt
tlist.append(t)
Er = 0.5*sr[1]**2 + 0.5*sr[0]**2 # compute energies
Ev = 0.5*sv[1]**2 + 0.5*sv[0]**2
erlist.append(Er)
evlist.append(Ev)
title("SHO Energies")
xlabel("time (s)")
ylabel("energy (J)")
plot(tlist, erlist, 'r-', label="RK4")
plot(tlist, evlist, 'g-', label="Verlet")
legend(loc=3)
###Output
_____no_output_____
###Markdown
Notice that the RK4 method, being 4th order, is much more "accurate" in the short term (the variations in energy are much smaller), but in the long run the energy drifts a lot. The Verlet method has more short-term error, but over the long run, the energy remains bounded near the original energy.Project 4: Orbital Mechanics=============================You will compute the orbit of an asteroid under the influence of the Sun and Jupiter. Use the RK4 and Verlet algorithms and investigate the long term conservation of energy for both algorithms. Below find an example of approximately computing Earth's orbit about the Sun. You should be able to switch out RK4 and Verlet step functions.What do I have to do?---------------------Please write a report describing your efforts. Be sure to include the following:* The calculation of the orbital trajectory of an asteroid whose orbital period is half of Jupiter's period.* Investigate the "long term" behavior of the results using the RK4 algorithm compared to Verlet.* A graph of energy vs. time for such an asteroid using both RK4 and Verlet methods.* Any general conclusion you can draw from your results regarding the virtues and/or drawbacks of these methods.
###Code
GMs = (2*pi)**2 # measure time in years, distance in AU
def derivs_grav(s, t, step=None):
"""
Compute motion of Earth about the Sun
"""
r=s[:2]
v=s[2:]
if step==0: # Verlet space-step
return append(v,zeros(2))
else:
rnorm = sqrt(sum(r*r))
a = -GMs*r/rnorm**3
if step is None: # RK step
return append(v,a)
else: # Verlet velocity-step
return append(zeros(2),a)
v = array([0.0,2*pi])
r = array([1.0,0.0])
s=append(r,v)
t=0.0
dt=0.01
tlist=[t]
xlist=[s[0]]
ylist=[s[1]]
while t<1.1:
s = RK4Step(s, t, derivs_grav, dt)
t += dt
tlist.append(t)
xlist.append(s[0])
ylist.append(s[1])
title("Earth Orbit")
xlabel("x-position (AU)")
ylabel("y-position (AU)")
axes().set_aspect('equal')
axis([-1.1,1.1,-1.1,1.1])
plot(xlist, ylist)
title("Earth Orbit")
xlabel("time (years)")
ylabel("y-position (AU)")
plot(tlist, ylist)
grid()
###Output
_____no_output_____
###Markdown
Starter Code============Below you'll find a derivs_grav function that computes the motion of an asteroid and Jupiter about the sun. It's set up to take combined q/p steps (appropriate for an RK scheme). You can use this directly to study the motion with RK4Step and investigate solutions for different initial conditions.
###Code
#
# This is a derivs function for the RK4 method
# You need to modify it to work with the Symplectic Integrators
#
G = (2*pi)**2 # measure time in years, distance in AU
Ms = 1.0 # mass in solar masses
Mj = Ms/1047 # jupiter's mass is much less than the Sun's
Ma = Mj/1e7 # typical asteroid mass.. *really* small.
GMs = G*Ms # save multiplying later ...
GMj = G*Mj
GMa = G*Ma
def derivs_grav(s, t, step=None):
"""
Compute motion of asteriod and Jupiter about the Sun
"""
rsa=s[:2] # position of asteroid relative to sun
rsj=s[2:4] # for symplectic integrators it's handy to have all r's together
va=s[4:6] # followed by all v's in the state array.
vj=s[6:8]
rja=rsa-rsj
rsjm3 = (rsj*rsj).sum()**(1.5) # compute |r_{sj}|**3 for vector force calculation
rsam3 = (rsa*rsa).sum()**(1.5) # similar for r_{sa}
rjam3 = (rja*rja).sum()**(1.5) # similar for r_{ja}
aj = -(GMs*rsj/rsjm3 - GMa*rja/rjam3)
aa = -(GMs*rsa/rsam3 + GMj*rja/rjam3)
return array([va[0],va[1],vj[0],vj[1],aa[0],aa[1],aj[0],aj[1]])
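# Sketch of the modification requested in the comment at the top of this cell
# (mirroring derivs_sho): handle the 'step' argument so the symplectic integrators
# can request position-only or velocity-only updates, e.g.
# if step == 0: return array([va[0], va[1], vj[0], vj[1], 0, 0, 0, 0])
# elif step == 1: return array([0, 0, 0, 0, aa[0], aa[1], aj[0], aj[1]])
# (and skip computing the accelerations when only a position step is needed).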
Rj=5.2 # AU
Ra=3.0 # AU
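# For the asteroid with half of Jupiter's period asked for in the project, Kepler's
# third law gives a semi-major axis of roughly Rj*(1/2)**(2/3) ~ 3.28 AU,
# e.g. Ra = Rj*0.5**(2.0/3.0); the Ra = 3.0 AU above is just a starting example.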
s=array([Ra,0,Rj,0,0,sqrt(GMs/Ra),0,sqrt(GMs/Rj)]) # assume circular orbits
xalist=[] # empty lists
yalist=[]
tlist=[]
xjlist=[]
yjlist=[]
t=0.0
dt=0.3
while t<50:
s=RK4Step(s, t, derivs_grav, dt)
t+=dt
tlist.append(t)
xalist.append(s[0])
yalist.append(s[1])
xjlist.append(s[2])
yjlist.append(s[3])
title("Jupiter/Asteriod Orbit")
xlabel("x-position (AU)")
ylabel("y-position (AU)")
axes().set_aspect('equal')
plot(xalist, yalist,'b.',label="asteroid")
plot(xjlist, yjlist,'g.',label="jupiter")
legend()
###Output
_____no_output_____
###Markdown
Higher Order Methods====================The Heun Method is an example of a more general class of numerical solvers known as Runge-Kutta solvers. See the slides for details, but basically you can classify RK solvers by the way they step in time and how they weight the extrapolated results in the final answer.One particular choise of weights that is very popular is the RK4 table:Which leads to the RK4Step function: def RK4Step(s, dt, t, derivs): f1 = derivs(s, t)*dt f2 = derivs(s+f1/2.0, t+dt/2.0)*dt f3 = derivs(s+f2/2.0, t+dt/2.0)*dt f4 = derivs(s+f3, t+dt)*dt return s + (f1+2*f2+2*f3+f4)/6.0Symplectic Methods==================Some kinds of systems have properties that make even high order RK methods unsatisfactory. These are systems where Energy (and other quantities) are conserved. These are called "Hamiltonian" systems due to the extensive history of applying the Hamiltonian (Energy oriented) formalism to their solution. See the slides for more details of the Hamiltonian formalism. The RK algorithms focus on reducing trucation error, but do not respect any inherantly conserved quantities. *Symplectic* methods are designed to exactly conserve these quantities at the possible expense of some truncation error. The simplest of these is the *SymplecticEuler* method (sometimes knows as the *Cromer* method). A second order version is the *Verlet* method, or the "Leapfrog" method. def EulerStep(s, t, derivs, dt): return s + derivs(s,t)*dt def SymplecticEulerStep(s, t, derivs, dt): s1 = s + derivs(s,t,0)*dt q-step return s1 + derivs(s1,t,1)*dt p-step def VerletStep(s, t, derivs, dt): dth = dt/2.0 half of h s = s + derivs(s, t, 0)*dth changes only positon s = s + derivs(s, t+dth, 1)*dt changes only velocity return s + derivs(s, t+dt, 0)*dth change only positionNotice that when the SymplecticEulerStep and VerletStep methods call derivs they add a third argument. This argument tells the derivs function whether the step is a "space step, or q-step" (pass a zero) or a "velocity step, or p-step" (pass a one). If the derivs function detects that a third argument has been passed it should return zero in the rate of change of the "other" part of the state (e.g., if it's a "space step" then the rate of change of the velocity (or momentum) should be zero). In this way the VerletStep function can carefully craft a sequence of steps that take care to conserve area in phase space (and energy in the long run, even though they may be less accurate in the short run).Below are some examples of EulerStep, SymplecticEulerStep, RK4Step and VerletStep applied to the simple harmonic oscillator.
###Code
%pylab inline
def EulerStep(s, t, derivs, dt):
"""
Step whichever way derivs says to go.
"""
return s + derivs(s,t)*dt
def SymplecticEulerStep(s, t, derivs, dt):
"""
Take two steps, one in only "q", one in only "p" direction.
"""
s1 = s + derivs(s,t,0)*dt # q-step (only change position)
return s1 + derivs(s1,t,1)*dt # p-step (only change momentum)
k=1.0
m=1.0
def derivs_sho(s, t, step=None):
"""
Simple harmonic oscillator derivs for symplectic and non-symplectic
The 'state' array (s) now has an ensemble of oscillators with slightly
different initial conditions. The first n elements are the positions
and the second half are the velocities.
"""
n=int(len(s)/2) # the first half of s is 'q', second half 'p'
v=s[n:]
if step==0:
return append(v, zeros(n)) # for q-steps, just update the position
# no need to compute 'a'
else:
x=s[:n] # only extract x and compute a if we need it.
a=-k*x/m
if step is None: # it must be an RK4 step
return append(v,a)
else: # velocity step
return append(zeros(n),a) # for p-steps, just updated the velocity
rcParams['figure.figsize']=(7.,7.)
DO_SYMPLECTIC=False # Change "DO_SYMPLECTIC" to True or False to switch between
# SymplecticEuler and Euler "step" functions.
delta=0.1 # how far apart in phase space are the points
s0= array([1.0,0.0])
s1=s0 + array([delta, 0])
s2=s0 + array([delta,delta])
s3=s0 + array([0, delta]) # four points in phase space
t = 0.0
dt = pi/10
s = array(list(flatten(array([s0,s1,s2,s3,s0]).T))) # state of four objects -> [x1,x2,x3,x4,x5,v1,v2,v3,v4,v5]
# (fifth object is same as first to "close the box")
print("s(at t=0)=",s)
n = int(len(s)/2)
clf()
if DO_SYMPLECTIC:
title("Harmonic Oscillator Phase Space: Symplectic Euler")
wsize=1.4
else:
title("Harmonic Oscillator Phase Space: Euler")
wsize=2.5
axes().set_aspect('equal')
axis([-wsize,wsize,-wsize,wsize])
xlabel("position")
ylabel("momentum (or velocity since m=1)")
plot(s[:n], s[n:], 'r-',s[:n],s[n:],'b.')
while t<1.9*pi:
if DO_SYMPLECTIC:
s=SymplecticEulerStep(s, t, derivs_sho, dt)
else:
s=EulerStep(s, t, derivs_sho, dt)
t+=dt
plot(s[:n], s[n:], 'r-',s[:n],s[n:],'b.')
def VerletStep(s, t, derivs, dt):
dth = dt/2.0 # half of h
s = s + derivs(s, t, 0)*dth # changes only positon
s = s + derivs(s, t+dth, 1)*dt # changes only velocity from s1
return s + derivs(s, t+dt, 0)*dth # change only position
def RK4Step(s, t, derivs, dt):
"""
Take a single RK4 step.
"""
dth=dt/2.0
f1 = derivs(s, t)
f2 = derivs(s+f1*dth, t+dth)
f3 = derivs(s+f2*dth, t+dth)
f4 = derivs(s+f3*dt, t+dt)
return s + (f1+2*f2+2*f3+f4)*dt/6.0
x=1.0
v=0.0
t=0.0
dt=0.3
tlist=[t] # times
erlist=[0.5] # RK4 Energies
evlist=[0.5] # Verlet Energies
sr=array([x,v]) # state (RK4)
sv=array([x,v]) # state (Verlet)
while t<3000*pi:
sr = RK4Step(sr, t, derivs_sho, dt) # take each "type" of step
sv = VerletStep(sv, t, derivs_sho, dt)
t += dt
tlist.append(t)
Er = 0.5*sr[1]**2 + 0.5*sr[0]**2 # compute energies
Ev = 0.5*sv[1]**2 + 0.5*sv[0]**2
erlist.append(Er)
evlist.append(Ev)
title("SHO Energies")
xlabel("time (s)")
ylabel("energy (J)")
plot(tlist, erlist, 'r-', label="RK4")
plot(tlist, evlist, 'g-', label="Verlet")
legend(loc=3)
###Output
_____no_output_____
###Markdown
Notice that the RK4 method, being 4th order, is much more "accurate" in the short term (the variations in energy are much smaller) in the long run the energy drifts a lot. The Verlet method has more short term error, but over the long run, the energy remains bounded near the original energy.Project 4: Orbital Mechanics=============================You will compute the orbit of an asteroid under the influence of the Sun and Jupiter. Use the RK4 and Verlet algorithm and investigate the long term conservation of energy for both algorithms. Below find an example of approximately computing Earth's orbit about the Sun. You should be able to swich out RK4 and Verlet step functions.What do I have to do?---------------------Please write a report describing your efforts. Be sure to include the following:* The calculation of the orbital trajectory of an asteroid whose orbital period is half of Jupiter's period.* Investigate the "long term" behavior of the results using RK4 algorithm compared to Verlet.* A graph of energy vs. time for such an asteroid using both RK4 and Verlet methods.* Any general conclusion you can draw from your results regarding the virtues and/or drawbacks of these methods.
###Code
GMs = (2*pi)**2 # measure time in years, distance in AU
def derivs_grav(s, t, step=None):
"""
Compute motion of Earth about the Sun
"""
r=s[:2]
v=s[2:]
if step==0: # Verlet space-step
return append(v,zeros(2))
else:
rnorm = sqrt(sum(r*r))
a = -GMs*r/rnorm**3
if step is None: # RK step
return append(v,a)
else: # Verlet velocity-step
return append(zeros(2),a)
v = array([0.0,2*pi])
r = array([1.0,0.0])
s=append(r,v)
t=0.0
dt=0.01
tlist=[t]
xlist=[s[0]]
ylist=[s[1]]
while t<1.1:
s = RK4Step(s, t, derivs_grav, dt)
t += dt
tlist.append(t)
xlist.append(s[0])
ylist.append(s[1])
title("Earth Orbit")
xlabel("x-position (AU)")
ylabel("y-position (AU)")
axes().set_aspect('equal')
axis([-1.1,1.1,-1.1,1.1])
plot(xlist, ylist)
title("Earth Orbit")
xlabel("time (years)")
ylabel("y-position (AU)")
plot(tlist, ylist)
grid()
###Output
_____no_output_____
###Markdown
Starter Code============Below you'll find a derivs_grav function that computes the motion of an asteroid and Jupiter about the sun. It's set up to take combined q/p steps (appropriate for an RK scheme). You can use this directly to study the motion with RK4Step and investigate solutions for different initial conditions.
###Code
#
# This is a derivs function for the RK4 method
# You need to modify it to work with the Symplectic Integrators
#
G = (2*pi)**2 # measure time in years, distance in AU
Ms = 1.0 # mass in solar masses
Mj = Ms/1047 # jupiter's mass is much less than the Sun's
Ma = Mj/1e7 # typical asteroid mass.. *really* small.
GMs = G*Ms # save multiplying later ...
GMj = G*Mj
GMa = G*Ma
def derivs_grav(s, t, step=None):
"""
Compute motion of asteriod and Jupiter about the Sun
"""
rsa=s[:2] # position of asteroid relative to sun
rsj=s[2:4] # for symplectic integrators it's handy to have all r's together
va=s[4:6] # followed by all v's in the state array.
vj=s[6:8]
rja=rsa-rsj
rsjm3 = (rsj*rsj).sum()**(1.5) # compute |r_{sj}|**3 for vector force calculation
rsam3 = (rsa*rsa).sum()**(1.5) # similar for r_{sa}
rjam3 = (rja*rja).sum()**(1.5) # similar for r_{ja}
aj = -(GMs*rsj/rsjm3 - GMa*rja/rjam3)
aa = -(GMs*rsa/rsam3 + GMj*rja/rjam3)
return array([va[0],va[1],vj[0],vj[1],aa[0],aa[1],aj[0],aj[1]])
Rj=5.2 # AU
Ra=3.0 # AU
s=array([Ra,0,Rj,0,0,sqrt(GMs/Ra),0,sqrt(GMs/Rj)]) # assume circular orbits
xalist=[] # empty lists
yalist=[]
tlist=[]
xjlist=[]
yjlist=[]
t=0.0
dt=0.3
while t<50:
s=RK4Step(s, t, derivs_grav, dt)
t+=dt
tlist.append(t)
xalist.append(s[0])
yalist.append(s[1])
xjlist.append(s[2])
yjlist.append(s[3])
title("Jupiter/Asteroid Orbit")
xlabel("x-position (AU)")
ylabel("y-position (AU)")
axes().set_aspect('equal')
plot(xalist, yalist,'b.',label="asteroid")
plot(xjlist, yjlist,'g.',label="jupiter")
legend()
###Output
_____no_output_____ |
.ipynb_checkpoints/Drawing samples-checkpoint.ipynb | ###Markdown
Drawing samples Curvilinear trapezoidal distributionTo sample from CTrap(a, b, d), make two draws $r_1$ and $r_2$ independently from the standard rectangular distribution $R(0, 1)$ and form $$ a_s = (a − d) + 2dr_1 \qquad b_s = (a+b)-a_s , $$and$$ \xi = a_s + (b_s − a_s)r_2 . $$In this way $a_s$ is a draw from the rectangular distribution with limits $a \pm d$. $b_s$ is then formed to ensure that the midpoint of $a_s$ and $b_s$ is the prescribed value $x = (a + b)/2$. TaskA certificate states that a voltage X lies in the interval 10.0 V ± 0.1 V. No other information is available concerning X, except that it is believed that the magnitude of the interval endpoints is the result of rounding correctly some numerical value. On this basis, that numerical value lies between 0.05 V and 0.15 V, since the numerical value of every point in the interval (0.05, 0.15) rounded to one significant decimal digit is 0.1. The location of the interval can therefore be regarded as fixed, whereas its width is inexact. The best estimate of X is x = 10.0 V. Based on a = 9.9 V, b = 10.1 V and d = 0.05 V, sample from the PDF and calculate the best estimate and the associated uncertainty.
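A useful analytic cross-check (my addition; the formula below is my recollection of GUM Supplement 1, so treat it as an assumption to verify against the standard): for CTrap(a, b, d) the expectation is $(a+b)/2$ and the variance is $$ u^2(x) = \frac{(b-a)^2}{12} + \frac{d^2}{9} , $$ so with $a = 9.9$ V, $b = 10.1$ V and $d = 0.05$ V the standard uncertainty should come out near $u(x) \approx 0.060$ V, which matches the Monte Carlo result printed below.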
###Code
%pylab inline
a = 9.9
b = 10.1
d = 0.05
MCruns = 1000000
r1 = random.rand(MCruns)
r2 = random.rand(MCruns)
a_s = (a-d) + 2*d*r1
b_s = (a+b) - a_s
xi = a_s + (b_s-a_s)*r2
hist(xi, bins = MCruns//1000, edgecolor="none");
x = xi.mean()
ux = xi.std()
print("estimate = %g\tuncertainty = %g"%(x, ux))
axvline(x-ux,color="k");
axvline(x+ux,color="k");
###Output
estimate = 9.99999 uncertainty = 0.060042
|
notebooks/Kannada-MNIST/mnist_cnn.ipynb | ###Markdown
1. transform & dataload
###Code
class RandomRotation:
def __init__(self, degrees, resample=False, expand=False, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
self.resample = resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
angle = np.random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, img):
def rotate(img, angle, resample=False, expand=False, center=None):
return img.rotate(angle, resample, expand, center)
angle = self.get_params(self.degrees)
return rotate(img, angle, self.resample, self.expand, self.center)
class RandomShift(object):
def __init__(self, shift):
self.shift = shift
@staticmethod
def get_params(shift):
hshift, vshift = np.random.uniform(-shift, shift, size=2)
return hshift, vshift
def __call__(self, img):
hshift, vshift = self.get_params(self.shift)
return img.transform(img.size, Image.AFFINE, (1,0,hshift,0,1,vshift), resample=Image.BICUBIC, fill=1)
class Kannada_MNIST_data(Dataset):
def __init__(self, df, aug=False):
self.aug = aug
n_pixels = 28 * 28
# if "id" in df.columns:
# print("drop")
# df.drop(["id"], axis=1)
if "label" not in df.columns:
# test data
self.X = df.iloc[:,1:].values.reshape((-1,28,28)).astype(np.uint8)[:,:,:,None]
self.y = None
else:
# training data
self.X = df.iloc[:,1:].values.reshape((-1,28,28)).astype(np.uint8)[:,:,:,None]
self.y = torch.from_numpy(df.iloc[:,0].values)
if self.y is not None and self.aug:
self.transform = transforms.Compose([
transforms.ToPILImage(),
RandomRotation(degrees=10),
RandomShift(3),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5,), std=(0.5,))
])
else:
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.5,), std=(0.5,))
])
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
if self.y is not None:
return self.transform(self.X[idx]), self.y[idx]
else:
return self.transform(self.X[idx])
%%time
full_train_df = pd.read_csv(data_root+'train.csv')
test_df = pd.read_csv(data_root+'test.csv')
train_df, valid_df = train_test_split(full_train_df, test_size=0.2, random_state=RANDOM_STATE, shuffle=True)
batch_size = 64
train_dataset = Kannada_MNIST_data(train_df, aug=True)
valid_dataset = Kannada_MNIST_data(valid_df)
test_dataset = Kannada_MNIST_data(test_df)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
def show_batch(loader, lb=True):
batcher = iter(loader)
if lb:
        images, labels = next(batcher)
else:
        images = next(batcher)
grid = torchvision.utils.make_grid(images)
plt.imshow(grid.numpy().transpose((1, 2, 0)))
plt.axis('off')
if lb:
plt.title(labels.numpy());
plt.show()
show_batch(train_loader)
show_batch(valid_loader)
show_batch(test_loader, lb=False)
train_loader, valid_loader, test_loader
def train(model, train_loader):
batch_loss = 0.0
batch_corrects = 0.0
model.train()
for inputs, labels in train_loader:
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
preds = torch.max(outputs, 1)[1]
batch_loss += loss.item()
batch_corrects += torch.sum(preds == labels.data)
return batch_loss/len(train_loader), batch_corrects.float()/len(train_dataset)
def evaluate(model, valid_loader):
loss = 0.0
corrects = 0.0
model.eval()
with torch.no_grad():
for inputs, labels in valid_loader:
inputs, labels = Variable(inputs), Variable(labels)
outputs = model(inputs)
loss += F.cross_entropy(outputs, labels, reduction='mean').item()
pred = outputs.data.max(1, keepdim=True)[1]
corrects += pred.eq(labels.data.view_as(pred)).cpu().sum()
return loss/len(valid_loader), corrects.float()/len(valid_dataset)
###Output
_____no_output_____
###Markdown
1. Test pipeline
###Code
class SimpleConvNet(nn.Module):
def __init__(self):
super(SimpleConvNet, self).__init__()
self.conv_layers = nn.Sequential(
nn.Conv2d(1, 20, 5, 1),
nn.ReLU(),
nn.AvgPool2d(2, stride=2),
nn.Conv2d(20, 50, 5, 1),
nn.BatchNorm2d(50),
nn.ReLU(),
nn.AvgPool2d(2, stride=2)
)
self.full_conn_layers = nn.Sequential(
nn.Linear(4*4*50, 500),
nn.ReLU(),
nn.Linear(500, 10)
)
def forward(self, x):
x = self.conv_layers(x)
x = x.view(x.size(0), -1)
x = self.full_conn_layers(x)
return x
model = SimpleConvNet()
epochs = 20
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr = 0.01)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
factor=0.3,
mode="max",
verbose=True,
patience=1,
threshold=1e-3
)
%%time
epoch_loss_history = []
epoch_corrects_history = []
val_loss_history = []
val_corrects_history = []
for epoch in range(epochs):
epoch_loss, epoch_corrects = train(model, train_loader)
val_loss, val_corrects = evaluate(model, valid_loader)
epoch_loss_history.append(epoch_loss)
epoch_corrects_history.append(epoch_corrects)
val_loss_history.append(val_loss)
val_corrects_history.append(val_corrects)
print('epoch:', (epoch+1))
print('training loss: {:.4f}, training acc {:.4f} '.format(epoch_loss, epoch_corrects.item()))
print('validation loss: {:.4f}, validation acc {:.4f} '.format(val_loss, val_corrects.item()))
scheduler.step(val_corrects)
plt.plot(epoch_loss_history, label='training loss')
plt.plot(val_loss_history, label='validation loss')
plt.legend()
plt.plot(epoch_corrects_history, label='training accuracy')
plt.plot(val_corrects_history, label='validation accuracy')
plt.legend()
path = f"{model_file_root}SimpleNetNet{model_file_ext}"
torch.save(model.state_dict(), path)
###Output
_____no_output_____
###Markdown
2. CNN
###Code
class Conv2Class2Net(nn.Module):
def __init__(self):
super(Conv2Class2Net, self).__init__()
self.cnn_layers = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout(0.2),
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout(0.2)
)
self.fc_layers = nn.Sequential(
nn.Linear(64 * 7 * 7, 512),
nn.BatchNorm1d(512),
nn.LeakyReLU(inplace=True),
nn.Dropout(0.3),
nn.Linear(512, 256),
nn.BatchNorm1d(256),
nn.LeakyReLU(inplace=True),
nn.Dropout(0.3),
nn.Linear(256, 10)
)
for m in self.cnn_layers.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
for m in self.fc_layers.children():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
elif isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
out = self.cnn_layers(x)
out = out.view(out.size(0), -1)
out = self.fc_layers(out)
return out
model = Conv2Class2Net()
epochs = 20
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr = 0.01)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
factor=0.3,
mode="max",
verbose=True,
patience=1,
threshold=1e-3
)
%%time
epoch_loss_history = []
epoch_corrects_history = []
val_loss_history = []
val_corrects_history = []
for epoch in range(epochs):
epoch_loss, epoch_corrects = train(model, train_loader)
val_loss, val_corrects = evaluate(model, valid_loader)
epoch_loss_history.append(epoch_loss)
epoch_corrects_history.append(epoch_corrects)
val_loss_history.append(val_loss)
val_corrects_history.append(val_corrects)
print('epoch:', (epoch+1))
print('training loss: {:.4f}, training acc {:.4f} '.format(epoch_loss, epoch_corrects.item()))
print('validation loss: {:.4f}, validation acc {:.4f} '.format(val_loss, val_corrects.item()))
scheduler.step(val_corrects)
path = f"{model_file_root}Conv2Class2Net_{epoch+1}{model_file_ext}"
torch.save(model.state_dict(), path)
plt.plot(epoch_loss_history, label='training loss')
plt.plot(val_loss_history, label='validation loss')
plt.legend()
plt.plot(epoch_corrects_history, label='training accuracy')
plt.plot(val_corrects_history, label='validation accuracy')
plt.legend()
###Output
_____no_output_____
###Markdown
Predict
###Code
path = model_file_root+'SimpleNetNet'+model_file_ext
model = SimpleConvNet()
model.load_state_dict(torch.load(path))
model.eval()
def prediciton(model, data_loader):
    model.eval()
    test_pred = torch.LongTensor()
    with torch.no_grad():  # inference only; `volatile=True` on Variable is deprecated in modern PyTorch
        for i, data in enumerate(data_loader):
            output = model(data)
            pred = output.data.max(1, keepdim=True)[1]
            test_pred = torch.cat((test_pred, pred), dim=0)
    return test_pred
%%time
test_pred = prediciton(model, test_loader)
out_df = pd.DataFrame(np.c_[np.arange(1, len(test_dataset)+1)[:,None], test_pred.numpy()], columns=['ImageId', 'Label'])
out_df.head()
out_df.to_csv('submission.csv', index=False)
###Output
_____no_output_____ |
Chapter02/ch2_nb1_mnist_keras.ipynb | ###Markdown
Tip. Click here to view this notebook on nbviewer.jupyter.org. These notebooks are better read there, as Github default viewer ignores some of the formatting and interactive content. Hands-on Computer Vision with TensorFlow 2by Eliot Andres & Benjamin Planche (Packt Pub.) > Chapter 2: TensorFlow Basics and Training a Model Notebook 1:A simple computer vision model using Keras In the second chapter of the book, we introduced the Keras API and how to build a simple model. In this first notebook, we will therefore detail the related code snippets and results from the book. Tip. The notebooks shared on this git repository illustrate some notions from the book "Hands-on Computer Vision with TensorFlow 2" written by Eliot Andres and Benjamin Planche, published by Packt. If you enjoyed the insights shared here, please consider acquiring the book!The book provides further guidance for those eager to learn about computer vision and to harness the power of TensorFlow 2 and Keras to build efficient recognition systems for object detection, segmentation, video processing, smartphone applications, and more. Leverage deep learning to create powerful image processing apps with TensorFlow 2 and Keras. Get the book for more insights!
###Code
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Input data
###Code
num_classes = 10
img_rows, img_cols = 28, 28
num_channels = 1
input_shape = (img_rows, img_cols, num_channels)
(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
###Output
_____no_output_____
###Markdown
Building a simple model
###Code
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))
###Output
_____no_output_____
###Markdown
Launch training
###Code
model.compile(optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
callbacks = [tf.keras.callbacks.TensorBoard('./keras')]
model.fit(x_train, y_train, epochs=25, verbose=1, validation_data=(x_test, y_test), callbacks=callbacks)
###Output
Train on 60000 samples, validate on 10000 samples
Epoch 1/25
60000/60000 [==============================] - 2s 35us/sample - loss: 0.6397 - accuracy: 0.8418 - val_loss: 0.3560 - val_accuracy: 0.9042
Epoch 2/25
60000/60000 [==============================] - 2s 32us/sample - loss: 0.3369 - accuracy: 0.9059 - val_loss: 0.2958 - val_accuracy: 0.9187
Epoch 3/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.2899 - accuracy: 0.9183 - val_loss: 0.2651 - val_accuracy: 0.9241
Epoch 4/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.2603 - accuracy: 0.9262 - val_loss: 0.2417 - val_accuracy: 0.9316
Epoch 5/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.2378 - accuracy: 0.9330 - val_loss: 0.2222 - val_accuracy: 0.9359
Epoch 6/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.2192 - accuracy: 0.9384 - val_loss: 0.2074 - val_accuracy: 0.9405
Epoch 7/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.2029 - accuracy: 0.9436 - val_loss: 0.1947 - val_accuracy: 0.9450
Epoch 8/25
60000/60000 [==============================] - 2s 32us/sample - loss: 0.1892 - accuracy: 0.9469 - val_loss: 0.1814 - val_accuracy: 0.9479
Epoch 9/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.1771 - accuracy: 0.9505 - val_loss: 0.1719 - val_accuracy: 0.9502
Epoch 10/25
60000/60000 [==============================] - 2s 34us/sample - loss: 0.1668 - accuracy: 0.9528 - val_loss: 0.1627 - val_accuracy: 0.9532
Epoch 11/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.1574 - accuracy: 0.9554 - val_loss: 0.1559 - val_accuracy: 0.9542
Epoch 12/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.1492 - accuracy: 0.9583 - val_loss: 0.1491 - val_accuracy: 0.9560
Epoch 13/25
60000/60000 [==============================] - 2s 36us/sample - loss: 0.1420 - accuracy: 0.9599 - val_loss: 0.1439 - val_accuracy: 0.9581
Epoch 14/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.1352 - accuracy: 0.9622 - val_loss: 0.1382 - val_accuracy: 0.9597
Epoch 15/25
60000/60000 [==============================] - 2s 34us/sample - loss: 0.1292 - accuracy: 0.9639 - val_loss: 0.1335 - val_accuracy: 0.9615
Epoch 16/25
60000/60000 [==============================] - 2s 34us/sample - loss: 0.1236 - accuracy: 0.9657 - val_loss: 0.1286 - val_accuracy: 0.9626
Epoch 17/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.1186 - accuracy: 0.9668 - val_loss: 0.1245 - val_accuracy: 0.9628
Epoch 18/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.1135 - accuracy: 0.9684 - val_loss: 0.1223 - val_accuracy: 0.9645
Epoch 19/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.1095 - accuracy: 0.9697 - val_loss: 0.1172 - val_accuracy: 0.9648
Epoch 20/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.1053 - accuracy: 0.9713 - val_loss: 0.1153 - val_accuracy: 0.9663
Epoch 21/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.1017 - accuracy: 0.9717 - val_loss: 0.1128 - val_accuracy: 0.9670
Epoch 22/25
60000/60000 [==============================] - 2s 32us/sample - loss: 0.0983 - accuracy: 0.9725 - val_loss: 0.1095 - val_accuracy: 0.9681
Epoch 23/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.0949 - accuracy: 0.9735 - val_loss: 0.1066 - val_accuracy: 0.9693
Epoch 24/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.0917 - accuracy: 0.9747 - val_loss: 0.1054 - val_accuracy: 0.9695
Epoch 25/25
60000/60000 [==============================] - 2s 33us/sample - loss: 0.0891 - accuracy: 0.9758 - val_loss: 0.1040 - val_accuracy: 0.9696
###Markdown
Running with an estimator
###Code
estimator = tf.keras.estimator.model_to_estimator(model, model_dir='./estimator_dir')
BATCH_SIZE = 32
def train_input_fn():
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.batch(BATCH_SIZE).repeat()
return train_dataset
estimator.train(train_input_fn, steps=len(x_train)//BATCH_SIZE)
###Output
_____no_output_____ |
courses/01_Intro/1.1_Intro.ipynb | ###Markdown
1.1 Intro
###Code
## Intro to Linux bash commands
!pwd # print the path of the current working directory
!ls # list the folders and files in the current location
cd sample_data
# moves down into the subfolder... use cd .. to move back up, i.e. return to the parent folder
cd ..
from IPython.display import Image
Image('dog1.jpg',width=200,height=120)
#Image(url='https://cdn.mos.cms.futurecdn.net/QjuZKXnkLQgsYsL98uhL9X-1024-80.jpg',width=800,height=450)
!ls
!pip list
###Output
Package Version
------------------------ ---------------
absl-py 0.9.0
alabaster 0.7.12
albumentations 0.1.12
altair 4.0.1
asgiref 3.2.5
astor 0.8.1
astropy 4.0
astunparse 1.6.3
atari-py 0.2.6
atomicwrites 1.3.0
attrs 19.3.0
audioread 2.1.8
autograd 1.3
Babel 2.8.0
backcall 0.1.0
beautifulsoup4 4.6.3
bleach 3.1.3
blis 0.4.1
bokeh 1.4.0
boto3 1.12.27
botocore 1.15.27
Bottleneck 1.3.2
branca 0.4.0
bs4 0.0.1
CacheControl 0.12.6
cachetools 3.1.1
catalogue 1.0.0
certifi 2019.11.28
cffi 1.14.0
chainer 6.5.0
chardet 3.0.4
click 7.1.1
cloudpickle 1.3.0
cmake 3.12.0
cmdstanpy 0.4.0
colorlover 0.3.0
community 1.0.0b1
contextlib2 0.5.5
convertdate 2.2.0
coverage 3.7.1
coveralls 0.5
crcmod 1.7
cufflinks 0.17.3
cvxopt 1.2.4
cvxpy 1.0.28
cycler 0.10.0
cymem 2.0.3
Cython 0.29.15
daft 0.0.4
dask 2.12.0
dataclasses 0.7
datascience 0.10.6
decorator 4.4.2
defusedxml 0.6.0
descartes 1.1.0
dill 0.3.1.1
distributed 1.25.3
Django 3.0.4
dlib 19.18.0
docopt 0.6.2
docutils 0.15.2
dopamine-rl 1.0.5
earthengine-api 0.1.216
easydict 1.9
ecos 2.0.7.post1
editdistance 0.5.3
en-core-web-sm 2.2.5
entrypoints 0.3
ephem 3.7.7.1
et-xmlfile 1.0.1
fa2 0.3.5
fancyimpute 0.4.3
fastai 1.0.60
fastdtw 0.3.4
fastprogress 0.2.2
fastrlock 0.4
fbprophet 0.6
feather-format 0.4.0
featuretools 0.4.1
filelock 3.0.12
firebase-admin 4.0.0
fix-yahoo-finance 0.0.22
Flask 1.1.1
folium 0.8.3
fsspec 0.6.3
future 0.16.0
gast 0.3.3
GDAL 2.2.2
gdown 3.6.4
gensim 3.6.0
geographiclib 1.50
geopy 1.17.0
gin-config 0.3.0
glob2 0.7
google 2.0.3
google-api-core 1.16.0
google-api-python-client 1.7.12
google-auth 1.7.2
google-auth-httplib2 0.0.3
google-auth-oauthlib 0.4.1
google-cloud-bigquery 1.21.0
google-cloud-core 1.0.3
google-cloud-datastore 1.8.0
google-cloud-firestore 1.6.2
google-cloud-language 1.2.0
google-cloud-storage 1.18.1
google-cloud-translate 1.5.0
google-colab 1.0.0
google-pasta 0.2.0
google-resumable-media 0.4.1
googleapis-common-protos 1.51.0
googledrivedownloader 0.4
graphviz 0.10.1
grpcio 1.27.2
gspread 3.0.1
gspread-dataframe 3.0.4
gym 0.17.1
h5py 2.10.0
HeapDict 1.0.1
holidays 0.9.12
html5lib 1.0.1
httpimport 0.5.18
httplib2 0.17.0
httplib2shim 0.0.3
humanize 0.5.1
hyperopt 0.1.2
ideep4py 2.0.0.post3
idna 2.8
image 1.5.28
imageio 2.4.1
imagesize 1.2.0
imbalanced-learn 0.4.3
imblearn 0.0
imgaug 0.2.9
importlib-metadata 1.5.0
imutils 0.5.3
inflect 2.1.0
intel-openmp 2020.0.133
intervaltree 2.1.0
ipykernel 4.6.1
ipython 5.5.0
ipython-genutils 0.2.0
ipython-sql 0.3.9
ipywidgets 7.5.1
itsdangerous 1.1.0
jax 0.1.62
jaxlib 0.1.42
jdcal 1.4.1
jedi 0.16.0
jieba 0.42.1
Jinja2 2.11.1
jmespath 0.9.5
joblib 0.14.1
jpeg4py 0.1.4
jsonschema 2.6.0
jupyter 1.0.0
jupyter-client 5.3.4
jupyter-console 5.2.0
jupyter-core 4.6.3
kaggle 1.5.6
kapre 0.1.3.1
Keras 2.2.5
Keras-Applications 1.0.8
Keras-Preprocessing 1.1.0
keras-vis 0.4.1
kiwisolver 1.1.0
knnimpute 0.1.0
librosa 0.6.3
lightgbm 2.2.3
llvmlite 0.31.0
lmdb 0.98
lucid 0.3.8
LunarCalendar 0.0.9
lxml 4.2.6
Markdown 3.2.1
MarkupSafe 1.1.1
matplotlib 3.2.1
matplotlib-venn 0.11.5
missingno 0.4.2
mistune 0.8.4
mizani 0.6.0
mkl 2019.0
mlxtend 0.14.0
more-itertools 8.2.0
moviepy 0.2.3.5
mpmath 1.1.0
msgpack 1.0.0
multiprocess 0.70.9
multitasking 0.0.9
murmurhash 1.0.2
music21 5.5.0
natsort 5.5.0
nbconvert 5.6.1
nbformat 5.0.4
networkx 2.4
nibabel 3.0.2
nltk 3.2.5
notebook 5.2.2
np-utils 0.5.12.1
numba 0.47.0
numexpr 2.7.1
numpy 1.18.2
nvidia-ml-py3 7.352.0
oauth2client 4.1.3
oauthlib 3.1.0
okgrade 0.4.3
opencv-contrib-python 4.1.2.30
opencv-python 4.1.2.30
openpyxl 2.5.9
opt-einsum 3.2.0
osqp 0.6.1
packaging 20.3
palettable 3.3.0
pandas 0.25.3
pandas-datareader 0.7.4
pandas-gbq 0.11.0
pandas-profiling 1.4.1
pandocfilters 1.4.2
parso 0.6.2
pathlib 1.0.1
patsy 0.5.1
pexpect 4.8.0
pickleshare 0.7.5
Pillow 7.0.0
pip 19.3.1
pip-tools 4.5.1
plac 1.1.3
plotly 4.4.1
plotnine 0.6.0
pluggy 0.7.1
portpicker 1.3.1
prefetch-generator 1.0.1
preshed 3.0.2
prettytable 0.7.2
progressbar2 3.38.0
prometheus-client 0.7.1
promise 2.3
prompt-toolkit 1.0.18
protobuf 3.10.0
psutil 5.4.8
psycopg2 2.7.6.1
ptyprocess 0.6.0
py 1.8.1
pyarrow 0.14.1
pyasn1 0.4.8
pyasn1-modules 0.2.8
pycocotools 2.0.0
pycparser 2.20
pydata-google-auth 0.3.0
pydot 1.3.0
pydot-ng 2.0.0
pydotplus 2.0.2
PyDrive 1.3.1
pyemd 0.5.1
pyglet 1.5.0
Pygments 2.1.3
pygobject 3.26.1
pymc3 3.7
PyMeeus 0.3.7
pymongo 3.10.1
pymystem3 0.2.0
PyOpenGL 3.1.5
pyparsing 2.4.6
pyrsistent 0.16.0
pysndfile 1.3.8
PySocks 1.7.1
pystan 2.19.1.1
pytest 3.6.4
python-apt 1.6.5+ubuntu0.2
python-chess 0.23.11
python-dateutil 2.8.1
python-louvain 0.13
python-slugify 4.0.0
python-utils 2.4.0
pytz 2018.9
PyWavelets 1.1.1
PyYAML 3.13
pyzmq 17.0.0
qtconsole 4.7.1
QtPy 1.9.0
regex 2019.12.20
requests 2.21.0
requests-oauthlib 1.3.0
resampy 0.2.2
retrying 1.3.3
rpy2 3.2.7
rsa 4.0
s3fs 0.4.0
s3transfer 0.3.3
scikit-image 0.16.2
scikit-learn 0.22.2.post1
scipy 1.4.1
screen-resolution-extra 0.0.0
scs 2.1.1.post2
seaborn 0.10.0
Send2Trash 1.5.0
setuptools 46.0.0
setuptools-git 1.2
Shapely 1.7.0
simplegeneric 0.8.1
six 1.12.0
sklearn 0.0
sklearn-pandas 1.8.0
smart-open 1.10.0
snowballstemmer 2.0.0
sortedcontainers 2.1.0
spacy 2.2.4
Sphinx 1.8.5
sphinxcontrib-websupport 1.2.1
SQLAlchemy 1.3.15
sqlparse 0.3.1
srsly 1.0.2
statsmodels 0.10.2
sympy 1.1.1
tables 3.4.4
tabulate 0.8.7
tblib 1.6.0
tensorboard 2.1.1
tensorboardcolab 0.0.22
tensorflow 2.2.0rc1
tensorflow-addons 0.8.3
tensorflow-datasets 2.1.0
tensorflow-estimator 2.2.0rc0
tensorflow-gcs-config 2.1.8
tensorflow-hub 0.7.0
tensorflow-metadata 0.21.1
tensorflow-privacy 0.2.2
tensorflow-probability 0.9.0
termcolor 1.1.0
terminado 0.8.3
testpath 0.4.4
text-unidecode 1.3
textblob 0.15.3
textgenrnn 1.4.1
Theano 1.0.4
thinc 7.4.0
toolz 0.10.0
torch 1.4.0
torchsummary 1.5.1
torchtext 0.3.1
torchvision 0.5.0
tornado 4.5.3
tqdm 4.38.0
traitlets 4.3.3
tweepy 3.6.0
typeguard 2.7.1
typing 3.6.6
typing-extensions 3.6.6
tzlocal 1.5.1
umap-learn 0.3.10
uritemplate 3.0.1
urllib3 1.24.3
vega-datasets 0.8.0
wasabi 0.6.0
wcwidth 0.1.9
webencodings 0.5.1
Werkzeug 1.0.0
wheel 0.34.2
widgetsnbextension 3.5.1
wordcloud 1.5.0
wrapt 1.12.1
xarray 0.15.0
xgboost 0.90
xkit 0.0.0
xlrd 1.1.0
xlwt 1.3.0
yellowbrick 0.9.1
zict 2.0.0
zipp 3.1.0
###Markdown
Python objects
###Code
# [], {},() : LIST -- DICTIONARY -- TUPLE
## LIST ##
lista = [0,1,-2,3]
lista
lista = [0,1,-2,3,'hello',1.0] # int, string,float
lista
type(lista)
# generate fast list:
lista1 = [2]*10
lista1
lista2 = [3]*5
lista2
lista2.append(2)
lista2
# count the elements inside a list
len(lista2)
lista = [1,2,3,4,5]
lista
#select an element of a list
lista[5] # gives an IndexError: list indices start at 0, so the last valid index here is 4
lista[0]
#lista[1]
lista[4]
lista[-1]
lista[-2] ## also try with -3, -4, -5
primi = [2,3,5,7,11,13,17,19,23,29,31]
primi[0:] # all elements
#primi[1:] # excluding the first
selection = primi[0:1] # only the first
selection
for pippo in primi:
print(pippo)
lista_numerica = list(range(300,400,2)) ## even numbers only
lista_numerica
## DICTIONARY ##
dizionario = {'key':'value'}
dizionario
d = {'one':1,'two':2}
d
e = {1:133,2:246}
e
f = {'one':1,2:2,3:'three'}
f.values()
d.keys()
d.values()
### TUPLE #### (immutable)
tup1 = (1,2,3)
tup2 = (1,4,9)
tup1
tup2
type(tup1)
### convert tuple to list
lista = [tup1,tup2]
lista
lista[1] # the second element, since indexing starts at 0
lista[0][1] #2
lista[1][1] #4
###Output
_____no_output_____
###Markdown
Array
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
**NumPy arrays are homogeneous: a single array can hold any element type (numbers, strings, even Python objects), but all of its elements share one dtype. You can have an array of integers, an array of strings, or an array of arrays; if you mix types, NumPy coerces everything to a common dtype rather than storing genuinely different types side by side**
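A small illustrative cell (added for clarity; the exact string dtype shown, e.g. <U21, depends on the values and platform) demonstrating how NumPy coerces mixed inputs to a single common dtype:
###Code
import numpy as np
mixed_numeric = np.array([1, 2, 3.5])      # ints plus a float are upcast to float64
print(mixed_numeric.dtype)
mixed_strings = np.array([1, 'two', 3.5])  # adding a string coerces everything to a string dtype
print(mixed_strings.dtype)
###Output
_____no_output_____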
###Code
country = np.array(['USA', 'Japan', 'UK', 'Celestopoli', 'India', 'China'])
print(country)
x = np.array([2,3,1,0])
x.shape
y = np.array([[2,3,1,0],[1,1,1,1]])
y.shape
np.zeros((2, 3))
k =np.ones((2, 3))
k
k.shape
print(k*5)
#print(k+1)
k
np.arange(10)
np.arange(2, 10, dtype=float) # default step=1
np.arange(2, 3, 0.1)
np.linspace(1., 4., 6)
x = np.array([3, 6, 9, 12])
x/3.0
x
print(x)
type(x)
a = np.array([[1, 2, 3], [4, 5, 6]])
b = np.array([[9, 8, 7], [6, 5, 4]])
c = np.concatenate((a, b))
c
c.shape
np.ones((2, 2), dtype=bool)
z = np.array([[1, 0, 1], [0, 0, 0]],dtype=bool)
z
## Conversion to an array:
import numpy as np
my_list = [1, 2, 3, 4, 5, 6, 7, 8]
my_list = np.asarray(my_list)
#type(my_list)
my_list
my_tuple = ([8, 4, 6], [1, 2, 3])
type(my_tuple)
my_tuple = np.asarray(my_tuple)
###Output
_____no_output_____
###Markdown
List vs Tuple
###Code
####### List vs Array
y = [3, 6, 9, 12]
y/3
## Gives a TypeError: a plain Python list cannot be divided by a number
# Sorting a list
numbers = [1, 3, 4, 2]
numbers
numbers.sort()
print(numbers)
## String list
a = ["apple", "banana", "orange"]
a
a[0] = "strawberry"
a
type(a)
a = ("apple", "banana", "orange")
a
type(a)
a[0] = "strawberry"
## Gives a TypeError: tuples are immutable
###Output
_____no_output_____
###Markdown
List recap
###Code
a = []
a.append("Hello")
a
a.append("FAV's")
a.append("smart")
a.append("students")
print("The length of this list is: ", len(a))
type(a)
a # or print(a)
# remove one element from list
a.remove(a[0]) #remove first element 'Hello'
a
###Output
_____no_output_____
###Markdown
Matplotlib
###Code
%matplotlib inline
# avoids having to call plt.show() every time -- if you reuse this code in a .py file, remove %matplotlib inline
# and call plt.show() at the end of your plotting commands so the figure pops up in its own window.
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import normal,rand
# rand by default, float number [0,1]
a = rand(100)
b = rand(100)
type(a)
plt.figure(figsize=(15,5))
plt.scatter(a,b)
#plt.show()
a = rand(1000)
b = rand(1000)
plt.scatter(a,b);
#plt.show()
print('')
###Output
_____no_output_____ |
lecture_04/03_neural_network.ipynb | ###Markdown
Neural Network: we build a neural network made up of multiple neurons and use it to classify Iris species. ● Building the neural network: neurons are arranged in layers to form an input layer, a middle (hidden) layer, and an output layer. The input layer only receives the inputs, while the middle layer is given two neurons and the output layer one.
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
iris = datasets.load_iris()
iris_data = iris.data
sl_data = iris_data[:100, 0] # Setosa and Versicolor, Sepal length
sw_data = iris_data[:100, 1] # Setosa and Versicolor, Sepal width
# Shift the mean to 0
sl_ave = np.average(sl_data) # mean value
sl_data -= sl_ave # subtract the mean
sw_ave = np.average(sw_data)
sw_data -= sw_ave
# Store the inputs in a list
input_data = []
for i in range(100): # i takes the values 0 through 99
input_data.append([sl_data[i], sw_data[i]])
# Sigmoid function
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
# Neuron
class Neuron:
    def __init__(self): # initial setup
self.input_sum = 0.0
self.output = 0.0
def set_input(self, inp):
self.input_sum += inp
def get_output(self):
self.output = sigmoid(self.input_sum)
return self.output
def reset(self):
self.input_sum = 0
self.output = 0
# Neural network
class NeuralNetwork:
    def __init__(self): # initial setup
        # weights
        self.w_im = [[4.0, 4.0], [4.0, 4.0]] # inputs: 2, neurons: 2
        self.w_mo = [[1.0, -1.0]] # inputs: 2, neurons: 1
        # biases
        self.b_m = [2.0, -2.0] # neurons: 2
        self.b_o = [-0.5] # neurons: 1
        # declare each layer
        self.input_layer = [0.0, 0.0]
        self.middle_layer = [Neuron(), Neuron()]
        self.output_layer = [Neuron()]
    def commit(self, input_data): # run the network (forward pass)
        # reset each layer
        self.input_layer[0] = input_data[0] # the input layer only receives the values
self.input_layer[1] = input_data[1]
self.middle_layer[0].reset()
self.middle_layer[1].reset()
self.output_layer[0].reset()
        # input layer -> middle layer
self.middle_layer[0].set_input(self.input_layer[0] * self.w_im[0][0])
self.middle_layer[0].set_input(self.input_layer[1] * self.w_im[0][1])
self.middle_layer[0].set_input(self.b_m[0])
self.middle_layer[1].set_input(self.input_layer[0] * self.w_im[1][0])
self.middle_layer[1].set_input(self.input_layer[1] * self.w_im[1][1])
self.middle_layer[1].set_input(self.b_m[1])
        # middle layer -> output layer
self.output_layer[0].set_input(self.middle_layer[0].get_output() * self.w_mo[0][0])
self.output_layer[0].set_input(self.middle_layer[1].get_output() * self.w_mo[0][1])
self.output_layer[0].set_input(self.b_o[0])
return self.output_layer[0].get_output()
# Instance of the neural network
neural_network = NeuralNetwork()
# Run the classification
st_predicted = [[], []] # Setosa
vc_predicted = [[], []] # Versicolor
for data in input_data:
if neural_network.commit(data) < 0.5:
st_predicted[0].append(data[0]+sl_ave)
st_predicted[1].append(data[1]+sw_ave)
else:
vc_predicted[0].append(data[0]+sl_ave)
vc_predicted[1].append(data[1]+sw_ave)
# Plot the classification results
plt.scatter(st_predicted[0], st_predicted[1], label="Setosa")
plt.scatter(vc_predicted[0], vc_predicted[1], label="Versicolor")
plt.legend()
plt.xlabel("Sepal length (cm)")
plt.ylabel("Sepal width (cm)")
plt.title("Predicted")
plt.show()
# Cell for code practice
###Output
_____no_output_____ |
Produce_2N/Produce 2N.ipynb | ###Markdown
CRN for Produce 2N w/ Broccoli Aptamer
###Code
# Create CRN of Produce 2N
Prod2_off = Species("Prod2")
rR1 = Species("rR1")
Broccoli_Aptamer = Species("BrocApt")
dA1 = Species("dA1" )
DFHBI = Species("DFHBI")
Prod2 = Genelet(Prod2_off, transcript= Broccoli_Aptamer , activator= dA1 , inhibitor= rR1)
M_Prod2 = Mixture(name = "Produce2_test", components = [Prod2], parameter_file = "default_parameters.txt")
repr(M_Prod2)
CRN_Prod2 = M_Prod2.compile_crn()
rxn1 = Reaction([Broccoli_Aptamer,DFHBI], [ComplexSpecies([Broccoli_Aptamer, DFHBI], name = "Flu1")], k = 9.96e-2)
CRN_Prod2.add_reactions([rxn1])
# Write SBML
#CRN_Prod2.write_sbml_file('Prod2_CRN.xml')
print(CRN_Prod2)
# BioSCRAPE simulation of Produce2N with No repressors
io = {"Prod2_OFF": 5000, "dA1": 5000, "rR1": 0, "BrocApt": 0, "DFHBI": 60000,
"protein_RNAseH":0, "protein_RNAP":200}
# For label convenience
x = 'Time (hours)'
y = 'Concentration (uM)'
timepoints = np.linspace(0, 28800, 1000)
R = CRN_Prod2.simulate_with_bioscrape(timepoints, initial_condition_dict = io)
timepoints = timepoints/3600
bokeh.io.output_notebook()
p = bokeh.plotting.figure(
plot_width = 400,
plot_height = 300,
x_axis_label = x,
y_axis_label = y,
)
p.circle(timepoints, R["Prod2_OFF"], legend_label = "OFF Produce 2", color = "orange")
p.circle(timepoints, R["complex_Prod2_ON"], legend_label = "ON Produce 2" , color = "red")
p.legend.click_policy="hide"
p.legend.location = "center_right"
r = bokeh.plotting.figure(
plot_width = 400,
plot_height = 300,
x_axis_label = x,
y_axis_label = y,)
r.circle(timepoints, R["complex_Flu1"], legend_label = "Fluoresence", color = "darkgreen")
r.legend.location = "bottom_right"
s = bokeh.plotting.figure(
plot_width = 400,
plot_height = 300,
x_axis_label = x,
y_axis_label = y,)
s.circle(timepoints, R["BrocApt"], legend_label = "Broc Apt", color = "green")
s.legend.click_policy="hide"
s.legend.location = "top_left"
#### Attempt to track DFHBI dye molecules ####
#### Ended with key error #####
t = bokeh.plotting.figure(
plot_width = 400,
plot_height = 300,
x_axis_label = x,
y_axis_label = y,)
t.circle(timepoints, R["DFHBI"], legend_label = "DFHBI molecules", color = "lightgreen")
t.legend.click_policy="hide"
t.legend.location = "top_right"
bokeh.io.show(row (p, s, r, t))
warnings.filterwarnings("ignore")
###Output
C:\Users\Jeremiah\anaconda3\lib\site-packages\html5lib\_trie\_base.py:3: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working
from collections import Mapping
C:\Users\Jeremiah\anaconda3\lib\importlib\_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject
return f(*args, **kwds)
C:\Users\Jeremiah\anaconda3\lib\site-packages\biocrnpyler\chemical_reaction_network.py:1089: UserWarning: The following species are uninitialized and their value has been defaulted to 0: DFHBI, complex_Flu1,
initial_condition_dict = initial_condition_dict)
C:\Users\Jeremiah\anaconda3\lib\site-packages\scipy\integrate\odepack.py:248: ODEintWarning: Excess work done on this call (perhaps wrong Dfun type). Run with full_output = 1 to get quantitative information.
warnings.warn(warning_msg, ODEintWarning)
odeint failed with mxstep=500... |
Week02/Day02/Linear Regression Multivariate/2_linear_regression_multivariate.ipynb | ###Markdown
Machine Learning With Python: Linear Regression Multiple Variables Sample problem of predicting home prices in Monroe, New Jersey (USA) Below is the table containing home prices in Monroe Twp, NJ. Here the price depends on **area (square feet), bedrooms and age of the home (in years)**. Given these prices we have to predict prices of new homes based on area, bedrooms and age. Given these home prices, find the price of a home that has: **3000 sqr ft area, 3 bedrooms, 40 years old** and **2500 sqr ft area, 4 bedrooms, 5 years old**
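For reference (my own restatement of what the code below fits, not text from the original notebook), the model is the linear relationship $$ \text{price} = m_1 \cdot \text{area} + m_2 \cdot \text{bedrooms} + m_3 \cdot \text{age} + b $$ where scikit-learn exposes the coefficients $m_1, m_2, m_3$ as `reg.coef_` and the intercept $b$ as `reg.intercept_`; the manual check further down uses exactly this equation.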
###Code
import pandas as pd
import numpy as np
from sklearn import linear_model
df = pd.read_csv('homeprices.csv')
df.head()
df.shape
df.bedrooms.median()
###Output
_____no_output_____
###Markdown
**Data Preprocessing: Fill NA values with median value of a column**
###Code
df.bedrooms = df.bedrooms.fillna(df.bedrooms.median())
df
x = df[['area','bedrooms','age']]
x
y = df.price
y
###Output
_____no_output_____
###Markdown
**Now Apply Linear Regression model**
###Code
reg = linear_model.LinearRegression()
reg.fit(x,y)
reg.coef_
reg.intercept_
###Output
_____no_output_____
###Markdown
**Find price of home with 3000 sqr ft area, 3 bedrooms, 40 year old**
###Code
reg.predict([[3000,3,40]])
112.06244194*3000+23388.88007794*3+-3231.71790863*40+221323.00186540408
###Output
_____no_output_____
###Markdown
**Find price of home with 2500 sqr ft area, 4 bedrooms, 5 year old**
###Code
reg.predict([[2500,4,5]])
###Output
_____no_output_____ |
cuml/random_forest_mnmg_demo.ipynb | ###Markdown
Random Forests Multi-node, Multi-GPU demoThe experimental cuML multi-node, multi-GPU (MNMG) implementation of random forests leverages Dask to do embarrassingly-parallel model fitting. For a random forest with `N` trees being fit by `W` workers, each worker will build `N / W` trees. During inference, predictions from all `N` trees will be combined.The caller is responsible for partitioning the data efficiently via Dask. To build an accurate model, it's important to ensure that each worker has a representative chunk of the data. This can come by distributing the data evenly after ensuring that it is well shuffled. Or, given sufficient memory capacity, the caller can replicate the data to all workers. This approach will most closely simulate the single-GPU building approach.**Note:** cuML 0.9 contains the first, experimental preview release of the MNMG random forest model. The API is subject to change in future releases, and some known limitations remain (listed in the documentation).For more information on MNMG Random Forest models, see the documentation: * https://rapidsai.github.io/projects/cuml/en/latest/api.htmlcuml.dask.ensemble.RandomForestClassifier * https://rapidsai.github.io/projects/cuml/en/latest/api.htmlcuml.dask.ensemble.RandomForestRegressor
###Code
import numpy as np
import sklearn
import pandas as pd
import cudf
import cuml
from sklearn.metrics import accuracy_score
from sklearn import model_selection, datasets
from cuml.dask.common import utils as dask_utils
from dask.distributed import Client, wait
from dask_cuda import LocalCUDACluster
import dask_cudf
from cuml.dask.ensemble import RandomForestClassifier as cumlDaskRF
from sklearn.ensemble import RandomForestClassifier as sklRF
###Output
_____no_output_____
###Markdown
Start Dask cluster
###Code
# This will use all GPUs on the local host by default
cluster = LocalCUDACluster(threads_per_worker=1)
c = Client(cluster)
# Query the client for all connected workers
workers = c.has_what().keys()
n_workers = len(workers)
n_streams = 8 # Performance optimization
###Output
_____no_output_____
###Markdown
Define ParametersIn addition to the number of examples, random forest fitting performance depends heavily on the number of columns in a dataset and (especially) on the maximum depth to which trees are allowed to grow. Lower `max_depth` values can greatly speed up fitting, though going too low may reduce accuracy.
###Code
# Data parameters
train_size = 100000
test_size = 1000
n_samples = train_size + test_size
n_features = 20
# Random Forest building parameters
max_depth = 12
n_bins = 16
n_trees = 1000
###Output
_____no_output_____
###Markdown
Generate Data on hostIn this case, we generate data on the client (initial process) and pass it to the workers. You could also load data directly onto the workers via, for example, `dask_cudf.read_csv()`. See also the k-means MNMG notebook (kmeans_mnmg_demo.ipynb) for an alternative method of generating data on the worker nodes.
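As a purely hypothetical sketch (the file pattern below does not exist in this demo and is an assumption for illustration), reading data that already lives in per-partition CSV files directly into a distributed GPU dataframe could look like the commented lines below.
###Code
# Hypothetical alternative (not executed here): each matching file becomes one or more
# partitions created on the workers rather than being generated on the client first.
# ddf = dask_cudf.read_csv("train_part_*.csv")
# y_train_dask = ddf["label"]
# X_train_dask = ddf.drop(columns=["label"])
###Output
_____no_output_____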
###Code
X, y = datasets.make_classification(n_samples=n_samples, n_features=n_features,
n_clusters_per_class=1, n_informative=int(n_features / 3),
random_state=123, n_classes=5)
y = y.astype(np.int32)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=test_size)
###Output
_____no_output_____
###Markdown
Distribute data to worker GPUs
###Code
n_partitions = n_workers
# First convert to cudf (with real data, you would likely load in cuDF format to start)
X_train_cudf = cudf.DataFrame.from_pandas(pd.DataFrame(X_train))
y_train_cudf = cudf.Series(y_train)
# Partition with Dask
# In this case, each worker will train on 1/n_partitions fraction of the data
X_train_dask = dask_cudf.from_cudf(X_train_cudf, npartitions=n_partitions)
y_train_dask = dask_cudf.from_cudf(y_train_cudf, npartitions=n_partitions)
# Persist to cache the data in active memory
X_train_dask, y_train_dask = \
dask_utils.persist_across_workers(c, [X_train_dask, y_train_dask], workers=workers)
###Output
_____no_output_____
###Markdown
Build a scikit-learn model (single node)Dask does not currently have a simple wrapper for scikit-learn's RandomForest, but scikit-learn does offer multi-CPU support via joblib, which we'll use.
###Code
%%time
# Use all available CPU cores
skl_model = sklRF(max_depth=max_depth, n_estimators=n_trees, n_jobs=-1)
skl_model.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Train the distributed cuML model
###Code
%%time
cuml_model = cumlDaskRF(max_depth=max_depth, n_estimators=n_trees, n_bins=n_bins, n_streams=n_streams)
cuml_model.fit(X_train_dask, y_train_dask)
wait(cuml_model.rfs) # Allow asynchronous training tasks to finish
###Output
_____no_output_____
###Markdown
Predict and check accuracy
###Code
skl_y_pred = skl_model.predict(X_test)
cuml_y_pred = cuml_model.predict(X_test)
# Due to randomness in the algorithm, you may see slight variation in accuracies
print("SKLearn accuracy: ", accuracy_score(y_test, skl_y_pred))
print("CuML accuracy: ", accuracy_score(y_test, cuml_y_pred))
###Output
_____no_output_____ |
j_notebooks/Rivals.ipynb | ###Markdown
Source: Rivals
###Code
#hide
import core_constants as cc
import functions as fx
import json
import pandas as pd
import sqlite3 as sql
###Output
_____no_output_____
###Markdown
Set Notebook Settings
###Code
conference = 'sunbelt'
years = cc.get_defYears()
headers= cc.get_header()
schoolsList = cc.get_schoolsList()
teamDirectory = cc.get_htmlDir('rivals', conference, 'teams')
playerDirectory = cc.get_htmlDir('rivals', conference, 'recruits')
#testDirectory = '..//tests//'
###Output
_____no_output_____
###Markdown
Get & Save the Teams & Players Page HTML Source: https://maryland.rivals.com/commitments/football/2012> This page contains metadata of each player along with the Rivals ranking and stars. Unlike 247Sports, we process the fetch and save of both pages directly from a single function
###Code
fx.get_Rivals(conference, schoolsList, years, headers, sleepyTime=6)
###Output
_____no_output_____
###Markdown
Process Local Rivals HTML Files> All of this processing is done locally, using the files saved in the previous few steps. This creates an exhaustive store of all the fields grabbed from the scrapes.
###Code
cc.save_records('scrapedData', 'rivals_' + conference, fx.process_Rivals(playerDirectory, conference, schoolsList))
###Output
_____no_output_____
###Markdown
Save to Database
###Code
fx.toDB_Rivals()
conferences = cc.get_availableConferences()
#conferences = ['sunbelt']
for conf in conferences:
print ("working on - " + conf)
conference = conf
years = cc.get_defYears()
headers= cc.get_header()
schoolsList = cc.get_schoolsList()
teamDirectory = cc.get_htmlDir('rivals', conference, 'teams')
playerDirectory = cc.get_htmlDir('rivals', conference, 'recruits')
if (conf == 'acc' or conf == 'pactwelve'):
cc.save_records('scrapedData', 'rivals_' + conference, fx.process_Rivals(playerDirectory, conference, schoolsList, 'utf-8'))
else:
cc.save_records('scrapedData', 'rivals_' + conference, fx.process_Rivals(playerDirectory, conference, schoolsList, 'windows-1252'))
# encoding notes: bigten, big twelve, sec, american, independents, cusa, mac, mwc, sunbelt -> windows-1252
# acc, pactwelve -> utf-8
###Output
_____no_output_____ |
Data-Visualization/1-Visualization-and-Reports.ipynb | ###Markdown
EDA & Data Visualization In this section we will visualize the data and build useful reports and dashboards in order to become more familiar with this data and get a clearer view of it. For this part, we import our data and change the index to datetime so we can work on the time series data. We will start by showing the top products sold across the globe. There are two indicators that show how much benefit came from each product. In the first plot of the subplot below, we can see the top 20 products bought in the largest quantities by the customers, and in the second plot we can see which products have brought us the most monetary benefit.
###Code
#importing necessary libraries and the cleaned dataset
import pandas as pd, numpy as np, matplotlib.pyplot as plt, seaborn as sns
%matplotlib inline
CleanDataset = r'../Cleaned-Dataset/OnlineRetail_Cleaned.csv'
Data_Cleaned = pd.read_csv(CleanDataset, index_col = 'InvoiceDate')
Data_Cleaned.index = pd.to_datetime(Data_Cleaned.index, format = '%Y-%m-%d %H:%M', box = False)
#top 20 products by quantity and finalprice
sns.set_style('whitegrid')
Top20Quan = Data_Cleaned.groupby('Description')['Quantity'].agg('sum').sort_values(ascending=False)[0:20]
Top20Price = Data_Cleaned.groupby('Description')['FinalPrice'].agg('sum').sort_values(ascending=False)[0:20]
#creating the subplot
fig,axs = plt.subplots(nrows=2, ncols=1, figsize = (12,12))
plt.subplots_adjust(hspace = 0.3)
fig.suptitle('Best Selling Products by Amount and Value', fontsize=15, x = 0.4, y = 0.98)
sns.barplot(x=Top20Quan.values, y=Top20Quan.index, ax= axs[0]).set(xlabel='Total amount of sales')
axs[0].set_title('By Amount', size=12, fontweight = 'bold')
sns.barplot(x=Top20Price.values, y=Top20Price.index, ax= axs[1]).set(xlabel='Total value of sales')
axs[1].set_title('By Value', size=12, fontweight = 'bold')
plt.show()
###Output
_____no_output_____
###Markdown
The next statistic we are interested in is which products were most often returned by our customers, and also which customers, and from which countries, had the most returned items in their transactions.
###Code
#finding the most returned items and the customers with the corresponding country
ReturnedItems = Data_Cleaned[Data_Cleaned.Quantity<0].groupby('Description')['Quantity'].sum()
ReturnedItems = ReturnedItems.abs().sort_values(ascending=False)[0:10]
ReturnCust = Data_Cleaned[Data_Cleaned.Quantity<0].groupby(['CustomerID','Country'])['Quantity'].sum()
ReturnCust = ReturnCust.abs().sort_values(ascending=False)[0:10]
#creating the subplot
fig, [ax1, ax2] = plt.subplots(nrows=2, ncols=1, figsize=(12,10))
ReturnedItems.sort_values().plot(kind='barh', ax=ax1).set_title('Most Returned Items', fontsize=15)
ReturnCust.sort_values().plot(kind='barh', ax=ax2).set_title('Customers with Most Returns', fontsize=15)
ax1.set(xlabel='Quantity')
ax2.set(xlabel='Quantity')
plt.subplots_adjust(hspace=0.4)
plt.show()
###Output
_____no_output_____
###Markdown
In the jointplot below, we can see the pairwise comparison between the 'UnitPrice' and 'Quantity' of the purchased products. It makes sense that as the price of a product increases, its sales volume shrinks, and customers are more inclined to buy products in large quantities when prices are low.
###Code
#plotting the quantity vs unitprice
Corr = sns.jointplot(x="Quantity", y="UnitPrice", data = Data_Cleaned[Data_Cleaned.FinalPrice>0], height = 7)
Corr.fig.suptitle("UnitPrice and Quantity Comparison", fontsize = 15, y = 1.1)
plt.show()
###Output
_____no_output_____
###Markdown
In the next chart we are going to look at the weekly trend of sales over the year. We can get the weekly sales by resampling our time series data to weeks and summing the values in each week. The first chart shows the weekly sales and the second the weekly returns by customers. After a sudden decline in January, sales follow a roughly upward trend. The returns, apart from the second week of October, stay almost flat with only a slight increase.
###Code
#resampling to get the weekly sales and returns
WeeklySale = Data_Cleaned[Data_Cleaned['Quantity']>0].Quantity.resample('W').sum()
WeeklyRet = Data_Cleaned[Data_Cleaned['Quantity']<0].Quantity.resample('W').sum().abs()
#creating the subplot
fig,[ax1, ax2] = plt.subplots(nrows=1,ncols=2, figsize = (15,5))
WeeklySale.plot(ax=ax1).set(xlabel="Month", ylabel="Quantity")
ax1.set_title("Weekly Sales Quantity", fontsize = 15)
WeeklyRet.plot(ax=ax2).set(xlabel="Month", ylabel="Quantity")
ax2.set_title("Weekly Returns Quantity", fontsize = 15)
plt.show()
###Output
_____no_output_____
###Markdown
In the next chart, we are going to see how many items were sold and returned across foreign countries. Since the United Kingdom accounts for the great majority of sales and would dominate the chart without adding useful information, we leave it out so the remaining countries are easier to compare. It looks like our products were mostly sold in the Netherlands and mostly returned in Ireland (EIRE).
###Code
#grouping data by the countries(except UK)
ByCountrySale = Data_Cleaned[(Data_Cleaned.Country != 'UNITED KINGDOM') & (Data_Cleaned.Quantity > 0)].groupby('Country')['Quantity'].sum()
ByCountryRet = Data_Cleaned[(Data_Cleaned.Country != 'UNITED KINGDOM') & (Data_Cleaned.Quantity < 0)].groupby('Country')['Quantity'].sum().abs()
#creating the subplot
fig, [ax1,ax2] = plt.subplots(nrows=2,ncols=1,figsize=(10,14))
ByCountrySale.plot(kind='bar', ax=ax1).set(ylabel = 'Quantity',xlabel='')
ax1.set_title('Sales', size=12, fontweight = 'bold')
ByCountryRet.plot(kind='bar', ax=ax2).set(ylabel = 'Quantity',xlabel='')
ax2.set_title('Returns', size=12, fontweight = 'bold')
plt.suptitle('Sales in Foreign Countries', fontsize = 15)
plt.subplots_adjust(hspace = 0.6)
plt.show()
###Output
_____no_output_____
###Markdown
Since we recorded the day of the week on which the items were sold, we can use it to break down the sales value by day of the week. Thursday clearly has the highest value and Sunday the lowest.
###Code
#creating the pie chart
Data_Cleaned.groupby('Day of week')['FinalPrice'].sum().plot(kind = 'pie', autopct = '%.2f%%', figsize=(7,7)).set(ylabel='')
plt.title('Percentages of Sales Value by Day of Week', fontsize = 15)
plt.show()
###Output
_____no_output_____
###Markdown
We can single out our best customers based on the value they brought to the company, and also show which countries they come from.
###Code
#filtering customers by the total finalprice
Top10Customers = Data_Cleaned.groupby(['CustomerID','Country'])['FinalPrice'].sum().sort_values(ascending=False)[0:10]
#creating the barplot
plt.figure(figsize=(8,5))
sns.barplot(x=Top10Customers.values, y=Top10Customers.index).set(xlabel='Total Value',ylabel='CustomerID')
plt.suptitle('Top10 Customers and Country of Origin by Sales Value', fontsize = 15)
plt.show()
###Output
_____no_output_____
###Markdown
Another statistic that we could use for future planning is how many of our customers are repeat customers, meaning that they bought products from us more than once. In the pie chart below we can see that almost 70% of the customers are repeat customers. In the other plot we can also see which customers, and from which countries, had the most repeat transactions.
###Code
#grouping customers by the number of their visits and separating them
MostRepeat = Data_Cleaned.groupby(['CustomerID','Country'])['InvoiceNo'].nunique().sort_values(ascending=False)
rep = MostRepeat[MostRepeat != 1].values
nrep = MostRepeat[MostRepeat == 1].values
ser = pd.Series([len(rep)/(len(rep)+len(nrep)),len(nrep)/(len(rep)+len(nrep))], index=['Repeat Customers','One-time Customers'])
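# Added cross-check (not in the original): the repeat share can also be computed directly as
# the fraction of customers with more than one distinct invoice.
print('Share of repeat customers: %.2f%%' % (100 * (MostRepeat > 1).mean()))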
#creating the subplot
fig, [ax1, ax2] = plt.subplots(nrows=1, ncols=2, figsize= (15,5), gridspec_kw= {'width_ratios':[3,1]})
plt.subplots_adjust(wspace=0.2)
sns.barplot(x=MostRepeat[0:10].values, y=MostRepeat[0:10].index, ax=ax1).set(xlabel='Number of Transactions(Repeats)',ylabel='CustomerID')
ser.plot(kind='pie', autopct='%.2f%%', ax=ax2).set(ylabel='')
plt.suptitle('Top Repeat Customers', fontsize=15)
plt.show()
###Output
_____no_output_____
###Markdown
In the plots below, we can see the distributions of the 'Quantity' and 'UnitPrice' attributes.
###Code
#creating distribution plots
fig , [ax1,ax2] = plt.subplots(nrows=1,ncols=2,figsize=(12,4))
with sns.axes_style('dark'):
sns.distplot(Data_Cleaned['Quantity'], ax=ax1)
sns.distplot(Data_Cleaned['UnitPrice'], ax=ax2)
fig.suptitle('UnitPrice and Quantity Distribution', fontsize = 15)
plt.show()
###Output
_____no_output_____
###Markdown
In the last plot, we will use three features to show how the sales are distributed among the different months and days of the week. To show that, we will use seaborn's heatmap. The x-axis shows the day and the y-axis the month in which the items were bought, while the color scale shows the total value of sales.
###Code
HM_Data = Data_Cleaned.pivot_table(index = 'InvoiceMonth',columns = 'Day of week', values = 'FinalPrice', aggfunc='sum')
plt.figure(figsize = (10,6))
sns.heatmap(HM_Data, cmap = 'vlag').set(xlabel='', ylabel='')
plt.title('Sales Value per Month and Day of Week', fontsize = 15)
plt.show()
###Output
_____no_output_____ |
In Progress - Deep Learning - The Straight Dope/03 - Deep Neural Networks/02 - Multilayer perceptrons in gluon.ipynb | ###Markdown
02 - Multilayer perceptrons in gluon
###Code
import mxnet as mx
import numpy as np
from mxnet import gluon
from tqdm import tqdm
###Output
_____no_output_____
###Markdown
Context
###Code
data_ctx = mx.cpu()
model_ctx = mx.cpu()
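# Added sketch: switch to the GPU automatically when one is available. The num_gpus helper is
# assumed to exist (MXNet >= 1.2); the hasattr guard keeps this harmless on older versions.
if hasattr(mx.context, 'num_gpus') and mx.context.num_gpus() > 0:
    model_ctx = mx.gpu(0)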
###Output
_____no_output_____
###Markdown
MNIST Dataset
###Code
batch_size = 64
num_inputs = 784
num_outputs = 10
num_examples = 60000
def transform(data, label):
return data.astype(np.float32) / 255, label.astype(np.float32)
train_data = gluon.data.DataLoader(dataset=gluon.data.vision.MNIST(train=True, transform=transform),
batch_size=batch_size,
shuffle=True)
test_data = gluon.data.DataLoader(dataset=gluon.data.vision.MNIST(train=False, transform=transform),
batch_size=batch_size,
shuffle=False)
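# Added sketch: peek at a single batch to confirm the loader output;
# the expected shapes are (64, 28, 28, 1) for the images and (64,) for the labels.
for data, label in train_data:
    print(data.shape, label.shape)
    break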
###Output
_____no_output_____
###Markdown
Define MLP model with gluon.Block
###Code
class MLP(gluon.Block):
def __init__(self, **kwargs):
super(MLP, self).__init__(**kwargs)
with self.name_scope():
self.dense0 = gluon.nn.Dense(64)
self.dense1 = gluon.nn.Dense(64)
self.dense2 = gluon.nn.Dense(10)
def forward(self, x):
x = mx.nd.relu(self.dense0(x))
x = mx.nd.relu(self.dense1(x))
x = self.dense2(x)
return x
net = MLP()
net.collect_params().initialize(mx.init.Normal(sigma=.01),
ctx=model_ctx)
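# Added sketch: gluon Blocks have a readable repr, so printing the network is a quick way to
# inspect the layer sizes (the weight shapes themselves stay deferred until the first forward pass).
print(net)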
###Output
_____no_output_____
###Markdown
Example of a single forward pass
###Code
data = mx.nd.ones(shape=[1, 784])
class MLP(gluon.Block):
def __init__(self, **kwargs):
super(MLP, self).__init__(**kwargs)
with self.name_scope():
self.dense0 = gluon.nn.Dense(units=64, activation="relu")
self.dense1 = gluon.nn.Dense(units=64, activation="relu")
self.dense2 = gluon.nn.Dense(units=10)
def forward(self, x):
x = self.dense0(x)
print("-" * 70)
print("Hidden Representation 1: %s" % x)
x = self.dense1(x)
print("-" * 70)
print("Hidden Representation 2: %s" % x)
x = self.dense2(x)
print("-" * 70)
print("Network output: %s" % x)
print("-" * 70)
return x
net = MLP()
net.collect_params().initialize(mx.init.Normal(sigma=.01), ctx=model_ctx)
net(data.as_in_context(model_ctx))
###Output
----------------------------------------------------------------------
Hidden Representation 1:
[[0. 0.25953296 0.5081844 0.47407073 0.5739144 0.04646487
0.3490802 0. 0. 0. 0. 0.
0.09897906 0. 0.44429356 0.5806929 0. 0.
0.07937321 0.13445261 0.17002776 0. 0.59629107 0.
0.51476306 0.2620116 0.07252947 0. 0.44609177 0.
0.10297956 0.12023637 0.01070242 0.14927042 0. 0.11931495
0.06247869 0.34996682 0.23720959 0.33213574 0. 0.
0.35576025 0.02980644 0. 0. 0.3602543 0.01930529
0.5578985 0. 0. 0.22368181 0.3668564 0.0344954
0.16685106 0. 0.07805604 0.04645126 0.46009526 0.
0. 0. 0. 0.4059968 ]]
<NDArray 1x64 @cpu(0)>
----------------------------------------------------------------------
Hidden Representation 2:
[[0. 0. 0.00471901 0.00809325 0.00563266 0.00358269
0.01304015 0. 0. 0.0179144 0.00409093 0.01971137
0.01811438 0. 0. 0.03330275 0.03080758 0.
0.01005297 0. 0. 0. 0. 0.
0. 0. 0. 0.01851467 0. 0.00467824
0. 0.00476716 0.00890849 0. 0.01493133 0.
0.01890475 0. 0.01004198 0. 0. 0.
0. 0. 0.0218619 0. 0.01256697 0.
0.00875257 0.01837254 0. 0.012395 0. 0.
0. 0. 0.03347883 0. 0.00547096 0.0096815
0.03013829 0. 0.02648943 0. ]]
<NDArray 1x64 @cpu(0)>
----------------------------------------------------------------------
Network output:
[[ 0.0010479 -0.00023263 0.00024665 -0.00137001 -0.00089217 -0.00043491
0.0017453 -0.00114445 0.00024293 -0.0004818 ]]
<NDArray 1x10 @cpu(0)>
----------------------------------------------------------------------
###Markdown
Faster modeling with gluon.nn.Sequential
###Code
num_hidden = 64
# Defining a sequential model
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(units=num_hidden,
activation="relu"))
net.add(gluon.nn.Dense(units=num_hidden,
activation="relu"))
net.add(gluon.nn.Dense(units=num_outputs))
# Parameter initialization
net.collect_params().initialize(mx.init.Normal(sigma=.1),
ctx=model_ctx)
# Softmax cross-entropy
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
# Optimizer
trainer = gluon.Trainer(params=net.collect_params(),
optimizer='sgd',
optimizer_params={'learning_rate': 0.01})
###Output
_____no_output_____
###Markdown
Evaluation
###Code
def evaluate_accuracy(data_iterator, net):
acc = mx.metric.Accuracy()
for i, (data, label) in enumerate(data_iterator):
data = data.as_in_context(model_ctx).reshape((-1, 784))
label = label.as_in_context(model_ctx)
output = net(data)
predictions = mx.nd.argmax(data=output,
axis=1)
# Updating accuracy metric
acc.update(preds=predictions,
labels=label)
return acc.get()[1]
###Output
_____no_output_____
###Markdown
Training
###Code
epochs = 10
smoothing_constant = .01
for e in tqdm(range(epochs)):
cumulative_loss = 0
for i, (data, label) in enumerate(train_data):
data = data.as_in_context(model_ctx).reshape((-1, 784))
label = label.as_in_context(model_ctx)
with mx.autograd.record():
output = net(data)
loss = softmax_cross_entropy(output, label)
loss.backward()
trainer.step(data.shape[0])
cumulative_loss += mx.nd.sum(loss).asscalar()
test_accuracy = evaluate_accuracy(test_data, net)
train_accuracy = evaluate_accuracy(train_data, net)
print("Epoch %s. Loss: %s, Train_acc %s, Test_acc %s" %
(e, cumulative_loss/num_examples, train_accuracy, test_accuracy))
train_accuracy
test_accuracy
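# Added sketch: persist the trained weights. save_parameters/load_parameters assume MXNet >= 1.3;
# older releases use save_params/load_params instead.
net.save_parameters('mlp_mnist.params')
# net.load_parameters('mlp_mnist.params', ctx=model_ctx)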
###Output
_____no_output_____ |
week_3/Exercise03_H07_05.ipynb | ###Markdown
Econophysics I Exercise 03 - H07 Juan Camilo Henao Londono Universität Duisburg-Essen, 05.05.2020
###Code
import numpy as np
from matplotlib import pyplot as plt
###Output
_____no_output_____
###Markdown
Exercise 03. Homework 07. Point 05. Additional task: generate the random numbers for the random walk following $q_{2} \left(\varepsilon\right)$ directly from the random numbers already drawn for $q_{1} \left(\varepsilon\right)$. Use the sign for that. Compare both random walks.
###Code
# Constant values
a1 = 3. ** 0.5
b1 = 1. / (2. * a1)
a2 = 1.
b2 = 0.5
# Number of random numbers
N = 10000
# random number between -a1 and a1
eps_1 = 2. * (np.random.random(N) - 0.5) * a1
# random number between -a2 and a2. From the distribution
# the result can only be -a2 or a2.
eps_2 = ((2. * np.random.randint(2, size=N)) - 1) * a2
# Signs of eps_1: this generates the +/-1 steps of q2 directly from the eps_1 draws
eps_2_signs = np.sign(eps_1)
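# Added check (not in the original task): both step distributions should have unit variance,
# since Var(eps_1) = (2*a1)**2 / 12 = 1 and the sign-mapped steps are +/-1.
print(np.var(eps_1), np.var(eps_2), np.var(eps_2_signs))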
fig2 = plt.figure(figsize=(16,9))
plt.plot(np.cumsum(eps_1), label="$q_1$")
plt.plot(np.cumsum(eps_2_signs), label=r'$q_1 \rightarrow q_2$')
plt.xlabel('t', fontsize=40)
plt.ylabel('Random walk', fontsize=40)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.legend(loc='best', fontsize=30)
plt.tight_layout()
plt.grid(True)
###Output
_____no_output_____ |
Eigenvalue_and_Eigenvector.ipynb | ###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
X = np.linalg.solve(A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t=np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
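# Added check: with the exact (unrounded) results, each eigenpair satisfies A v = lambda v,
# i.e. A @ t equals t scaled column-wise by the eigenvalues s.
print(np.allclose(np.dot(A, t), t * s))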
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
#from scipy.linalg import solve
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
B = np.array([[0],[0]])
print(B)
inv_A = np.linalg.inv(A)
print(inv_A)
X = np.dot(inv_A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are",w)
print("The right eigenvectors are",v)
x = v.round()
print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
f = np.array([[-1],[-1],[-1]])
print(f)
g = np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
X = np.linalg.solve(A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalues/s is/are:", w)
print("The right eigenvectors are:", v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
f = np.array([[-1],[-1],[-1]])
print(f)
g = np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from scipy.linalg import solve
#from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
X = solve(A,B)
print(X)
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalues is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
f = np.array([[-1],[-1],[-1]])
print(f)
g = np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from scipy.linalg import solve
A = np.array([[-12,3],[4,1]]) #matrix A
print("Matrix A:\n",A)
inv_A = np.linalg.inv(A) #inverse function
print("\nInverse Matrix A:\n",inv_A) #prints inverse matrix of A
B = np.array([[0],[0]]) #matrix B
print("\nMatrix B:\n",B)
X = np.dot(inv_A,B) #dot product of the inverse matrix A and matrix B
print("\nDot Product:\n",X)
#Example 1
A = np.array([[-6,3],[4,5]]) #matrix A
print("Matrix A:\n",A)
w,v = np.linalg.eig(A) #eig method to find the eigenvalue and eigenvector of matrix A
print("\nThe eigenvalue/s is/are:",w) #prints eigenvalues of w
print("\nThe eigenvector/s is/are:\n",v) #prints eigenvectors of v
#x = v.round() #prints rounded values of eigenvectors
#print(x)
#Example 2
A = np.array([[2, 2, 4],[1, 3, 5],[2, 3, 4]]) #matrix A
print("Matrix A:\n",A)
s,t = np.linalg.eig(A) #eig method to find the eigenvalue and eigenvector of matrix A
print("\nThe eigenvalue/s is/are:",s.round()) #prints eigenvalues of s
print("\nThe right eigenvector/s is/are:",t.round()) #prints eigenvalues of t
C = np.dot(A,t.round()) #dot product of matrix A and eigenvector
print("\nDot product:\n",C)
###Output
Matrix A:
[[2 2 4]
[1 3 5]
[2 3 4]]
The eigenvalue/s is/are: [ 9. 1. -1.]
The right eigenvector/s is/are: [[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
Dot product:
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A,"\n")
B = np.array([[0],[0]])
print(B,"\n")
invA = np.linalg.inv(A)
X = np.dot(invA,B)
print(X,"\n")
#X = solve(A,B)
#print(X)
###Output
[[-12 3]
[ 4 1]]
[[0]
[0]]
[[0.]
[0.]]
###Markdown
----------------------------------- EXAMPLE 1
###Code
C = np.array([[-6,3],[4,5]])
print(C)
w,v = np.linalg.eig(C)
print("\nThe eigenvalues is/are \n", w)
print("\nThe eigenvectors is are \n", v)
#x = v.round()
#print(x)
###Output
[[-6 3]
[ 4 5]]
The eigenvalues is/are
[-7. 6.]
The eigenvectors is are
[[-0.9486833 -0.24253563]
[ 0.31622777 -0.9701425 ]]
###Markdown
EXAMPLE 2
###Code
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print("\nThe eigenvalues is/are \n", s.round())
print("\nThe eigenvectors is are \n", t.round())
D = np.dot(A,t.round())
print("\n",D)
f = np.array([[-1],[-1],[-1]])
print(f)
g = np.dot(A,f)
print("\n",g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
B = np.array([[0],[0]])
print(B)
X = np.linalg.solve(A, B)
print(X)
X = np.dot(inv_A, B)
A = np.array([[-6,3],[4,5]])
print('Vector A:\n', A)
w, v = eig(A)
print('Eigenvalues:\n', w)
print('Eigenvectors:\n', v)
# Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c=np.dot(A,t.round())
print(c)
###Output
_____no_output_____
###Markdown
###Code
import numpy as np
#from scipy.linalg import solve
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
#X = solve(A,B)
#print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w)
print("The right eigenvenctors are",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
f = np.array([[-1],[-1],[-1]])
print(f)
g =np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np #Import Library
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]]) #Creation of Matrix
print(A) #to print or display
inv_A = np.linalg.inv(A) #to inverse matrix A
print(inv_A) #to print or display
B = np.array([[0],[0]]) #Creation of Matrix B
print(B)
X = np.dot(inv_A,B) #dot product
print(X) #to print or display
#X = solve(A,B)
#print(X)
###Output
[[-12 3]
[ 4 1]]
[[-0.04166667 0.125 ]
[ 0.16666667 0.5 ]]
[[0]
[0]]
[[0.]
[0.]]
###Markdown
Example 1
###Code
A = np.array([[-6,3],[4,5]]) #creation of Matrix
print(A) #to print or display
w,v = np.linalg.eig(A) #Compute eigenvalues and right eigenvectors of a square array
#to print or display
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
###Output
[[-6 3]
[ 4 5]]
The eigenvalue/s is/are: [-7. 6.]
The right eigenvectors are: [[-0.9486833 -0.24253563]
[ 0.31622777 -0.9701425 ]]
###Markdown
Example 2
###Code
A = np.array([[2,2,4],[1,3,5],[2,3,4]]) #Creation of Matrix A
print(A) #to print or display
s,t = np.linalg.eig(A) #Compute eigenvalues and right eigenvectors of a square array
print(s.round())
print(t.round())
c = np.dot(A,t.round()) #dot product of A and t
print(c)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
###Output
[[-12 3]
[ 4 1]]
[[-0.04166667 0.125 ]
[ 0.16666667 0.5 ]]
[[0]
[0]]
[[0.]
[0.]]
###Markdown
Example 1
###Code
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are:",v)
###Output
[[-6 3]
[ 4 5]]
The eigenvalue/s is/are: [-7. 6.]
The right eigenvectors are: [[-0.9486833 -0.24253563]
[ 0.31622777 -0.9701425 ]]
###Markdown
Example 2
###Code
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
## Define Matrices
A = np.array([[-12, 3], [4, 1]])
inv_A = np.linalg.inv(A)
B = np.array([0, 0])
## Solve
X = np.linalg.solve(A, B)
print(X)
X = np.dot(inv_A, B)
print(X)
## Define Matrix A
A = np.array([[-6, 3], [4, 5]])
print("Vector A:\n", A)
## Solve for eigenvalues and eigenvectors
w, v = eig(A)
# v = v.round()
print("Eigenvalues:\n", w)
print("Eigenvectors:\n", v)
###Output
Vector A:
[[-6 3]
[ 4 5]]
Eigenvalues:
[-7. 6.]
Eigenvectors:
[[-0.9486833 -0.24253563]
[ 0.31622777 -0.9701425 ]]
###Markdown
Example 2
###Code
## Define Matrix A
A = np.array([[2, 2, 4], [1, 3, 5], [2, 3, 4]])
## Solve for eigenvalues and eigenvectors
w, v = eig(A)
print("Eigenvalues:\n", w)
print("Eigenvectors:\n", v)
w = w.round()
v = v.round()
print("Eigenvalues:\n", w)
print("Eigenvectors (1):\n", v)
c = np.dot(A ,v)
print(c)
###Output
Eigenvalues:
[ 8.80916362 0.92620912 -0.73537273]
Eigenvectors:
[[-0.52799324 -0.77557092 -0.36272811]
[-0.604391 0.62277013 -0.7103262 ]
[-0.59660259 -0.10318482 0.60321224]]
Eigenvalues:
[ 9. 1. -1.]
Eigenvectors (1):
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np #Import Library
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]]) #Creation of a 2x2 matrix named A
print(A) #Displays matrix A
inv_A = np.linalg.inv(A) #Inverse the values of matrix A
print(inv_A) #Displays the inverse of matrix A
B = np.array([[0],[0]]) #Creation of a 2x2 matrix named B
print(B) #Displays matrix B
X = np.dot(inv_A,B) #Dot product of the inverse of matrix A and matrix B
print(X) #Displays the dot product
#X = solve(A,B)
#print(X)
###Output
[[-12 3]
[ 4 1]]
[[-0.04166667 0.125 ]
[ 0.16666667 0.5 ]]
[[0]
[0]]
[[0.]
[0.]]
###Markdown
Example 1
###Code
A = np.array([[-6,3],[4,5]]) #Creation of a 2x2 matrix named A
print(A) #Displays matrix A
w,v = np.linalg.eig(A)
#Display the output
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
###Output
[[-6 3]
[ 4 5]]
The eigenvalue/s is/are: [-7. 6.]
The right eigenvectors are: [[-0.9486833 -0.24253563]
[ 0.31622777 -0.9701425 ]]
###Markdown
Example 2
###Code
A = np.array([[2,2,4],[1,3,5],[2,3,4]]) #Creation of a 3x3 matrix named A
print(A) #Displays matrix A
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
#Compute eigenvalues and right eigenvectors of a square array
c = np.dot(A,t.round()) #dot product of A and t
print(c) #Displays the final output
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
B = np.array([[0],[0]])
print(B)
inv_A = np.linalg.inv(A)
print(inv_A)
X = np.dot(inv_A,B)
print(X)
X = np.linalg.solve(A,B)
print(X)
#Example 1
A = np.array(([-6,3],[4,5]))
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are",w)
print("The right eigenvectors are",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from scipy.linalg import solve
#from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
X = solve(A,B)
print(X)
#Example 1
A = np.array([[6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalues is/are:",v)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A =np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
f = np.array([[-1],[-1],[-1]])
print(f)
g = np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from scipy.linalg import solve
#from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
X = solve(A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalues is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
f = np.array([[-1],[-1],[-1]])
print(f)
g = np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A= np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X= np.dot(inv_A,B)
print(X)
#X =solve(A,B)
#print(X)
#example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalues is/are",w)
print("The right eigenvectors are",v)
#x = v.round()
#print(x)
#example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
w,v = np.linalg.eig(A)
print(w.round())
print(v.round())
c =np.dot(A,v.round())
print(c)
f =np.array([[-1],[-1],[-1]])
print(f)
g =np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
#X = solve(A,B)
#print(X)
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
#print("The eigenvalue/s is/are:",s)
#print("The right eigenvectors are:",t)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
Examples
###Code
# Example 1
import numpy as np
#from numpy.linalg import solve
from numpy.linalg import eig
A = np.array([[-12,3],
[4,1]])
print(A, "\n")
inv_A = np.linalg.inv(A)
print(inv_A, "\n")
B = np.array([[0], [0]])
print(B, "\n")
X = np.dot(inv_A, B)
print(X, "\n")
# X = solve(A, B)
# print(X, "\n")
# Example 2
A = np.array([[2,2,4],
[1,3,5],
[2,3,4]])
print(A, "\n")
s,t = np.linalg.eig(A)
print(s.round(), "\n\n", t.round(), "\n")
c = np.dot(A,t)
print(c, "\n")
f = np.array([[-1],
[-1],
[-1]])
print(f, "\n")
g = np.dot(A, f)
print(g, "\n")
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A=np.array([[-12,3],[4,1]])
print(A)
inv_A=np.linalg.inv(A)
print(inv_A)
B=np.array([[0],[0]])
print(B)
X=np.dot(inv_A,B)
print(X)
#X=solve(A,B)
#print(X)
#Example 1
A=np.array([[-6,3],[4,5]])
print(A)
w,v=np.linalg.eig(A)
print("The eigenvalue/s is/are",w)
print("The right eigenvectors are",v)
x=v.round()
print(x)
#Example 2
A=np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t=np.linalg.eig(A)
print(s.round())
print(t.round())
c=np.dot(A,t.round())
print(c)
f=np.array([[-1],[-1],[-1]])
print(f)
g=np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np #Import Library
from numpy.linalg import eig
#from scipy.linalg import solve
A = np.array([[-12, 3],[4, 1]]) # create matrix A
print(f'Matrix A:\n {A}') # print matrix A
inv_A = np.linalg.inv(A) #inverse of matrix A
print(f'\nInverse of Matrix A:\n{inv_A}') #print the inverse of matrix A
B = np.array([[0],[0]]) #create matrix B
print(f'\nMatrix B:\n {B}') #print matrix B
X = np.dot(inv_A, B) #dot product of the inverse of matrix A and matrix B
print(f'\nDot product:\n{X}') #print the solved dot product - wrong
###Output
Matrix A:
[[-12 3]
[ 4 1]]
Inverse of Matrix A:
[[-0.04166667 0.125 ]
[ 0.16666667 0.5 ]]
Matrix B:
[[0]
[0]]
Dot product:
[[0.]
[0.]]
###Markdown
Example 1
###Code
A = np.array([[-6,3],[4,5]]) #creation of Matrix
print(f'Matrix A:\n{A}') #to print or display
w,v = np.linalg.eig(A) # use the eig method in order to get the eigenvalue and eigenvector of a square matrix A
#to print or display the eigenvalues and eigenvectors
print(f'The eigenvalue/s is/are:\n {w}')
print(f'The right eigenvectors are:\n {v}')
#x = v.round()
#print(x) # print the rounded eigenvectors
###Output
Matrix A:
[[-6 3]
[ 4 5]]
The eigenvalue/s is/are:
[-7. 6.]
The right eigenvectors are:
[[-0.9486833 -0.24253563]
[ 0.31622777 -0.9701425 ]]
###Markdown
Example 2
###Code
A = np.array([[2,2,4],[1,3,5],[2,3,4]]) #Creation of Matrix A
print(A) #to print or display
s,t = np.linalg.eig(A) # use the eig method in order to get the eigenvalue and eigenvector of a square matrix A
#print or display eigenvalues and eigenvectors
print(f'\nThe Eigenvalue/s is/are: {s.round()}')
print(f'\nThe right Eigenvectors are: \n{t.round()}\n')
c = np.dot(A,t.round()) #dot product of A and t
print(c) #to print or display
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
The Eigenvalue/s is/are: [ 9. 1. -1.]
The right Eigenvectors are:
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A=np.array([[-12,3],[4,1]])
print(A)
inv_A=np.linalg.inv(A)
print(inv_A)
B=np.array([[0],[0]])
print(B)
X=np.dot(inv_A,B)
print(X)
#X=solve(A,B)
#print(X)
#Example 1
A=np.array([[-6,3],[4,5]])
print(A)
w,v=np.linalg.eig(A)
print("The eigenvalue/s is/are",w)
print("The right eigenvectors are",v)
x=v.round()
print(x)
#Example 2
A=np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t=np.linalg.eig(A)
print(s.round())
print(t.round())
c=np.dot(A,t.round())
print(c)
f=np.array([[-1],[-1],[-1]])
print(f)
g=np.dot(A,f)
print(g)
###Output
_____no_output_____
###Markdown
###Code
import numpy as np
from scipy.linalg import solve
#from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
X = solve(A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalues is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from scipy.linalg import solve
#from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
X = solve(A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalues is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
f = np.array([[-1],[-1],[-1]])
print(f)
g = np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from scipy.linalg import solve
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
print()
inv_A = np.linalg.inv(A)
print(inv_A)
print()
B = np.array([[0],[0]])
print(B)
print()
X = np.dot(inv_A,B)
print(X)
print()
X = solve(A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
print()
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w)
print()
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
print()
s,t = np.linalg.eig(A)
print(s.round())
print()
print(t.round())
print()
c = np.dot(A,t.round())
print(c)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np #Import Library
from numpy.linalg import eig
A = np.array([[-12, 3],[4, 1]]) #matrix A
print(f'Matrix A:\n {A}') #print matrix A
inv_A = np.linalg.inv(A) #inverse of matrix A
print(f'\nInverse of Matrix A:\n{inv_A}') #print inverse of matrix A
B = np.array([[0],[0]]) #matrix B
print(f'\nMatrix B:\n {B}') #print matrix B
X = np.dot(inv_A, B) #dot product of inverse matrix A and matrix B
print(f'\nDot product:\n{X}') #print dot product - wrong
###Output
Matrix A:
[[-12 3]
[ 4 1]]
Inverse of Matrix A:
[[-0.04166667 0.125 ]
[ 0.16666667 0.5 ]]
Matrix B:
[[0]
[0]]
Dot product:
[[0.]
[0.]]
###Markdown
Example 2
###Code
A = np.array([[-6,3],[4,5]]) #matrix
print(f'Matrix A:\n{A}') #print/display
w,v = np.linalg.eig(A) # use the eig method in order to get the eigenvalue and eigenvector of a square matrix A
#print/display the eigenvalues and eigenvectors
print(f'The eigenvalue/s is/are:\n {w}')
print(f'The right eigenvectors are:\n {v}')
###Output
Matrix A:
[[-6 3]
[ 4 5]]
The eigenvalue/s is/are:
[-7. 6.]
The right eigenvectors are:
[[-0.9486833 -0.24253563]
[ 0.31622777 -0.9701425 ]]
###Markdown
Example 1
###Code
A = np.array([[2,2,4],[1,3,5],[2,3,4]]) #matrix A
print(A) #print/display
s,t = np.linalg.eig(A) #eig method to get the eigenvalue and eigenvector of square matrix A
#print/display eigenvalues and eigenvectors
print(f'\nThe Eigenvalue/s is/are: {s.round()}')
print(f'\nThe right Eigenvectors are: \n{t.round()}\n')
c = np.dot(A,t.round()) #dot product of A and t
print(c) #print/display
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
The Eigenvalue/s is/are: [ 9. 1. -1.]
The right Eigenvectors are:
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as LA
from numpy.linalg import eig
A = LA.array([[-12,3],[4,1]])
print(A)
inv_A = LA.linalg.inv(A)
print(inv_A)
B = LA.array([[0],[0]])
print(B)
A = LA.array([[-6,3],[4,5]])
print(A)
w,v = LA.linalg.eig(A)
print("The eigenvalue is/are",w)
print("The eigenvectors is/are",v)
A = LA.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = LA.linalg.eig(A)
print(s.round())
print(t.round())
c=LA.dot(A,t.round())
print(c)
f = LA.array([[-1],[-1],[-1]])
print(f)
g=LA.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
#X = solve(A,B)
#print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
print("")
inv_A = np.linalg.inv(A)
print(inv_A)
print("")
B = np.array([[0],[0]])
print(B)
print("")
X = np.dot(inv_A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are", w)
print("The right eigenvectors are:", v)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
f = np.array([[-1],[-1],[-1]])
print(f)
print(" ")
g = np.dot(A, f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
#X = solve(A,B)
#print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-5,3],[8,6]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B=np.array([[0],[0]])
print(B)
X=np.dot(inv_A,B)
print(X)
#Example 1
A=np.array([[-3,8],[4,9]])
print(A)
w,v=np.linalg.eig(A)
print("The eigenvalue/s is/are",w)
print("The eigenvector/s is/are",v)
#Example 2
A=np.array([[1,2,3],[4,5,6],[7,8,9]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w.round())
print("The eigenvector/s is/are:",v.round())
x=np.dot(A,v.round())
print(x)
X=np.array([[-1],[1],[-0]])
Y=np.array([[-1],[1],[-1]])
Z=np.array([[-1],[0],[-1]])
d=np.dot(A,X)
print(d)
e=np.dot(A,Y)
print(e)
f=np.dot(A,Z)
print(f)
###Output
[[1 2 3]
[4 5 6]
[7 8 9]]
The eigenvalue/s is/are: [16. -1. -0.]
The eigenvector/s is/are: [[-0. -1. 0.]
[-1. -0. -1.]
[-1. 1. 0.]]
[[ -5. 2. -2.]
[-11. 2. -5.]
[-17. 2. -8.]]
[[1]
[1]
[1]]
[[-2]
[-5]
[-8]]
[[ -4]
[-10]
[-16]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
print()
inv_A = np.linalg.inv(A)
print(inv_A)
print()
B = np.array([[0],[0]])
print(B)
print()
X = np.dot(inv_A,B)
print(X)
print()
X = np.linalg.solve(A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
print()
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w)
print()
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
print()
s,t = np.linalg.eig(A)
print(s.round())
print()
print(t.round())
print()
c = np.dot(A,t.round())
print(c)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B=np.array([[0],[0]])
print(B)
X=np.dot(inv_A,B)
print(X)
A=np.array([[-6,3],[4,5]])
print(A)
w,v=np.linalg.eig(A)
print("The eigenvalue/s is/are",w)
print("The eigenvector/s is/are",v)
A=np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w.round())
print("The eigenvector/s is/are:",v.round())
x=np.dot(A,v.round())
print(x)
X=np.array([[-1],[-1],[-0]])
Y=np.array([[-1],[1],[-1]])
Z=np.array([[-1],[-0],[-1]])
d=np.dot(A,X)
print(d)
e=np.dot(A,Y)
print(e)
f=np.dot(A,Z)
print(f)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
The eigenvalue/s is/are: [ 9. 1. -1.]
The eigenvector/s is/are: [[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
[[-4]
[-4]
[-5]]
[[-4]
[-3]
[-3]]
[[-6]
[-6]
[-6]]
###Markdown
###Code
import numpy as np #Import numpy lib
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]]) #Create a 2x2 matrix A
print(A) #Displays matrix A
inv_A = np.linalg.inv(A) #Inverse of Matrix A
print(inv_A) #Displays inverse of matrix A
B = np.array([[0],[0]]) #Create a 2x2 matrix B
print(B) #Displays matrix B
X = np.dot(inv_A,B) #Dot product of inverse of matrix A and matrix B
print(X) #Displays the dot product
#X = solve(A,B)
#print(X)
###Output
[[-12 3]
[ 4 1]]
[[-0.04166667 0.125 ]
[ 0.16666667 0.5 ]]
[[0]
[0]]
[[0.]
[0.]]
###Markdown
Example 1
###Code
A = np.array([[-6,3],[4,5]]) #Create a 2x2 matrix A
print(A) #Displays matrix A
w,v = np.linalg.eig(A)
#Display the output
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
###Output
[[-6 3]
[ 4 5]]
The eigenvalue/s is/are: [-7. 6.]
The right eigenvectors are: [[-0.9486833 -0.24253563]
[ 0.31622777 -0.9701425 ]]
###Markdown
Example 2
###Code
A = np.array([[2,2,4],[1,3,5],[2,3,4]]) #Create a 3x3 matrix A
print(A) #Displays matrix A
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
#Compute eigenvalues and right eigenvectors of a square array
c = np.dot(A,t.round()) #dot product of A and t
print(c) #Displays the final output
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
a = np.array([[-12,3],[4,1]])
b = np.array([[0],[0]])
x = np.linalg.solve(a,b)
print(x)
c = np.array([[-6,3],[4,5]])
w,v = np.linalg.eig(c)
print("eigenvalue:",w)
print("eigenvector:",v)
d = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(d)
s,t = np.linalg.eig(d)
print(s.round())
print(t.round())
p = np.dot(d,t.round())
print(p)
###Output
_____no_output_____
###Markdown
###Code
import numpy as np
from scipy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
#X = solve(A,B)
#print(X)
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w)
print("The right eigenvector are:",v)
#X = v.round()
#print(X)
#Example2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t=np.linalg.eig(A)
print(s.round())
print(t.round())
c=np.dot(A,t.round())
print(c)
f = np.array([[0],[-1],[1]])
print(f)
g = np.dot(A,f)
print(g)
###Output
[[ 0]
[-1]
[ 1]]
[[2]
[2]
[1]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
print()
inv_A = np.linalg.inv(A)
print(inv_A)
print()
B = np.array([[0],[0]])
print(B)
print()
X = np.dot(inv_A,B)
print(X)
print()
X = np.linalg.solve(A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
print()
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w)
print()
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
print()
s,t = np.linalg.eig(A)
print(s.round())
print()
print(t.round())
print()
c = np.dot(A,t.round())
print(c)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np #Import numpy lib
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]]) #Create a 2x2 matrix A
print(A) #This is to display matrix A
inv_A = np.linalg.inv(A) #The inverse of Matrix A
print(inv_A) #This is to display inverse of matrix A
B = np.array([[0],[0]]) #Create a 2x2 matrix B
print(B) #This displays the matrix B
X = np.dot(inv_A,B) #Dot product of inverse of matrix A and matrix B
print(X) #Displays the dot product
#X = solve(A,B)
#print(X)
import numpy as np
A = np.array([[-6,3],[4,5]]) #Create a 2x2 matrix A
print(A) #Displays matrix A
w,v = np.linalg.eig(A)
#Display the output
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
import numpy as np
A = np.array([[2,2,4],[1,3,5],[2,3,4]]) #Create a 3x3 matrix A
print(A) #Displays matrix A
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
#Compute eigenvalues and right eigenvectors of a square array
c = np.dot(A,t.round()) #Dot product of A and t
print(c) #Final output
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
#X = solve(A,B)
#print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
f = np.array([[-1],[-1],[-1]])
print(f)
g = np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
#X = solve(A,B)
#print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A=np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t=np.linalg.eig(A)
#print("The eigenvalue/s is/are:",s)
#print("The right eigenvectors are:",t)
print(s.round())
print(t.round())
c=np.dot(A,t.round())
print(c)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from scipy.linalg import solve
#from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
X = solve(A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalues is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
f = np.array([[-1],[-1],[-1]])
print(f)
g = np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
##Example 1
A=np.array([[-1,3],[2,5]])
print(A)
inv_A=np.linalg.inv(A)
print(inv_A)
B=np.array([[0],[0]])
print(B)
X=np.dot(inv_A,B)
print(X)
#Example 2
A=np.array([[4,2,0],[6,6,6],[0,6,9]])
print(A)
s,t=np.linalg.eig(A)
print(s.round())
print(t.round())
c=np.dot(A,t.round())
print(c)
f=np.array([[-1],[-1],[-1]])
print(f)
g=np.dot(A,f)
print(g)
###Output
_____no_output_____
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
X = np.linalg.solve(A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A=np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t=np.linalg.eig(A)
print(s.round())
print(t.round())
c=np.dot(A,t.round())
print(c)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np #Import numpy lib
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]]) #Create a 2x2 matrix A
print(A) #Displays matrix A
inv_A = np.linalg.inv(A) #Inverse of Matrix A
print(inv_A) #Displays inverse of matrix A
B = np.array([[0],[0]]) #Create a 2x2 matrix B
print(B) #Displays matrix B
X = np.dot(inv_A,B) #Dot product of inverse of matrix A and matrix B
print(X) #Displays the dot product
#X = solve(A,B)
#print(X)
###Output
[[-12 3]
[ 4 1]]
[[-0.04166667 0.125 ]
[ 0.16666667 0.5 ]]
[[0]
[0]]
[[0.]
[0.]]
###Markdown
Example 1
###Code
A = np.array([[-6,3],[4,5]]) #Create a 2x2 matrix A
print(A) #Displays matrix A
w,v = np.linalg.eig(A)
#Display the output
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
###Output
[[-6 3]
[ 4 5]]
The eigenvalue/s is/are: [-7. 6.]
The right eigenvectors are: [[-0.9486833 -0.24253563]
[ 0.31622777 -0.9701425 ]]
###Markdown
Example 2
###Code
A = np.array([[2,2,4],[1,3,5],[2,3,4]]) #Create a 3x3 matrix A
print(A) #Displays matrix A
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
#Compute eigenvalues and right eigenvectors of a square array
c = np.dot(A,t.round()) #dot product of A and t
print(c) #Displays the final output
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
#from scipy.linalg import solve
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
X = np.linalg.solve(A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalues is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
f = np.array([[-1],[-1],[-1]])
print(f)
g = np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
Eigenvalue and EigenVector
###Code
import numpy as np
from scipy.linalg import solve
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
Y = solve(A,B)
print(Y)
##Eigen Method
#Example 1
import numpy as np
from numpy.linalg import eig
A = np.array([[-6,3],[4,5]])
print(A)
##w is for eigenvalue and v is for eigenvector
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are",v)
x=v.round()
print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are ",w)
print("The eigenvectors are ",v)
print(w.round())
print(v.round())
C = np.dot(A,v.round())
print(C)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
The eigenvalue/s is/are [ 8.80916362 0.92620912 -0.73537273]
The eigenvectors are [[-0.52799324 -0.77557092 -0.36272811]
[-0.604391 0.62277013 -0.7103262 ]
[-0.59660259 -0.10318482 0.60321224]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from scipy.linalg import solve
#from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
#X = solve(A,B)
#print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are:",w)
#Example 2
A = np.array ([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
f = np.array([[-1],[-1],[-1]])
print(f)
g=np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
X = np.linalg.solve(A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np #To import Library
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]]) #Making 2x2 Matrix (Named A)
print(A) #Displays A Matrix
inv_A = np.linalg.inv(A) #Inverse all the values of Matrix A
print(inv_A) #Displays the iversed output of Matrix A
B = np.array([[0],[0]]) #CMaking 2x2 Matrix (Named B)
print(B) #Displays B Matrix
X = np.dot(inv_A,B) #Dot product of the inverse of matrix A and matrix B
print(X) #Displays the dot product #X = solve(A,B) #print(X)
A = np.array([[-6,3],[4,5]]) #Making 2x2 Matrix (Named A)
print(A) #Displays A Matrix
w,v = np.linalg.eig(A)
#Displays the output
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
A = np.array([[2,2,4],[1,3,5],[2,3,4]]) #Making 3x3 Matrix (Named A)
print(A) #Displays A Matrix
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
#Computes the eigenvalues and right eigenvectors of a square array
c = np.dot(A,t.round()) #dot product of A and t
print(c) #Displays the final output
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
###Markdown
###Code
import numpy as np
from scipy.linalg import solve
#from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
X = solve(A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalues is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A = np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t = np.linalg.eig(A)
print(s.round())
print(t.round())
c = np.dot(A,t.round())
print(c)
f = np.array([[-1],[-1],[-1]])
print(f)
g = np.dot(A,f)
print(g)
###Output
[[-1]
[-1]
[-1]]
[[-8]
[-9]
[-9]]
###Markdown
###Code
import numpy as np
from numpy.linalg import eig
A = np.array([[-12,3],[4,1]])
print(A)
inv_A = np.linalg.inv(A)
print(inv_A)
B = np.array([[0],[0]])
print(B)
X = np.dot(inv_A,B)
print(X)
#Example 1
A = np.array([[-6,3],[4,5]])
print(A)
w,v = np.linalg.eig(A)
print("The eigenvalue/s is/are:",w)
print("The right eigenvectors are:",v)
#x = v.round()
#print(x)
#Example 2
A=np.array([[2,2,4],[1,3,5],[2,3,4]])
print(A)
s,t=np.linalg.eig(A)
print(s.round())
print(t.round())
c=np.dot(A,t.round())
print(c)
###Output
[[2 2 4]
[1 3 5]
[2 3 4]]
[ 9. 1. -1.]
[[-1. -1. -0.]
[-1. 1. -1.]
[-1. -0. 1.]]
[[-8. 0. 2.]
[-9. 2. 2.]
[-9. 1. 1.]]
|
Aula01.ipynb | ###Markdown
###Code
import pandas as pd
fonte = "https://github.com/alura-cursos/imersao-dados-2-2020/blob/master/MICRODADOS_ENEM_2019_SAMPLE_43278.csv?raw=true"
dados = pd.read_csv(fonte)
dados.head()
dados.shape
dados["SG_UF_RESIDENCIA"]
dados.columns.values
dados[["SG_UF_RESIDENCIA", "Q025"]]
dados["SG_UF_RESIDENCIA"]
dados["SG_UF_RESIDENCIA"].unique()
len(dados["SG_UF_RESIDENCIA"].unique())
dados["SG_UF_RESIDENCIA"].value_counts()
dados["NU_IDADE"].value_counts()
dados["NU_IDADE"].value_counts().sort_index()
dados["NU_IDADE"].hist()
dados["NU_IDADE"].hist(bins = 20, figsize = (10,8))
dados.query("IN_TREINEIRO == 1")["NU_IDADE"].value_counts().sort_index()
dados["NU_NOTA_REDACAO"].hist(bins = 20, figsize=(8, 6))
dados["NU_NOTA_LC"].hist(bins = 20, figsize=(8, 6))
dados["NU_NOTA_REDACAO"].mean()
dados["NU_NOTA_REDACAO"].std()
provas = ["NU_NOTA_CN","NU_NOTA_CH","NU_NOTA_MT","NU_NOTA_LC","NU_NOTA_REDACAO"]
dados[provas].describe()
dados["NU_NOTA_LC"].quantile(0.1)
dados["NU_NOTA_LC"].plot.box(grid = True, figsize=(8,6))
dados[provas].boxplot(grid=True, figsize= (10,8))
###Output
_____no_output_____
###Markdown
Desafio 01: Proportion of candidates by age. Desafio 02: Find out which states the 13-year-old candidates are from. Desafio 03: Add a title to the plot. Desafio 04: Plot the age histograms for "treineiro" (practice) and non-practice candidates. Desafio 05: Compare the score distributions of the English and Spanish foreign-language exams. Desafio 06: Explore the matplotlib/pandas documentation and visualization options and produce new visualizations. (A short sketch for Desafios 01 and 03 follows the empty cell below.)
###Code
###Output
_____no_output_____
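###Markdown
A minimal sketch of one possible approach to Desafio 01 and Desafio 03, assuming the `dados` DataFrame and column names loaded in the cells above (the title text is arbitrary):
###Code
# Desafio 01 (sketch): proportion of candidates by age
proporcao_idade = dados["NU_IDADE"].value_counts(normalize=True).sort_index()
print(proporcao_idade.head())

# Desafio 03 (sketch): add a title to the age histogram
ax = dados["NU_IDADE"].hist(bins=20, figsize=(10, 8))
ax.set_title("Age distribution of ENEM 2019 candidates")
###Output
_____no_output_____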
###Markdown
- QuarentenaDados study (Kevin's suggestion, and quite interesting). Dates: April 20 to 27, 2020. From what I have seen so far, it is organized as follows: Aula 1 - Retrieve the data and run some statistical analyses; Aula 2 - Inspect the data through plots and apply some treatments to the data; Aula 3 - More on plots, exploring possible correlations; Aula 4 - (to be assessed); Aula 5 - (to be assessed)
###Code
###Output
_____no_output_____
###Markdown
**Estudo 01 - Sources and operations on datasets** (Idea) What are the major repositories? There are interesting datasets on education and on machine time series (Tennessee Eastman Process). (Idea) Creating a GitHub repository to share the datasets across experiments seems worthwhile. Key points: - Loading / understanding the format / understanding its structure / which frameworks and their typical structures / typical (subset) operations
###Code
# Carregar os dados
a = 10
print(a)
###Output
10
###Markdown
Loading the data with pandas: import / read_csv / DataFrame operations (columns and rows) / counting / other useful operations. Interesting source: Pandas Tutorial: DataFrames in Python / DataCamp
###Code
import pandas as pd
movies = pd.read_csv("https://raw.githubusercontent.com/otaciliojpereira/data-science/master/movies.csv")
# movies.head
# ?movies
avaliacoes = pd.read_csv("https://raw.githubusercontent.com/otaciliojpereira/data-science/master/ratings.csv")
# movies.columns = ['Id', 'Título', 'Gênero']
# movies.head
movies
# movies.mean()
len(movies)
###Output
_____no_output_____
###Markdown
Query operations on the data table, similar to SQL - Query /
###Code
# avaliacoes.head()
# avaliacoes.describe()
# avaliacoes.query("userId==1").count()
# avaliacoes.head()
avaliacoes.groupby(['userId']).mean()
###Output
_____no_output_____
###Markdown
Aula01 0.0. Loading Libraries
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
1.0. Loading Data
###Code
data = pd.read_csv('kc_house_data.csv')
data.head()
data.dtypes
data['date']= pd.to_datetime(data['date'])
###Output
_____no_output_____
###Markdown
Questions asked by the CEO: 1. How many houses are available for purchase?
###Code
data['bedrooms'].isnull().sum()  # check for missing values in 'bedrooms'
len(data)                        # total number of houses in the dataset (21613)
###Output
_____no_output_____
###Markdown
2. How many attributes do the houses have?
###Code
# The attribute count does not include the 'id' and 'date' columns
len(data.columns.drop(['id','date']).tolist())
###Output
_____no_output_____
###Markdown
3. What are the houses' attributes?
###Code
data.columns.drop(['id','date'])
###Output
_____no_output_____
###Markdown
4. Which is the most expensive house (highest sale price)?
###Code
data[['id','price']].sort_values('price',ascending =False).reset_index()['id'][0]
###Output
_____no_output_____
###Markdown
5. Which house has the largest number of bedrooms?
###Code
data[['id','bedrooms']].sort_values('bedrooms',ascending =False).reset_index()['id'][0]
###Output
_____no_output_____
###Markdown
6. What is the total number of bedrooms in the dataset?
###Code
data['bedrooms'].sum()
###Output
_____no_output_____
###Markdown
7. How many houses have 2 bathrooms?
###Code
data[data['bathrooms']==2][['id','bathrooms']].value_counts().sum()
###Output
_____no_output_____
###Markdown
8. What is the average price of all houses in the dataset?
###Code
round(data['price'].mean(), 2)
###Output
_____no_output_____
###Markdown
9. What is the average price of houses with 2 bathrooms?
###Code
round(data[data['bathrooms']==2]['price'].mean(), 2)
###Output
_____no_output_____
###Markdown
10. What is the minimum price among houses with 3 bedrooms?
###Code
data[data['bedrooms']==3]['price'].min()
###Output
_____no_output_____
###Markdown
11. How many houses have more than 300 square meters of living room?
###Code
data['m2'] = 0.093 * data['sqft_living']  # 1 sqft is roughly 0.093 m2
data[data['m2']>300][['id','m2']].value_counts().sum()
###Output
_____no_output_____
###Markdown
12. How many houses have more than 2 floors?
###Code
data[data['floors']>2][['id', 'floors']].value_counts().sum()
###Output
_____no_output_____
###Markdown
13. How many houses have a waterfront view?
###Code
data[data['waterfront']==1]['waterfront'].value_counts().sum()
###Output
_____no_output_____
###Markdown
14. Of the waterfront houses, how many have 3 bedrooms?
###Code
data.loc[(data['waterfront']==1) & (data['bedrooms']==3), 'id'].value_counts().sum()
###Output
_____no_output_____
###Markdown
15. How many houses have more than 300 square meters of living room and more than two bathrooms?
###Code
data.loc[(data['m2']>300)&(data['bathrooms']>2), 'id'].value_counts().sum()
###Output
_____no_output_____
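###Markdown
Beyond the CEO's list, a small sketch (assuming the `data` DataFrame and the `m2` column created above) of how several per-group questions can be answered at once with `groupby`:
###Code
# Sketch: number of houses, average price and average living area grouped by bedrooms
resumo = data.groupby("bedrooms").agg(
    n_houses=("id", "count"),
    mean_price=("price", "mean"),
    mean_m2=("m2", "mean"),
)
print(resumo.round(2))
###Output
_____no_output_____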
###Markdown
###Code
###Output
_____no_output_____
###Markdown
The print() command
###Code
print('Olá Mundo')
print("Palmenras 'não' tem Mundial")
7*8
print("Olá Mundo")
print(25//4)
a
###Output
_____no_output_____
###Markdown
Introduction to Data Science and Machine Learning - Data ICMC-USP Practical session, Aula 01 - k-Nearest Neighbors. This material was developed by **Data**, a machine learning and data science extension group formed by students of the Instituto de Ciências Matemáticas e de Computação at USP. To learn more about Data's activities, visit our website and follow us on social media:- [Site](http://data.icmc.usp.br/)- [Twitter](https://twitter.com/data_icmc)- [LinkedIn](https://www.linkedin.com/school/data-icmc/)- [Facebook](https://www.facebook.com/dataICMC/) Enjoy the material!
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
###Output
_____no_output_____
###Markdown
Let's start by loading the data we will use in our task. These data provide various pieces of information about different wines, and the goal is to classify whether the wine is good (the target is the *is_good* column). This dataset is a modified version of a publicly available wine-quality dataset.
###Code
df = None
##############################################################
# PREENCHA AQUI: #
# - Leia os dados de data.csv com pd.read_csv e guarde #
# na variável df #
##############################################################
df = pd.read_csv('data_aula_01.csv')
##############################################################
df.head()
##############################################################
# PREENCHA AQUI: #
# - Guarde o shape do DataFrame na viarável shape #
##############################################################
shape = df.shape
##############################################################
print(shape)
for col in df.columns:
print(col)
###Output
fixed acidity
volatile acidity
citric acid
residual sugar
chlorides
free sulfur dioxide
total sulfur dioxide
density
pH
sulphates
alcohol
is good
###Markdown
Putting the data on the same scale: for many algorithms it is important to have the data on a single common scale, and kNN is one of those cases. To understand this better, consider the following example, where the distance between the two points is given by$$\begin{align*}\text{dist}(x^{(1)}, x^{(2)}) &= \sqrt{(x^{(1)}_1 - x^{(2)}_1)^2 + (x^{(1)}_2 - x^{(2)}_2)^2} \\ &= \sqrt{(3 - 2)^2 + (10000 - 9000)^2} \\ &= \sqrt{1 + 1000000} \\ &= \sqrt{1000001} \\ &= 1000.0005\end{align*}$$Because the scales are so different, the first attribute ends up having practically no influence on the distance. It is important to realize that this kind of situation occurs frequently in real datasets. There are several ways to handle it; here we use a technique called **Min-Max Scaling**, which transforms the data so that it lies in the interval $[0, 1]$. The transformation formula is:$$x^{(i)}_j \leftarrow \frac{x^{(i)}_j - min(x_j)}{max(x_j) - min(x_j)}$$In words, we subtract the attribute's minimum value and divide by its range (the difference between the maximum and the minimum). Now that we understand the idea, we can apply it to all of our columns using scikit-learn's built-in scaler.
###Code
scaler = MinMaxScaler()
scaler.fit(df)
df = pd.DataFrame(scaler.transform(df), columns=df.columns)
###Output
_____no_output_____
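###Markdown
A minimal sketch showing that the Min-Max formula above, applied by hand to a toy array (values taken from the two-point example, with an arbitrary third row), matches scikit-learn's `MinMaxScaler`:
###Code
import numpy as np
from sklearn.preprocessing import MinMaxScaler

toy = np.array([[3.0, 10000.0], [2.0, 9000.0], [4.0, 9500.0]])            # toy data
manual = (toy - toy.min(axis=0)) / (toy.max(axis=0) - toy.min(axis=0))    # the formula above
sklearn_scaled = MinMaxScaler().fit_transform(toy)
print(np.allclose(manual, sklearn_scaled))   # expected: True
###Output
_____no_output_____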
###Markdown
Splitting the data into training and validation sets
###Code
target = 'is good'
features = df.columns.to_list()
features.remove(target)
X_train, X_val, y_train, y_val = train_test_split(df[features], df[target], test_size=0.2, random_state=0)
print(X_train.shape)
print(y_train.shape)
print(X_val.shape)
print(y_val.shape)
###Output
(1279, 11)
(1279,)
(320, 11)
(320,)
###Markdown
Training a model
###Code
clf = None
y_pred = None
##############################################################
# PREENCHA AQUI: #
# - Instancie um KNeighborsClassifier na variável clf #
# - Treine o classificador com X_train e y_train #
# - Faça a predições para os dados de validade e salve #
# em y_pred #
##############################################################
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_val)
##############################################################
###Output
_____no_output_____
###Markdown
Evaluating the trained model
###Code
acc = None
##############################################################
# PREENCHA AQUI: #
# - Calcule a acurácia do modelo que você treinou usando a #
# função accuracy_score, salve o resultado e o imprima #
##############################################################
acc = accuracy_score(y_val, y_pred)
##############################################################
print(f'A acurácia foi de {acc * 100:.2f}%')
###Output
A acurácia foi de 71.56%
###Markdown
Exploring variations of the model. Number of neighbors: the main kNN hyperparameter is precisely the number of neighbors, represented by k. By default `KNeighborsClassifier()` uses five neighbors; this can be changed through its `n_neighbors` parameter. Distance metric: as seen in the lecture, different distance metrics between points can be used, and we saw the following two:- Euclidean distance => $dist(a, b) = \sqrt{\sum_i (a_i - b_i)^2}$- Manhattan distance => $dist(a, b) = \sum_i |a_i - b_i|$ scikit-learn, in turn, uses a generalization of these two distances called the **Minkowski** distance => $dist(a, b) = (\sum_i |a_i - b_i|^p)^\frac{1}{p}$. Note that with $p=2$ we get the Euclidean distance and with $p=1$ the Manhattan distance. By default the `KNeighborsClassifier()` class uses `p=2`.
###Code
n_vizinhos = [3, 5, 7, 9, 11, 13]
resultados = []
for k in n_vizinhos:
##############################################################
# PREENCHA AQUI: #
# - Crie um kNN com k vizinhos e utilizando distância #
# Manhattan #
# - Treine esse modelo com X_train e y_train #
# - Calcule a acurácia do modelo que você treinou e salve #
# o resultado na lista resultados #
##############################################################
clf = KNeighborsClassifier(n_neighbors=k, p=1)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_val)
acc = accuracy_score(y_val, y_pred)
resultados.append(acc)
##############################################################
for k, acc in zip(n_vizinhos, resultados):
print(f'{k:02d} vizinhos => Acurácia {acc * 100:.2f}%')
###Output
03 vizinhos => Acurácia 71.56%
05 vizinhos => Acurácia 66.56%
07 vizinhos => Acurácia 71.56%
09 vizinhos => Acurácia 74.06%
11 vizinhos => Acurácia 71.88%
13 vizinhos => Acurácia 73.12%
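###Markdown
As an optional extension (a sketch only; the parameter ranges are arbitrary), the same search over `n_neighbors` and the Minkowski `p` can be run with cross-validation using `GridSearchCV` instead of a manual loop:
###Code
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

param_grid = {"n_neighbors": [3, 5, 7, 9, 11, 13], "p": [1, 2]}
search = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5, scoring="accuracy")
search.fit(X_train, y_train)   # cross-validated search on the training split only
print(search.best_params_, f"CV accuracy: {search.best_score_ * 100:.2f}%")
###Output
_____no_output_____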
###Markdown
###Code
import sys
print(sys.version_info.serial)
print(sys.version_info.major)
print(sys.version_info.minor)
print(sys.version.title())
# Comando para mostrar uma string na tela
# print (arg1, ..., argN)
# os argumentos podem ser de qualquer tipo
print ("Olá Mundo")
# Entrada de um valor pelo teclado
# input(prompt)
# Lê valores do teclado, mostrando o prompt antes da pergunta
input("Digite seu nome : ")
input("Digite a sua idade : ")
a=10
print ("O valor de A = ",a)
print ("O valor de A = ",A) # Note que o nome da variável é a e não A
a=10 # variável numérica
nome="Fulano de tal" # variável string
print("Meu nome é:", nome, "e minha idade é:", a, "anos")
a=float(input("Digite um número:"))   # cast to float so the comparison below is numeric
b=float(input("Digite outro número:"))  # comparing the raw input() strings would be lexicographic
simbolo="<"
if ( a == b):
simbolo="=" # Se o valor de A for igual ao valor de B
else:
if ( a > b): # Se o valor de A for diferente do valor d B
simbolo=">" # Se o valor de A for maior que o valor de B
print(a,simbolo,b)
nome="Fulano de tal"
nomes=nome.split(" ")
for n in nomes:
print(n)
###Output
_____no_output_____
###Markdown
PROGRAM **CODE** LINES
###Code
###Output
_____no_output_____
###Markdown
Title in **bold**. Subtitle in *italics*. ~Strikethrough~ text. **Step 03) Inserting mathematical equations in note text** Example 1: $x \in [-5, 5]$ Example 2: $\sqrt{3x-1}+(1+x)^2$ Example 3: $e^x=\sum_{i=0}^\infty \frac{1}{i!}x^i$ Example 4: - $3x_1 + 6x_2 + x_3 \le 28$ - $7x_1 + 3x_2 + 2x_3 \le 37$ - $4x_1 + 5x_2 + 2x_3 \le 19$ - $x_1,x_2,x_3 \ge 0$ Example 5: Vectors $u_i(t) = x_i(t) + \beta(\hat{x}(t) - x_i(t)) + \beta \sum_{k = 1}^{n_v}(x_{i1,k}(t) - x_{i2,k}(t))$ $f(x_1, x_2) = 20 + e - 20\exp(-0.2 \sqrt{\frac{1}{n} (x_1^2 + x_2^2)}) - \exp(\frac{1}{n}(\cos(2\pi x_1) + \cos(2\pi x_2)))$ Example 6: Matrix > $A_{m,n} =\begin{pmatrix} a_{1,1} & a_{1,2} & \cdots & a_{1,n} \\ a_{2,1} & a_{2,2} & \cdots & a_{2,n} \\ \vdots & \vdots & \ddots & \vdots \\ a_{m,1} & a_{m,2} & \cdots & a_{m,n}\end{pmatrix}$ **Step 09) Basic Python principles**
###Code
###Output
_____no_output_____
###Markdown
This is a MAIN TITLE / SUBTITLE 1 / SUBTITLE 2
###Code
n1 = float(input("Digite o valor 1: "))
n2 = float(input("Digite o valor 2: "))
n3 = 20
n4 = n1 + n2 + n3
print ("Soma: " + str(n4))
#Se vc usar int obrigatório usar inteiro; logo, dará erro ao usar numero com ponto flutuante
n1 = int(input("Digite o valor 1: "))
n2 = int(input("Digite o valor 2: "))
n3 = 20
n4 = n1 + n2 + n3
print ("Soma: " + str(n4))
n3 = 20
n4 = n3 * 10
print("Resultado n3*n4 = " + str(n4))
print (n4)
n3 = "Mack"
print (n3)
x = 2 #tipo numérico
p = 3.1415 #tipo ponto flutuante
verdadeiro = True #tipo booleano
texto = 'isto é uma string' #tipo string
texto1 = "isto também é uma string" #tipo string
c = 3 + 2j #tipo número complexo
print(x)
print(p)
print(verdadeiro)
print(texto)
print(texto1)
peso = 101
altura = 1.80
ponto = '.'
#Para cada um dos comandos abaixo, indique o resultado da expressão e o tipo de cada um deles.
print(peso/2)
print(peso/2.0)
print(altura/3)
print(1 + 2 * 5)
print(ponto * 5)
#O que será exibido na tela?
x = 'aa'
y = x * 10
print(y)
v1 = 10.5
v2 = 10
v3 = "true"
v4 = True
v5 = int(v1)
v6 = float(v2)
v7 = type(v3)
v8 = type(v4)
print(v1)
print(v2)
print(v3)
print(v4)
print(v5)
print(v6)
print(v7)
print(v8)
# OPERADORES ARITMÉTICOS
print(2+3)
print(2-3)
print(2*3)
print(2/3)
print(2//3)
print(2%3)
print(2**3)
###Output
5
-1
6
0.6666666666666666
0
2
8
###Markdown
Python has a number of built-in functions that execute specific instructions. A function is simply a name followed by arguments that are passed to it as input parameters. A function may take more than one argument, separated by commas. Look at the code below and identify which functions are used:
###Code
def soma():
x = float(input("Primeiro numero: "))
y = float(input("Segundo numero: "))
soma = x + y
print("Soma: ",soma)
continuar=1
while continuar:
if(continuar):
soma()
continuar=int(input("Digite 0 se desejar encerrar ou qualquer outro numero para continuar: "))
def soma(x, y):
soma = x + y
print("Soma: ",soma)
soma(8,3)
###Output
Soma: 11
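###Markdown
A short follow-up sketch: the same `soma` written to *return* its result instead of printing it, which makes the value reusable in other expressions.
###Code
def soma(x, y):
    """Return the sum of x and y instead of printing it."""
    return x + y

resultado = soma(8, 3)
print("Soma:", resultado)                 # 11
print("Twice the sum:", 2 * soma(8, 3))   # the returned value can be reused directly
###Output
_____no_output_____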
|
Tutorials/tuto_Simulation_FEMM.ipynb | ###Markdown
How to define a simulation to call FEMMThis tutorial shows the different steps to **compute magnetic flux and electromagnetic torque** with pyleecan **automated coupling with FEMM**. FEMM must be installed for this tutorial. This tutorial was tested with the release [21Apr2019 of FEMM](http://www.femm.info/wiki/Download). Please note that the coupling with FEMM is only available on Windows. The notebook related to this tutorial is available on [GitHub](https://github.com/Eomys/pyleecan/tree/master/Tutorials/tuto_Simulation_FEMM.ipynb).Every electrical machine defined in Pyleecan can be automatically drawn in [FEMM](http://www.femm.info/wiki/HomePage) to compute torque, airgap flux and electromotive force. To do so, the tutorial is divided into four parts: - defining or loading the machine - defining the simulation inputs - setting up and running of the magnetic solver - plotting of the magnetic flux for the first time step Defining or loading the machineThe first step is to define the machine to simulate. For this tutorial we use the Toyota Prius 2004 machine defined in [this tutorial](https://www.pyleecan.org/tuto_Machine.html).
###Code
%matplotlib notebook
# Load the machine
from os.path import join
from pyleecan.Functions.load import load
from pyleecan.definitions import DATA_DIR
IPMSM_A = load(join(DATA_DIR, "Machine", "IPMSM_A.json"))
IPMSM_A.plot()
###Output
_____no_output_____
###Markdown
Simulation definition InputsThe simulation is defined with a [**Simu1**](http://www.pyleecan.org/pyleecan.Classes.Simu1.html) object. This object correspond to a simulation with 5 sequential physics (or modules):- electrical - magnetic - force - structural - acoustic [**Simu1**](http://www.pyleecan.org/pyleecan.Classes.Simu1.html) object enforce a weak coupling between each physics: the input of each physic is the output of the previous one.In this tutorial we will focus only on the magnetic module. The Magnetic physic is defined with the object [**MagFEMM**](https://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) and the other physics are desactivated (set to None). We define the starting point of the simulation with an [**InputCurrent**](http://www.pyleecan.org/pyleecan.Classes.InputCurrent.html) object to enforce the electrical module output with:- angular and the time discretization - rotor speed - stator currents
###Code
from numpy import ones, pi, array, linspace
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Classes.InputCurrent import InputCurrent
from pyleecan.Classes.MagFEMM import MagFEMM
# Create the Simulation
mySimu = Simu1(name="EM_SIPMSM_AL_001", machine=IPMSM_A)
# Defining Simulation Input
mySimu.input = InputCurrent()
# Rotor speed [rpm]
mySimu.input.N0 = 2000
# time discretization [s]
mySimu.input.time = linspace(start=0, stop=60/mySimu.input.N0, num=16, endpoint=False) # 16 timesteps
# Angular discretization along the airgap circonference for flux density calculation
mySimu.input.angle = linspace(start = 0, stop = 2*pi, num=2048, endpoint=False) # 2048 steps
# Stator currents as a function of time, each column correspond to one phase [A]
mySimu.input.Is = array(
[
[ 1.77000000e+02, -8.85000000e+01, -8.85000000e+01],
[ 5.01400192e-14, -1.53286496e+02, 1.53286496e+02],
[-1.77000000e+02, 8.85000000e+01, 8.85000000e+01],
[-3.25143725e-14, 1.53286496e+02, -1.53286496e+02],
[ 1.77000000e+02, -8.85000000e+01, -8.85000000e+01],
[ 2.11398201e-13, -1.53286496e+02, 1.53286496e+02],
[-1.77000000e+02, 8.85000000e+01, 8.85000000e+01],
[-3.90282030e-13, 1.53286496e+02, -1.53286496e+02],
[ 1.77000000e+02, -8.85000000e+01, -8.85000000e+01],
[ 9.75431176e-14, -1.53286496e+02, 1.53286496e+02],
[-1.77000000e+02, 8.85000000e+01, 8.85000000e+01],
[-4.33634526e-13, 1.53286496e+02, -1.53286496e+02],
[ 1.77000000e+02, -8.85000000e+01, -8.85000000e+01],
[ 4.55310775e-13, -1.53286496e+02, 1.53286496e+02],
[-1.77000000e+02, 8.85000000e+01, 8.85000000e+01],
[-4.76987023e-13, 1.53286496e+02, -1.53286496e+02]
]
)
###Output
_____no_output_____
###Markdown
The stator currents are enforced as a function of time for each phase. The current can also be enforced sinusoïdal by using Id_ref/Iq_ref as explained in the [How to set the Operating Point tutorial](https://www.pyleecan.org/tuto_Operating_point.html). MagFEMM configurationFor the configuration of the Magnetic module, we use the object [**MagFEMM**](https://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) that compute the airgap flux density by calling FEMM. The model parameters are set though the properties of the [**MagFEMM**](https://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) object. In this tutorial we will present the main ones, the complete list is available by looking at [**Magnetics**](http://www.pyleecan.org/pyleecan.Classes.Magnetics.html) and [**MagFEMM**](http://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) classes documentation.*type_BH_stator* and *type_BH_rotor* enable to select how to model the B(H) curve of the laminations in FEMM. The material parameter and in particular the B(H) curve are setup directly [in the machine](https://www.pyleecan.org/tuto_Machine.html).
###Code
from pyleecan.Classes.MagFEMM import MagFEMM
mySimu.mag = MagFEMM(
type_BH_stator=0, # 0 to use the material B(H) curve,
# 1 to use linear B(H) curve according to mur_lin,
# 2 to enforce infinite permeability (mur_lin =100000)
type_BH_rotor=0, # 0 to use the material B(H) curve,
# 1 to use linear B(H) curve according to mur_lin,
# 2 to enforce infinite permeability (mur_lin =100000)
file_name = "", # Name of the file to save the FEMM model
)
# We only use the magnetic part
mySimu.force = None
mySimu.struct = None
###Output
_____no_output_____
###Markdown
Pyleecan coupling with FEMM enables to define the machine with symmetry and with sliding band to optimize the computation time. The angular periodicity of the machine will be computed and (in the particular case) only 1/8 of the machine (4 symmetry + antiperiodicity):
###Code
mySimu.mag.is_periodicity_a=True
###Output
_____no_output_____
###Markdown
At the end of the simulation, the mesh and the solution can be saved in the **Output** object with:
###Code
mySimu.mag.is_get_mesh = True # To get FEA mesh for latter post-procesing
mySimu.mag.is_save_FEA = False # To save FEA results in a dat file
###Output
_____no_output_____
###Markdown
Run simulation
###Code
myResults = mySimu.run()
###Output
_____no_output_____
###Markdown
When running the simulation, a FEMM window should open so you can see pyleecan drawing the machine and defining the surfaces. The simulation will compute 16 different timesteps by updating the current and the sliding band boundary condition. Once the simulation is finished, an Output object is returned. The results are stored in the magnetic part of the output (i.e. _myResults.mag_ ) and different plots can be called. This _myResults.mag_ contains: - *time*: magnetic time vector without symmetry - *angle*: magnetic position vector without symmetry - *B*: airgap flux density (contains radial and tangential components) - *Tem*: electromagnetic torque - *Tem_av*: average electromagnetic torque - *Tem_rip_pp*: peak to peak torque ripple - *Tem_rip_norm*: peak to peak torque ripple normalized by the average torque - *Phi_wind_stator*: stator winding flux - *emf*: electromotive force Plot results: the **Output** object embeds different plots to visualize results easily. A dedicated tutorial is available [here](https://www.pyleecan.org/tuto_Plots.html). For instance, the radial and tangential magnetic flux in the airgap at a specific timestep can be plotted with:
###Code
# Radial magnetic flux
myResults.plot_2D_Data("mag.B","angle","time[1]",component_list=["radial"])
myResults.plot_2D_Data("mag.B","wavenumber=[0,76]","time[1]",component_list=["radial"])
# Tangential magnetic flux
myResults.plot_2D_Data("mag.B","angle","time[1]",component_list=["tangential"])
myResults.plot_2D_Data("mag.B","wavenumber=[0,76]","time[1]",component_list=["tangential"])
###Output
_____no_output_____
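###Markdown
A short sketch (assuming the scalar attributes listed above are populated by the run) printing the torque figures alongside the plots:
###Code
# Sketch: scalar torque results described above (attribute names taken from the text)
print("Average torque      :", myResults.mag.Tem_av, "N.m")
print("Torque ripple (p-p) :", myResults.mag.Tem_rip_pp, "N.m")
print("Torque ripple (norm):", myResults.mag.Tem_rip_norm)
###Output
_____no_output_____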
###Markdown
If the mesh was saved in the output object (mySimu.mag.is_get_mesh = True), it can be plotted with:
###Code
myResults.mag.meshsolution.plot_contour(label="B", group_names="stator")
###Output
_____no_output_____
###Markdown
Finally, it is possible to extend pyleecan by implementing new plot by using the results from output. For instance, the following plot requires plotly to display the radial flux density in the airgap over time and angle.
###Code
#%run -m pip install plotly # Uncomment this line to install plotly
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode
init_notebook_mode()
result = myResults.mag.B.get_rad_along("angle{°}", "time")
x = result["angle"]
y = result["time"]
z = result["B_r"]
fig = go.Figure(data=[go.Surface(z=z, x=x, y=y)])
fig.update_layout( )
fig.update_layout(title='Radial flux density in the airgap over time and angle',
autosize=True,
scene = dict(
xaxis_title='Angle [°]',
yaxis_title='Time [s]',
zaxis_title='Flux [T]'
),
width=700,
margin=dict(r=20, b=100, l=10, t=100),
)
fig.show(config = {"displaylogo":False})
###Output
_____no_output_____
###Markdown
How to compute magnetic flux with FEMM. This tutorial shows how to **compute magnetic flux and torque with FEMM**. The notebook related to this tutorial is available on [GitHub](https://github.com/Eomys/pyleecan/tree/master/Tutorials/tuto_Simulation_FEMM.ipynb). This tutorial is divided into four parts: - machine import - winding definition - magnetic simulation definition and running - plot of the magnetic flux for the first time step Loading Machine: before defining the simulation, one first has to define the machine. For this example we import the Toyota Prius 2004 defined in [this tutorial](https://www.pyleecan.org/tuto_Machine.html).
###Code
# Add pyleecan to the Python path
import sys
sys.path.append('../..')
from pyleecan.Functions.load import load
# from pyleecan.Tests.Validation.Machine.SPMSM_003 import SPMSM_003 as IPMSM_A
# from pyleecan.Tests.Validation.Machine.IPMSM_A import IPMSM_A as IPMSM_A
# Import the machine from a script
IPMSM_A = load('../Data/Machine/IPMSM_A.json')
%matplotlib notebook
im=IPMSM_A.plot()
###Output
_____no_output_____
###Markdown
Simulation definition Input currentsTo define the simulation, we use [Simu1](http://www.pyleecan.com/pyleecan.Classes.Simu1.html) and [InCurrent](http://www.pyleecan.com/pyleecan.Classes.InCurrent.html) to define the input such as stator currents, the angular and the time discretization.
###Code
import numpy as np
from numpy import ones, pi, array, linspace
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Classes.InCurrent import InCurrent
from pyleecan.Classes.MagFEMM import MagFEMM
# Create the Simulation
mySimu = Simu1(name="EM_SIPMSM_AL_001", machine=IPMSM_A)
# Defining Simulation Input
mySimu.input = InCurrent()
# Electrical time vector without symmetry [s]
mySimu.input.time.value= np.linspace(start=0, stop=0, num=1, endpoint=False)
# Angular steps along the airgap circonference for flux density calculation
mySimu.input.angle.value = np.linspace(start = 0, stop = 2*np.pi, num=2048, endpoint=False) # 2048 steps
# Rotor speed as a function of time [rpm]
mySimu.input.Nr.value = ones(1) * 2504
# Stator currents as a function of time, each column correspond to one phase [A]
mySimu.input.Is.value = array(
[
[0, 12.2474, -12.2474],
]
)
###Output
_____no_output_____
###Markdown
To call FEMM, we need to define the magnetic part of the simulation with [MagFEMM](http://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) class. As the simulation will only consider magnetic problem, we set the structural part as None to avoid computation.
###Code
from pyleecan.Classes.MagFEMM import MagFEMM
# Definition of the magnetic simulation (is_mmfr=False => no flux from the magnets)
mySimu.mag = MagFEMM(
is_stator_linear_BH=0, # 0 to use the B(H) curve,
# 1 to use linear B(H) curve according to mur_lin,
# 2 to enforce infinite permeability (mur_lin =100000)
is_rotor_linear_BH=0, # 0 to use the B(H) curve,
# 1 to use linear B(H) curve according to mur_lin,
# 2 to enforce infinite permeability (mur_lin =100000)
is_symmetry_a=True, # 0 Compute on the complete machine, 1 compute according to sym_a and is_antiper_a
sym_a = 4, # Number of symmetry for the angle vector
is_antiper_a=True, # To add an antiperiodicity to the angle vector
angle_stator=-np.pi / 6, # Angular position shift of the stator
)
mySimu.struct = None # We only use the magnetic part
###Output
_____no_output_____
###Markdown
You can find all the parameters of _MagFEMM_ by looking at [Magnetics](http://www.pyleecan.org/pyleecan.Classes.Magnetics.html) and [MagFEMM](http://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) classes. Run simulationTo run the simulation, we first have to set the Output to store the results.
###Code
from pyleecan.Classes.Output import Output
myResults = Output(simu=mySimu)
mySimu.run()
###Output
_____no_output_____
###Markdown
Once it is done, the results are stored in the magnetic part of the output (i.e. _out.mag_ ) and one can call different plots :
###Code
%matplotlib notebook
myResults.plot_B_space()
###Output
_____no_output_____
###Markdown
Version information
###Code
from datetime import date
print("Running date:", date.today().strftime("%B %d, %Y"))
import pyleecan
print("Pyleecan version:" + pyleecan.__version__)
import SciDataTool
print("SciDataTool version:" + SciDataTool.__version__)
###Output
_____no_output_____
###Markdown
How to define a simulation to call FEMMThis tutorial shows the different steps to **compute magnetic flux and electromagnetic torque** with Pyleecan **automated coupling with FEMM**. This tutorial was tested with the release [21Apr2019 of FEMM](http://www.femm.info/wiki/Download). Please note that the coupling with FEMM is only available on Windows. The notebook related to this tutorial is available on [GitHub](https://github.com/Eomys/pyleecan/tree/master/Tutorials/tuto_Simulation_FEMM.ipynb).Every electrical machine defined in Pyleecan can be automatically drawn in FEMM to compute torque, airgap flux and electromotive force. Defining or loading the machineThe first step is to define the machine to simulate. For this tutorial we use the Toyota Prius 2004 machine defined in [this tutorial](https://www.pyleecan.org/tuto_Machine.html).
###Code
%matplotlib notebook
# Load the machine
from os.path import join
from pyleecan.Functions.load import load
from pyleecan.definitions import DATA_DIR
IPMSM_A = load(join(DATA_DIR, "Machine", "Toyota_Prius.json"))
IPMSM_A.plot()
###Output
_____no_output_____
###Markdown
Simulation definition InputsThe simulation is defined with a [**Simu1**](http://www.pyleecan.org/pyleecan.Classes.Simu1.html) object. This object corresponds to a simulation with 5 sequential physics (or modules):- electrical - magnetic - force - structural - acoustic Each physics/modules can have several models to solve them. For now pyleecan includes:- an Electrical model for PMSM machine with FEMM- a Magnetic model with FEMM for all machines- a Force model (Maxwell Tensor)- Magnetic and Structural models with GMSH/Elmer[**Simu1**](http://www.pyleecan.org/pyleecan.Classes.Simu1.html) object enforces a weak coupling between each physics: the input of each physic is the output of the previous one.The Magnetic physics is defined with the object [**MagFEMM**](https://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) and the other physics are deactivated (set to None). We define the starting point of the simulation with an [**InputCurrent**](http://www.pyleecan.org/pyleecan.Classes.InputCurrent.html) object to enforce the electrical module output with:- angular and the time discretization - rotor speed - stator currents
###Code
from os.path import join
from numpy import ones, pi, array, linspace, cos, sqrt
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Classes.InputCurrent import InputCurrent
from pyleecan.Classes.MagFEMM import MagFEMM
# Create the Simulation
simu_femm = Simu1(name="FEMM_simulation", machine=IPMSM_A)
p = simu_femm.machine.stator.winding.p
qs = simu_femm.machine.stator.winding.qs
# Defining Simulation Input
simu_femm.input = InputCurrent()
# Rotor speed [rpm]
simu_femm.input.N0 = 2000
# time discretization [s]
time = linspace(start=0, stop=60/simu_femm.input.N0, num=32*p, endpoint=False) # 32*p timesteps
simu_femm.input.time = time
# Angular discretization along the airgap circonference for flux density calculation
simu_femm.input.angle = linspace(start = 0, stop = 2*pi, num=2048, endpoint=False) # 2048 steps
# Stator currents as a function of time, each column correspond to one phase [A]
I0_rms = 250/sqrt(2)
felec = p * simu_femm.input.N0 /60 # [Hz]
rot_dir = simu_femm.machine.stator.comp_mmf_dir()
Phi0 = 140*pi/180 # Maximum Torque Per Amp
Ia = (
I0_rms
* sqrt(2)
* cos(2 * pi * felec * time + 0 * rot_dir * 2 * pi / qs + Phi0)
)
Ib = (
I0_rms
* sqrt(2)
* cos(2 * pi * felec * time + 1 * rot_dir * 2 * pi / qs + Phi0)
)
Ic = (
I0_rms
* sqrt(2)
* cos(2 * pi * felec * time + 2 * rot_dir * 2 * pi / qs + Phi0)
)
simu_femm.input.Is = array([Ia, Ib, Ic]).transpose()
###Output
_____no_output_____
###Markdown
In this example stator currents are enforced as a function of time for each phase. Sinusoidal current can also be defined with Id/Iq as explained in [this tutorial](https://www.pyleecan.org/tuto_Operating_point.html). MagFEMM configurationFor the configuration of the Magnetic module, we use the object [**MagFEMM**](https://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) that computes the airgap flux density by calling FEMM. The model parameters are set though the properties of the [**MagFEMM**](https://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) object. In this tutorial we will present the main ones, the complete list is available by looking at [**Magnetics**](http://www.pyleecan.org/pyleecan.Classes.Magnetics.html) and [**MagFEMM**](http://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) classes documentation.*type_BH_stator* and *type_BH_rotor* enable to select how to model the B(H) curve of the laminations in FEMM. The material parameters and in particular the B(H) curve are setup directly [in the machine lamination material](https://www.pyleecan.org/tuto_Machine.html).
###Code
from pyleecan.Classes.MagFEMM import MagFEMM
simu_femm.mag = MagFEMM(
type_BH_stator=0, # 0 to use the material B(H) curve,
# 1 to use linear B(H) curve according to mur_lin,
# 2 to enforce infinite permeability (mur_lin =100000)
type_BH_rotor=0, # 0 to use the material B(H) curve,
# 1 to use linear B(H) curve according to mur_lin,
# 2 to enforce infinite permeability (mur_lin =100000)
file_name = "", # Name of the file to save the FEMM model
)
# Only the magnetic module is defined
simu_femm.elec = None
simu_femm.force = None
simu_femm.struct = None
###Output
_____no_output_____
###Markdown
Pyleecan coupling with FEMM enables to define the machine with symmetry and with sliding band to optimize the computation time. The angular periodicity of the machine will be computed and (in the particular case) only 1/8 of the machine will be drawn (4 symmetries + antiperiodicity):
###Code
simu_femm.mag.is_periodicity_a=True
###Output
_____no_output_____
###Markdown
The same is done for time periodicity only half of one electrical period is calculated (i.e: 1/8 of mechanical period):
###Code
simu_femm.mag.is_periodicity_t=True
###Output
_____no_output_____
###Markdown
Pyleecan enable to parallelize the call to FEMM by simply setting:
###Code
simu_femm.mag.nb_worker = 4 # Number of FEMM instances to run at the same time (1 by default)
###Output
_____no_output_____
###Markdown
At the end of the simulation, the mesh and the solution can be saved in the **Output** object with:
###Code
simu_femm.mag.is_get_meshsolution = True # To get FEA mesh for latter post-procesing
simu_femm.mag.is_save_meshsolution_as_file = False # To save FEA results in a dat file
###Output
_____no_output_____
###Markdown
Run simulation
###Code
out_femm = simu_femm.run()
###Output
_____no_output_____
###Markdown
When running the simulation, an FEMM window runs in the background. You can open it to see pyleecan drawing the machine and defining the surfaces. The simulation will compute 32*p/8 different timesteps by updating the current and the sliding band boundary condition. If the parallelization is activated (simu_femm.mag.nb_worker > 1) then the time steps are computed out of order. Once the simulation is finished, an Output object is returned. The results are stored in the magnetic part of the output (i.e. _out_femm.mag_ ) and different plots can be called. This _out_femm.mag_ contains: - *Time*: magnetic time axis - *Angle*: magnetic position - *B*: airgap flux density (contains radial and tangential components) - *Tem*: electromagnetic torque - *Tem_av*: average electromagnetic torque - *Tem_rip_pp*: peak to peak torque ripple - *Tem_rip_norm*: peak to peak torque ripple normalized by the average torque - *Phi_wind_stator*: stator winding flux - *emf*: electromotive force Some of these properties are "Data objects" from the [SciDataTool](https://github.com/Eomys/SciDataTool) project. These objects handle unit conversion, interpolation, fft, periodicity... Plot results: the **Output** object embeds different plots to visualize results easily. A dedicated tutorial is available [here](https://www.pyleecan.org/tuto_Plots.html). For instance, the radial and tangential magnetic flux in the airgap at a specific timestep can be plotted with:
###Code
# Radial magnetic flux
out_femm.mag.B.plot_2D_Data("angle","time[1]",component_list=["radial"])
out_femm.mag.B.plot_2D_Data("wavenumber=[0,76]","time[1]",component_list=["radial"])
# Tangential magnetic flux
out_femm.mag.B.plot_2D_Data("angle","time[1]",component_list=["tangential"])
out_femm.mag.B.plot_2D_Data("wavenumber=[0,76]","time[1]",component_list=["tangential"])
###Output
_____no_output_____
###Markdown
The torque can be plotted with:
###Code
out_femm.mag.Tem.plot_2D_Data("time")
###Output
_____no_output_____
###Markdown
One can notice that the torque matrix includes the periodicity (only the meaningful part is stored)
###Code
print(out_femm.mag.Tem.values.shape)
print(simu_femm.input.Nt_tot)
###Output
_____no_output_____
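###Markdown
A small sketch (assuming `Tem` behaves like the other SciDataTool Data objects used with `get_along` below) to expand the periodic torque signal back onto the full time axis:
###Code
# Sketch: ask the Data object for the torque along the full time vector
tem_full = out_femm.mag.Tem.get_along("time")
print(tem_full.keys())          # inspect the returned dictionary
print(tem_full["time"].shape)   # full time axis, with the periodicity expanded
###Output
_____no_output_____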
###Markdown
If the mesh was saved in the output object (mySimu.mag.is_get_meshsolution = True), it can be plotted with:
###Code
out_femm.mag.meshsolution.plot_contour(label="B", group_names="stator core")
###Output
_____no_output_____
###Markdown
Finally, it is possible to extend pyleecan by implementing new plot by using the results from output. For instance, the following plot requires plotly to display the radial flux density in the airgap over time and angle.
###Code
#%run -m pip install plotly # Uncomment this line to install plotly
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode
init_notebook_mode()
result = out_femm.mag.B.components["radial"].get_along("angle{°}", "time")
x = result["angle"]
y = result["time"]
z = result["B_r"]
fig = go.Figure(data=[go.Surface(z=z, x=x, y=y)])
fig.update_layout( )
fig.update_layout(title='Radial flux density in the airgap over time and angle',
autosize=True,
scene = dict(
xaxis_title='Angle [°]',
yaxis_title='Time [s]',
zaxis_title='Flux [T]'
),
width=700,
margin=dict(r=20, b=100, l=10, t=100),
)
fig.show(config = {"displaylogo":False})
###Output
_____no_output_____
###Markdown
Version information
###Code
from datetime import date
print("Running date:", date.today().strftime("%B %d, %Y"))
import pyleecan
print("Pyleecan version:" + pyleecan.__version__)
import SciDataTool
print("SciDataTool version:" + SciDataTool.__version__)
###Output
_____no_output_____ |
2020_03_06/Talk_Part03_CreateYourElements.ipynb | ###Markdown
Create your own element, in this case a radar plotThis example is based on code from https://stackoverflow.com/questions/46564099/what-are-the-steps-to-create-a-radar-chart-in-bokeh-python
###Code
import numpy as np
from bokeh.plotting import figure, output_notebook, show
from bokeh.models import ColumnDataSource, LabelSet, HoverTool
output_notebook()
###Output
_____no_output_____
###Markdown
1. Data for one particular plot
###Code
values = [.5, 1, .8, .3, .8, .8, .8, .9]
dimensions = ['Dim 1','Dim 2','Dim 3','Dim 4','Dim 5','Dim 6','Dim 7','Dim 8']
dim_descr = {'Dim 1': 'What was the modality of the task',
'Dim 2': 'Was was the difficulty of the task',
'Dim 3': 'What was the valence of the task',
'Dim 4': 'How interesting was the task',
'Dim 5': 'Was the task too repetitive',
'Dim 6': 'Would you change the timing of the task',
'Dim 7': 'Would you recommend the task to your colleagues?',
'Dim 8': 'Were you aware of body motion associated with the task'}
responses = ['Visual', 'Very difficult', 'Very positive', 'Extremely Interesting', 'Stongly Agree', 'Strongly Disagree', 'Strongly Agree','Yes']
###Output
_____no_output_____
###Markdown
2. Create Figure and Draw Outer Circle
###Code
p = figure(match_aspect=True)
# Draw Outter Circle
centre = 0.5
p.circle(x=centre,y=centre,radius=0.5, fill_color=None, line_color='black', line_alpha=0.5)
show(p)
###Output
_____no_output_____
###Markdown
3. Draw intermediate circles
###Code
#Draw intermediate circles
p.circle(x=0.5,y=0.5,radius=.5, line_color='black', fill_color=None, line_alpha=0.5)
p.circle(x=0.5,y=0.5,radius=.1, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
p.circle(x=0.5,y=0.5,radius=.2, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
p.circle(x=0.5,y=0.5,radius=.3, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
p.circle(x=0.5,y=0.5,radius=.4, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
show(p)
###Output
_____no_output_____
###Markdown
4. Remove Grid and Non-polar Axes
###Code
# Visual Aspects
p.xgrid.visible=False
p.ygrid.visible=False
p.xaxis.visible=False
p.yaxis.visible=False
p.toolbar.logo=None
p.toolbar_location='below'
show(p)
###Output
_____no_output_____
###Markdown
5. Draw Polar Axes
###Code
def unit_poly_verts(theta, centre):
"""Return vertices of polygon for subplot axes.
This polygon is circumscribed by a unit circle centered at (0.5, 0.5)
"""
x0, y0, r = [centre] * 3
verts = [(r*np.cos(t) + x0, r*np.sin(t) + y0) for t in theta]
return verts
# Obtain the Number of Dimensions
# ===============================
num_vars = len(values)
# Get the angle for each axes representing a dimension (from 0 to 2pi) + pi/2 --> To start on the y-axis
# ======================================================================================================
theta = np.linspace(0, 2*np.pi, num_vars, endpoint=False)
theta += np.pi/2
# Compute the intersection points with the outter circel for each of the axes
# ===========================================================================
verts = unit_poly_verts(theta, centre)
x = [v[0] for v in verts]
y = [v[1] for v in verts]
# Draw concentrical lines
# =======================
for i,j in zip(x,y):
p.line(x=[centre,i],y=[centre,j], line_color='black', line_dash='dashed', line_alpha=0.5)
show(p)
###Output
_____no_output_____
###Markdown
6. Add Labels and hovering capabilities to axes
###Code
# Draw Outter Dots
# ================
out_dots_TOOLTIPS = [("Dim", "@desc")]
out_dots_src = ColumnDataSource({'x':x,'y':y,'desc':list(dim_descr.values())})
g_out_dots = p.circle(x='x',y='y', color='black', source=out_dots_src)
out_dots_hover = HoverTool(renderers=[g_out_dots], tooltips=out_dots_TOOLTIPS)
p.add_tools(out_dots_hover)
show(p)
# Draw Dimension Labels
# =====================
labels_src = ColumnDataSource({'x':[i if i >= 0.5 else i-.05 for i in x],'y':[i if i >= 0.5 else i-.05 for i in y],'text':dimensions})
labels = LabelSet(x="x",y="y",text="text",source=labels_src)
p.add_layout(labels)
show(p)
###Output
_____no_output_____
###Markdown
7. Add Patch for a given set of data
###Code
def radar_patch(r, theta, centre ):
""" Returns the x and y coordinates corresponding to the magnitudes of
each variable displayed in the radar plot
"""
# offset from centre of circle
offset = 0.0
yt = (r*centre + offset) * np.sin(theta) + centre
xt = (r*centre + offset) * np.cos(theta) + centre
return xt, yt
# Compute the polar coordinates for the available data
# ====================================================
xt, yt = radar_patch(np.array(values), theta, centre)
# Use Bokeh Patch Element to draw the data
# ========================================
p.patch(x=xt, y=yt, fill_alpha=0.3, fill_color='blue', line_color='blue', line_width=2)
show(p)
# Patch hovering
patch_dots_TOOLTIPS = [("Response:","@desc")]
patch_dots_src = ColumnDataSource({'xt':xt,'yt':yt,'desc':responses})
patch_dots = p.circle(x='xt',y='yt',color='black', source=patch_dots_src)
patch_dots_hover = HoverTool(renderers=[patch_dots], tooltips=patch_dots_TOOLTIPS)
p.add_tools(patch_dots_hover)
show(p)
def generate_radar_chart_from_vals(vals, strs, QD, color='black'):
centre = 0.5
num_vars = len(vals)
theta = np.linspace(0, 2*np.pi, num_vars, endpoint=False)
theta += np.pi/2
verts = unit_poly_verts(theta, centre)
x = [v[0] for v in verts]
y = [v[1] for v in verts]
p = figure(match_aspect=True)
# Draw Outer Dots
out_dots_TOOLTIPS = [("Q:", "@desc")]
out_dots_src = ColumnDataSource({'x':x,'y':y,'desc':list(QD.values())})
g_out_dots = p.circle(x='x',y='y', color='black', source=out_dots_src)
out_dots_hover = HoverTool(renderers=[g_out_dots], tooltips=out_dots_TOOLTIPS)
p.add_tools(out_dots_hover)
# Draw Outer Circle
p.circle(x=0.5,y=0.5,radius=0.5, fill_color=None, line_color='black', line_alpha=0.5)
# Draw radial axis lines
for i,j in zip(x,y):
p.line(x=[centre,i],y=[centre,j], line_color='black', line_dash='dashed', line_alpha=0.5)
#Draw intermediate circles
p.circle(x=0.5,y=0.5,radius=.5, line_color='black', fill_color=None, line_alpha=0.5)
p.circle(x=0.5,y=0.5,radius=.1, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
p.circle(x=0.5,y=0.5,radius=.2, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
p.circle(x=0.5,y=0.5,radius=.3, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
p.circle(x=0.5,y=0.5,radius=.4, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
# Visual Aspects
p.xgrid.visible=False
p.ygrid.visible=False
p.xaxis.visible=False
p.yaxis.visible=False
p.toolbar.logo=None
p.toolbar_location='below'
# Draw Question IDs
labels_txt = ['Q'+str(i).zfill(2) for i in range(1,num_vars+1)]
labels_src = ColumnDataSource({'x':[i if i >= 0.5 else i-.05 for i in x],'y':[i if i >= 0.5 else i-.05 for i in y],'text':labels_txt})
labels = LabelSet(x="x",y="y",text="text",source=labels_src)
p.add_layout(labels)
xt, yt = radar_patch(np.array(vals), theta, centre)
p.patch(x=xt, y=yt, fill_alpha=0.3, fill_color=color, line_color=color, line_width=2)
# Patch hovering
patch_dots_TOOLTIPS = [("Response:","@desc")]
patch_dots_src = ColumnDataSource({'xt':xt,'yt':yt,'desc':strs})
patch_dots = p.circle(x='xt',y='yt',color='black', source=patch_dots_src)
patch_dots_hover = HoverTool(renderers=[patch_dots], tooltips=patch_dots_TOOLTIPS)
p.add_tools(patch_dots_hover)
p.width=425
p.height=425
return p
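# Example usage of the helper above (a sketch -- `example_vals`, `example_strs`, and
# `example_QD` are hypothetical placeholders, not data defined in this notebook):
# example_vals = [0.2, 0.5, 0.8, 0.4, 0.9]                            # normalized responses (0 to 1)
# example_strs = ['Never', 'Rarely', 'Often', 'Sometimes', 'Always']  # hover text for each point
# example_QD = {i: 'Question %d description' % i for i in range(1, 6)}  # axis hover descriptions
# show(generate_radar_chart_from_vals(example_vals, example_strs, example_QD, color='navy'))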
###Output
_____no_output_____ |
examples/kilojoule Intro.ipynb | ###Markdown
kilojoule LibraryWe will be using a custom python library, `kilojoule`, written specifically for this course. The main advantage to this approach is the nomenclature for the functions you will be using in Python will be consistent with the nomenclature from your textbook. The disadvantage to this approach is there will be limited sources for external tech support (you won't find example code using this library outside of this course).Prior to using this library, it needs to be installed to a location where the Python interpreter can find it. If you are using CoCalc, this will have already been done for you. If you are using a local installation of Python you can install the library from the Python Package Index (PyPi) using the command `pip install kilojoule`.After installing the library, you will need to import it into each notebook where you intend to use it. If you are referring back to this document and want a quick import template for starting a new file, you can use the following code. If this is your first time reading this document, the code in the following block will be explained in detail below.
###Code
from kilojoule.templates.kSI_C import *
# Initialize an interface to evaluate fluid properties
# You can create multiple interfaces as long as they are stored in different variables
air = idealgas.Properties('Air', unit_system='kSI_C') # Treat air as an ideal gas
water = realfluid.Properties('Water', unit_system='kSI_C') # Treat water as a real fluid
###Output
_____no_output_____
###Markdown
UnitsThe `kilojoule` library is designed to make use of dimensional "quantities" rather than simply doing calculations with numbers. In engineering, all your calculations use numbers to represent physical quantities and your calculations have no meaning without connection to appropriate units. By including the physical units as an integral part of the calculation process, we keep the physical significance of our calculation in focus and will avoid costly mistakes from unit conversion errors. To do this we will make use of the third-party library `pint` for managing units. By executing the code in the block before this one, you have already loaded this library in the background; it is accessible through the `units` and `Quantity` objects.We will first define a few property values, i.e. temperature and pressure
###Code
# The Quantity(value,units) notation defines a physical quantity with a magnitude associated with a type of unit
T = Quantity(300.0,'degK')
print(T)
print(f'T = {T} = {T.to("degC")} = {T.to("degF")} = {T.to("degR")}')
p = Quantity(1.0,'atm')
print(p)
print(f'p = {p} = {p.to("kPa")} = {p.to("Pa")} = {p.to("psi")} = {p.to("bar")}')
p.ito('kPa')
print(p)
###Output
300.0 K
T = 300.0 K = 26.85 °C = 80.33 °F = 540.0 °R
1.0 atm
p = 1.0 atm = 101.33 kPa = 1.0132×10⁵ Pa = 14.696 psi = 1.0133 bar
101.33 kPa
###Markdown
We were able to access the quantities stored in the variables `T` and `p` in any unit system by using the notation `var.to("desired units")`, which temporarily converts the units to the specified form, or we can permanently convert a variable to a different unit system using the notation `var.ito("desired units")`. We defined temperature in metric units, then displayed it in both alternate metric and English units, whereas we defined pressure in English units, then displayed it in both alternate English and metric units. This system allows us to quickly switch back and forth between unit systems as needed. The real benefit of this system is most evident when we start performing calculations with combined units. In the following code we will calculate the change in energy of a mass that is changing temperature, velocity, and elevation.\begin{align}\Delta E_{CV} &= m(\Delta u + \Delta ke + \Delta pe) \\&= m\left(u_2-u_1 + \frac{V_2^2}{2}-\frac{V_1^2}{2} + g(z_2-z_1)\right)\end{align}
###Code
m = Quantity(10.0,'kg') # metric
u_1 = Quantity(300.0,'kJ/kg') # metric
u_2 = Quantity(200.0,'kJ/kg') # metric
Vel_1 = Quantity(20.0,'mph') # English
Vel_2 = Quantity(30.5,'m/s') # metric
g = Quantity(9.8,'m/s^2') # metric
z_2 = Quantity(30.1,'ft') # English
z_1 = Quantity(1.2,'m') # metric
Delta_u = u_2-u_1
print(f"Delta u = {u_2} - {u_1} = {Delta_u}")
Delta_ke = (Vel_2**2-Vel_1**2)/2
print(f"Delta ke = {Vel_2**2/2} - {Vel_1**2/2} = {Delta_ke}")
Delta_pe = g*(z_2-z_1)
print(f"Delta pe = {g}({z_2}-{z_1}) = {Delta_pe}")
Delta_E_CV = m*(u_2-u_1 + (Vel_2**2-Vel_1**2)/2 + g*(z_2-z_1))
print(f"Delta E = {m}({Delta_u} + {Delta_ke} + {Delta_pe}) = {Delta_E_CV}")
Calculations();
###Output
Delta u = 200.0 kJ/kg - 300.0 kJ/kg = -100.0 kJ/kg
Delta ke = 465.12 m²/s² - 200.0 mph² = 425.16 m²/s²
Delta pe = 9.8 m/s²(30.1 ft-1.2 m) = 256.4 ft·m/s²
Delta E = 10.0 kg(-100.0 kJ/kg + 425.16 m²/s² + 256.4 ft·m/s²) = -994.97 kJ
###Markdown
Notice that in the above example, the units for each of the terms were in different systems until they were combined. States DatabaseMany of the systems we will be analyzing will have many states with multiple properties of interest at each state. Keeping track of these states and properties in a consistent, organized manner will make your code cleaner and easier to maintain. To aid in this, the `kilojoule` library provides a data structure designed specifically for this purpose. The `QuantityTable` behaves similarly to a nested Python dictionary. You can view the data structure as a table with columns representing properties and rows representing states. Each property column has a defined unit that will apply to all its values, i.e. all temperatures stored in $^\circ\text{C}$. We first need to import the `QuantityTable` class from the `kilojoule.organization` module. *(Note: this will already be loaded if you recently executed the first code block in this notebook)*
###Code
from kilojoule.organization import QuantityTable
###Output
_____no_output_____
###Markdown
We can now initialize our states database (`QuantityTable(...)`) and store it in a variable where we can easily access it (`states = ...`). There are a few ways to fill out the table columns with properties and units, but the straightforward way is to make a dictionary with the desired properties as keys associated with the appropriate units (`properties_dict = {'property symbol':'units', ...}`). Note: a few templates, such as the one you imported at the beginning of this notebook, provide pre-built tables for common variables used in this course to make this process easier.
###Code
# Make a dictionary with the types of properties you want to track and units for each property
properties_dict = {
'T':'degC', # Temperature: unit options ('K','degC','degF','degR')
'p':'kPa', # pressure: unit options ('kPa','bar','psi','atm',etc.)
'v':'m^3/kg', # specific volume
'u':'kJ/kg', # specific internal energy
'h':'kJ/kg', # specific enthalpy
's':'kJ/kg/K', # specific entropy
'x':'', # quality: dimensionless units enter as an empty string
}
# Make a database to hold the property values for each state and store in the variable name `states`
states = QuantityTable(properties=properties_dict)
# The states container is initially empty
print(states)
###Output
Empty DataFrame
Columns: []
Index: []
###Markdown
The table will initially be empty, but we can add property values for different states to it on the fly. Recall that we defined preferred units for each of the property columns. In the example below we will define some temperatures and pressures in consistent units, inconsistent units, and with missing units.
###Code
states[1,'T'] = Quantity(30,'degC') # consistent units
states[2,'p'] = Quantity(1,'atm') # inconsistent units (will be converted kPa)
states[3,'T'] = 100 # missing units (assumed to be degC)
states[3,'p'] = 200 # missing units (assumed to be kPa)
print(states)
###Output
T p
unit °C kPa
1 30 -
2 - 101.325
3 100 200
###Markdown
Notice that we originally defined the temperature column to have units of $^\circ\text{C}$, then we explicitly defined a temperature quantity with units of $^\circ\text{C}$ and placed it in state 1 in the temperature column (`states[state, property] = value`). We then defined a pressure for state 2, but we used an inconsistent unit, i.e. we defined it in $\text{atm}$ when the column expected $\text{kPa}$. When we view the contents of the states database (`print(states)`) we see that the pressure value at state 2 was automatically converted to $\text{kPa}$. Finally we defined a temperature and pressure for state 3 without explicitly stating the units. When this happens, it will be assumed that the values are already in the preferred units. While this makes the syntax shorter, it is not a good practice since changes in other parts of the code could have unexpected consequences. An alternate (shorter) syntax for working with the values in the table can be enabled by assigning each column in the table to a variable in the local namespace. After executing the code below, we will be able to set the quality at state 2 to 50% with the code `x[2] = 0.5` rather than needing to type `states[2,'x'] = 0.5`. Note: this step will also be performed for you if you import one of the pre-built templates.
###Code
# The following lines will define (overwrite) convenience variables in the local name space for each of the properties in the states container
# This allows you to add values to (or pull values from) the database using the nomenclature T[1], T[2], p[3], etc.
for property in states.properties:
globals()[property] = states.dict[property]
x[2] = 0.5
T['inlet'] = Quantity(25,'degC')
print(states)
###Output
T p x
unit °C kPa
1 30 - -
2 - 101.325 0.5
3 100 200 -
inlet 25 - -
###Markdown
The preferred units for each property column can be changed at any time using the `.set_units()` method and all values in that column will be automatically converted to the new units
###Code
T.set_units('degF')
p.set_units('psi')
states.display()
T.set_units('K')
p.set_units('Pa')
states.display()
T.set_units('degC')
p.set_units('kPa')
states.display()
###Output
_____no_output_____
###Markdown
PropertiesDuring our calculations for this course, we will often need to evaluate material/fluid properties at various states. The traditional method for doing this is to read values from a table that will often involve interpolation (sometimes even double interpolation). You will still be expected to know how to use the property tables for this course (especially during exams), but you will also be expected to use tools that automate this procedure so you can investigate more complex problems that are not easily solved through hand calculations. This will be achieved using third-party libraries: `CoolProp` for real-fluid properties and `PYroMat` for ideal-gas properties. Each of these libraries can be used directly in Python without loading the `kilojoule` package. However, we will be primarily using a wrapper for these libraries provided by the `kilojoule` package, which incorporates the `pint` package to handle a wider range of units and also renames a number of parameters to be consistent with the nomenclature we will be using in this course. Water PropertiesIn the first code block at the top of this notebook, you imported the `realfluid` class from the `kilojoule` library. This module contains a `Properties` class that can be used to evaluate the properties of a number of real (*pure/pseudopure*) fluids. You are already familiar with looking up properties for water from the tables from your Thermo I course. Recall that for a pure substance you need two independent, intensive properties to fix a state, i.e. if you know two independent properties you can find any other property that you need for the state *(Note: there is a little more to the story here, but we will get to that later in the course)*. For now, let's say we have water at $T_1=300^\circ\text{C}$ and $p_1=750\,\text{kPa}$ and we would like to find the specific volume, $v$, specific internal energy, $u$, specific enthalpy, $h$, and specific entropy, $s$. For each of these cases, we could say the desired (dependent) property is a function of the two known (independent) properties:$$v_1 = v(T=300^\circ\text{C}, p=750\,\text{kPa})$$$$u_1 = u(T=300^\circ\text{C}, p=750\,\text{kPa})$$$$h_1 = h(T=300^\circ\text{C}, p=750\,\text{kPa})$$$$s_1 = s(T=300^\circ\text{C}, p=750\,\text{kPa})$$In order to use the `kilojoule.realfluid.Properties` class, we first need to instantiate it (Python-speak for initialize the class and store in a variable). The following code block will import the class (if needed), set the target fluid to be water, and set the default unit system to be metric with temperatures in $^\circ\text{C}$
###Code
from kilojoule.templates.kSI_C import *
from kilojoule import realfluid
water = realfluid.Properties('Water', unit_system='kSI_C') # the default unit_system is 'kSI_C' other options are 'SI', 'SI_C', kSI', 'USCS_F', and 'USCS_R'
###Output
_____no_output_____
###Markdown
The `water` object now has sub-functions (or methods) that can be used to evaluate (look up) dependent properties.
###Code
# Define known values (independent properties)
T[1] = Quantity(300.0,'degC')
p[1] = Quantity(750.0,'kPa')
# Look up dependent properties corresponding to $T_1$ and $p_1$
# specific volume
v[1] = water.v(T=T[1], p=p[1])
# specific internal energy
u[1] = water.u(T=T[1], p=p[1])
# specific enthalpy
h[1] = water.h(T=T[1], p=p[1])
# specific entropy
s[1] = water.s(T=T[1], p=p[1])
# quality
x[1] = water.x(T=T[1], p=p[1])
# phase
phase[1] = water.phase(T=T[1], p=p[1])
Calculations()
states.display()
###Output
_____no_output_____
###Markdown
Notice the quality, $x_1$, was reported to be `N/A` because the substance is a single-phase, superheated vapor at state 1, so the quality is not defined for this state. We can also use the same functions for evaluating the properties of saturated fluids. Let's assume the fluid from state 1 in the above example is cooled at a constant pressure until it is entirely in liquid form, i.e. $x_2=0$. We could then find all the remaining properties at state 2 as well.$$ p_2 = p_1 $$$$ T_2 = T(p=p_2, x=x_2) $$$$ v_2 = v(p=p_2, x=x_2) $$$$ u_2 = u(p=p_2, x=x_2) $$$$ h_2 = h(p=p_2, x=x_2) $$$$ s_2 = s(p=p_2, x=x_2) $$
###Code
# Independent properties that fix state 2
p[2] = p[1]
x[2] = 0
# Dependent properties corresponding to $p[2]$ and $x[2]$
T[2] = water.T(p=p[2], x=x[2])
v[2] = water.v(p=p[2], x=x[2])
u[2] = water.u(p=p[2], x=x[2])
h[2] = water.h(p=p[2], x=x[2])
s[2] = water.s(p=p[2], x=x[2])
phase[2] = water.phase(p=p[2],x=x[2])
Calculations()
states.display()
###Output
_____no_output_____
###Markdown
Notice the phase for state 2 is reported as `twophase`, even though we know it is entirely liquid because the quality is 0. This state would be more accurately described as a saturated-liquid, but the `CoolProp` library reports all saturated states (saturated liquid, saturated mixture, and saturated vapor) as `twophase`. Let's now calculate a third state that would be obtained from an isenthalpic expansion to $p_3=100\,\text{kPa}$ resulting in a saturated mixture.$$ h_3 = h_2 $$$$ p_3 = 100\,\text{kPa} $$$$ T_3 = T(p=p_3, h=h_3) $$$$ v_3 = v(p=p_3, h=h_3) $$$$ u_3 = u(p=p_3, h=h_3) $$$$ x_3 = x(p=p_3, h=h_3) $$$$ s_3 = s(p=p_3, h=h_3) $$
###Code
# Independent properties that fix the state
h[3] = h[2]
p[3] = Quantity(100.0,'kPa')
# Dependent properties corresponding to $p_3$ and $h_3$
T[3] = water.T(p=p[3], h=h[3])
v[3] = water.v(p=p[3], h=h[3])
u[3] = water.u(p=p[3], h=h[3])
x[3] = water.x(p=p[3], h=h[3])
s[3] = water.s(p=p[3], h=h[3])
phase[3] = water.phase(p=p[3], h=h[3])
Calculations()
states.display()
###Output
_____no_output_____
###Markdown
Short-form Notation for Property EvaluationThe procedure illustrated above is somewhat repetitive and can be shortened in a few ways. For each call to the `water.property()` object, we explicitly told the function both the type and value of each of the arguments passed to it (or in Python lingo, we gave both a keyword and an argument). However, we can take advantage of the units associated with each of the variables to infer the appropriate property type, i.e. if the argument has units of $kPa$ we can safely assume the appropriate keyword should be `p` for pressure. Therefore, we could shorten the line `T[3] = water.T(p=p[3], x=x[3])` to `T[3] = water.T(p[3], x[3])`. **Note: the approach will not work when passing an internal energy or enthalpy argument because they share the same units and you must use the long-form notation, i.e. (...,u=u[3]) or (..., h=h[3]).** Also, since we are using the same two independent properties for each of the dependent property calls, we could use a loop to automate the process. The command `states.fix(3, water)` will attempt to use the specified `water` property table to evaluate all missing properties at state `3` using the information already in the table as independent properties.To illustrate this, let's calculate a fourth state that would be obtained from an isobaric expansion to a temperature of $T_4=150^\circ \mathrm{C}$.$$ p_4 = p_3 $$$$ T_4 = 150^\circ\mathrm{C} $$
###Code
# Independent properties that fix the state
p[4] = p[3]
T[4] = Quantity(150.0,'degC')
# Dependent properties corresponding to $T_4$ and $p_4$
# using short-form notation
v[4] = water.v(T[4], p[4])
# or use `states.fix(4,water)` to fill in the rest of the table
states.fix(4, water)
Calculations()
states.display()
###Output
_____no_output_____
###Markdown
Plotting Property DiagramsIt is often helpful to visualize processes by plotting the states on property diagrams, i.e. $T$-$s$, $p$-$v$, $p$-$h$, etc. The `kilojoule` library provides a `.property_diagram()` method for each of the fluid property tables that can create common property diagrams used in thermodynamics. This class uses the popular `matplotlib` library. You first instantiate the class by telling it which properties you want represented on the $x$ and $y$ axes and the unit system (if not using the default/same units as the table). You can also use the `saturation` parameter to specify whether or not to draw the saturation curves (the default is `True` for real fluids). In the following code, we will store a plot object (instance of the `PropertyPlot` class) in the variable `Ts`. > `Ts = water.property_diagram(x='s', y='T', unit_system='USCS_F', saturation=True)`The `Ts` object contains a `matplotlib` figure and axis stored as attributes accessible at `Ts.fig` and `Ts.ax`. We can use any `matplotlib` functions on these objects to add features to the diagram (many examples are available on the internet). However, there are a few custom `matplotlib` routines built into the `PropertyPlot` class, which will make it easier to visualize the process we will be analyzing. The simplest built-in construct is the `.plot_point(x, y, label='label', label_loc='north')` method, which places a dot at the $x$, $y$ coordinates with an optional label placed at the relative location provided (default is north)> `Ts.plot_point(x=s[1], y=T[1], label='1', label_loc='north')`An alternate interface is also available if your data is stored in the `QuantityTable()` class described above.> `Ts.plot_state(states[2])` > `Ts.plot_state(states[3])`We also like to draw lines connecting states to illustrate processes. However, we do not want to simply draw straight lines connecting the points. Rather, we would like the path of the line to represent the process properties at all points. We can do this if we know something that was constant during the process, i.e. pressure was constant from 1 to 2 and enthalpy was constant from 2 to 3 in our earlier example. The `.plot_process()` method accepts two states and a path description to achieve this:>`Ts.plot_process(states[1], states[2], path='isobaric')` >`Ts.plot_process(states[2], states[3], path='isenthalpic')`
###Code
# Create Ts_diagram instance
Ts = water.property_diagram(x='s', y='T', unit_system='English_F', saturation=True)
# Plot Critical and Triple Points
Ts.plot_triple_point(label='TP',label_loc='northwest')
Ts.plot_critical_point(label_loc='south')
# Plot State 1 using the .plot_point() method
Ts.plot_point(x=s[1], y=T[1], label='1', label_loc='north')
# Plot States 2 and 3 using the .plot_state() method
Ts.plot_state(states[2])
Ts.plot_state(states[3], label_loc='south west')
Ts.plot_state(states[4])
# Connect the states with lines that illustrate the process paths
Ts.plot_process(states[1], states[2], path='isobaric')
Ts.plot_process(states[2], states[3], path='isenthalpic')
Ts.plot_process(states[3], states[4], path='isobaric');
###Output
_____no_output_____
###Markdown
We can use this same process to also create additional plots for our system by changing the parameters when we call the `.property_diagram()` method, or we can shorten the syntax if we use one of the built-in property combinations `pv_diagram, Ts_diagram, Tv_diagram, hs_diagram, ph_diagram, pT_diagram`
###Code
# Create pv_diagram instance
diag = water.pv_diagram(unit_system='SI_K') # Note: this is the only line that will be changed for the next few examples
# Plot Critical and Triple Points
diag.plot_triple_point(label_loc='northwest')
diag.plot_critical_point(label_loc='south')
# Plot States 1-3 using the .plot_state() method
diag.plot_state(states[1])
diag.plot_state(states[2])
diag.plot_state(states[3], label_loc='south west')
diag.plot_state(states[4])
# Connect the states with lines that illustrate the process paths
diag.plot_process(states[1], states[2], path='isobaric')
diag.plot_process(states[2], states[3], path='isenthalpic')
diag.plot_process(states[3], states[4], path='isobaric');
# Create ph_diagram instance
diag = water.ph_diagram(unit_system='SI_C') # Note: this is the only line that will be changed for the next few examples
# Plot Critical and Triple Points
diag.plot_triple_point(label_loc='northwest')
diag.plot_critical_point(label_loc='south')
# Plot States 1-3 using the .plot_state() method
diag.plot_state(states[1])
diag.plot_state(states[2])
diag.plot_state(states[3], label_loc='south west')
diag.plot_state(states[4])
# Connect the states with lines that illustrate the process paths
diag.plot_process(states[1], states[2], path='isobaric')
diag.plot_process(states[2], states[3], path='isenthalpic')
diag.plot_process(states[3], states[4], path='isobaric');
diag = water.Tv_diagram() # Note: this is the only line that will be changed for the next few examples
# Plot Critical and Triple Points
diag.plot_triple_point(label_loc='northwest')
diag.plot_critical_point(label_loc='south')
# Plot States 1-3 using the .plot_state() method
diag.plot_state(states[1])
diag.plot_state(states[2])
diag.plot_state(states[3], label_loc='south west')
diag.plot_state(states[4])
# Connect the states with lines that illustrate the process paths
diag.plot_process(states[1], states[2], path='isobaric')
diag.plot_process(states[2], states[3], path='isenthalpic')
diag.plot_process(states[3], states[4], path='isobaric');
diag = water.hs_diagram() # Note: this is the only line that will be changed for the next few examples
# Plot Critical and Triple Points
diag.plot_triple_point(label_loc='northwest')
diag.plot_critical_point(label_loc='south')
# Plot States 1-3 using the .plot_state() method
diag.plot_state(states[1])
diag.plot_state(states[2])
diag.plot_state(states[3], label_loc='south west')
diag.plot_state(states[4])
# Connect the states with lines that illustrate the process paths
diag.plot_process(states[1], states[2], path='isobaric')
diag.plot_process(states[2], states[3], path='isenthalpic')
diag.plot_process(states[3], states[4], path='isobaric');
###Output
_____no_output_____
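###Markdown
The five property diagrams above were produced with essentially the same commands, differing only in the line that creates the diagram object. The cell below is a minimal sketch (assuming the `water` property object and `states` table defined earlier in this notebook) of how a loop over the built-in diagram constructors can remove that repetition; `getattr` is used to look up each constructor by name, and the point labels are left at their defaults for brevity.
###Code
# Sketch: loop over the built-in diagram constructors listed above
for diagram_name in ['Ts', 'pv', 'ph', 'Tv', 'hs']:
    diag = getattr(water, diagram_name + '_diagram')()
    diag.plot_triple_point(label_loc='northwest')
    diag.plot_critical_point(label_loc='south')
    for state in [1, 2, 3, 4]:
        diag.plot_state(states[state])
    diag.plot_process(states[1], states[2], path='isobaric')
    diag.plot_process(states[2], states[3], path='isenthalpic')
    diag.plot_process(states[3], states[4], path='isobaric')
###Output
_____no_output_____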
###Markdown
The ability to generate the previous 5 diagrams using the same set of commands (with only minor changes to the first line) provides an excellent opportunity to write a loop, as sketched in the cell above, to decrease the amount of code you need to write and maintain. Refrigerant PropertiesAll the commands demonstrated above will also work for any of the other pure/pseudopure substances, such as R-134a, supported by the underlying `CoolProp` library, a list of which can be obtained with the following code.
###Code
from kilojoule import realfluid
realfluid.fluids()
###Output
_____no_output_____
###Markdown
To obtain properties for any of the supported fluids, simply supply the appropriate name when you instantiate the `realfluid.Properties()` class, i.e.
###Code
r134a = realfluid.Properties('R134a')
T_ref = Quantity(30,'degC')
x_ref = Quantity(0.25,'') # Note: quality is dimensionless, so we define its units as an empty string
h_ref = r134a.h(T=T_ref, x=x_ref)
print(f'h_ref = {h_ref}')
v_ref = r134a.v(T=T_ref, x=x_ref)
print(f'v_ref = {v_ref}')
s_ref = r134a.s(T=T_ref, x=x_ref)
print(f's_ref = {s_ref}')
Ts_diagram = r134a.Ts_diagram()
Ts_diagram.plot_triple_point()
Ts_diagram.plot_critical_point()
Ts_diagram.plot_point(x=s_ref, y=T_ref, label='1');
###Output
h_ref = 285.0 kJ/kg
v_ref = 0.007292 m³/kg
s_ref = 1.2862 kJ/kg/Δ°C
###Markdown
Air Properties Ideal Gas Air PropertiesYour textbook treats air as an ideal gas. As a result, the internal energy and enthalpy values from the tables in the back of the book are only a function of temperature. Therefore, you only need one independent, intensive property, temperature, to find the enthalpy at a state, since the ideal gas law is used to fix the other degree of freedom (therefore removing the need for a second independent property), i.e. $$h=h(T)\qquad\text{for an ideal gas only}$$The entropy, however, is still dependent on the pressure (even with the ideal gas assumption applied). Since the ideal gas air tables are only tabulated by temperature, it is not possible to look up the entropy directly with the pressure information also being accounted for. To workaround this problem, your textbook tabulates $s^o$ rather than $s$. Where the $^o$ is provided to remind you that it is only the temperature dependent portion of the change in entropy. To get the full change in entropy between two states using the information from the tables, you can use$$ \Delta s_{1\to2} = s_2^o-s_1^o - R\ln\frac{p_2}{p_1} $$where $s_2^o$ and $s_1^o$ are from the tables, $R$ is the specific gas constant, and $p_2$ and $p_1$ are the pressures of the fluid in absolute units (i.e. *absolute pressure* not gauge pressure).Ideal gas properties for air (and many other gases) can be obtained from the `PYroMat` library using the Burcat equations. The `kilojoule` library provides a wrapper to access this library using the same syntax as we used for the real-fluid library above. This wrapper is provided by the `idealgas` module.> `from kilojoule import idealgas` > `air = idealgas.Properties('Air')`
###Code
from kilojoule import idealgas
air = idealgas.Properties('Air', unit_system='kSI_C')
T_air = Quantity(450,'K')
p_air = Quantity(1.0,'atm')
h_air = air.h(T=T_air)
s_air = air.s(T=T_air,p=p_air)
print(f'h_air = {h_air}')
print(f's_air = {s_air}')
Calculations();
###Output
h_air = 149.37 kJ/kg
s_air = 7.114 kJ/kg/Δ°C
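###Markdown
As a quick sketch of the $\Delta s$ relation above (assumptions: the `air` ideal-gas object from the previous cell, and the specific gas constant of air taken as $R=0.287\,\text{kJ/kg/K}$), the entropy change between two states can be evaluated directly through the wrapper, which handles the pressure term internally, and compared against the tabulated $s^o$ form.
###Code
# Sketch of the Delta-s relation (R for air is an assumed value here)
import numpy as np
R_air = Quantity(0.287,'kJ/kg/K')
T_1 = Quantity(300.0,'K')
p_1 = Quantity(100.0,'kPa')
T_2 = Quantity(450.0,'K')
p_2 = Quantity(400.0,'kPa')
# Direct evaluation: the library accounts for the pressure dependence internally
Delta_s = air.s(T=T_2, p=p_2) - air.s(T=T_1, p=p_1)
# Equivalent tabulated form: the temperature-dependent part (evaluated at a common pressure)
# minus the pressure correction R*ln(p2/p1)
Delta_s_check = air.s(T=T_2, p=p_1) - air.s(T=T_1, p=p_1) - R_air*np.log(p_2/p_1)
Calculations();
###Output
_____no_output_____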
###Markdown
Pseudopure Real Fluid Air PropertiesWhile we can obtain reasonably accurate answers for many engineering systems that involve air using the ideal gas assumption, there are cases when we need to treat air as a real fluid instead. The `CoolProp` library used in `kilojoule` does not treat air as an ideal gas; rather, it treats air as a *pseudopure fluid*. In this context we call the air a *pseudopure fluid* because it is really a mixture (approximately $79\%\text{N}_2$ and $21\%\text{O}_2$) but we treat it as if it were a pure fluid with known properties. As a result, you still need to provide two independent, intensive properties when using the `kilojoule.realfluid.Properties` class with air.
###Code
air = realfluid.Properties(fluid='Air')
T_air = Quantity(450,'K')
p_air = Quantity(1.0,'atm')
h_air = air.h(T=T_air, p=p_air)
print(f'h_air = {h_air}')
Calculations();
###Output
h_air = 578.13 kJ/kg
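###Markdown
The enthalpy just computed is noticeably different from the ideal-gas value obtained earlier; the next cell explains why, and why this is not a problem as long as you work with *changes* in properties from a single source. As a quick sketch (using only the constructors and property calls already shown above; the variable names here are illustrative), the change in enthalpy over the same temperature interval can be computed from both tables and compared.
###Code
# Sketch: compare the *change* in enthalpy from the ideal-gas and real-fluid air tables
air_ideal = idealgas.Properties('Air', unit_system='kSI_C')   # PYroMat-based ideal-gas table
air_real = realfluid.Properties(fluid='Air')                  # CoolProp-based pseudopure table
T_a = Quantity(300.0,'K')
T_b = Quantity(450.0,'K')
p_ref = Quantity(1.0,'atm')
Delta_h_ideal = air_ideal.h(T=T_b) - air_ideal.h(T=T_a)
Delta_h_real = air_real.h(T=T_b, p=p_ref) - air_real.h(T=T_a, p=p_ref)
Calculations();
###Output
_____no_output_____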
###Markdown
At this point it is worth pointing out that the ideal gas tables and the real fluid tables gave significantly different answers for the enthalpy in the previous example (neither of which are in agreement with your textbook). This is because enthalpy is an integrated property and the libraries used to evaluate these properties use different reference states (or starting points for the integration). While this may seem like a major problem, it is not. The vast majority of our thermodynamics calculations will look at changes in integrated properties, such as internal energy, enthalpy, entropy, Gibbs function, etc., rather than their absolute values. So as long as you pull all your properties from the same table your final results will be (nearly) the same regardless of which set of tables you used. However, you cannot mix and match between different property sources. Humid AirLater in this course we will study mixtures, rather than just pure substances. One common mixture encountered in many engineering applications is humid air (a mixture of air and water vapor). Because we will be treating humid air as a mixture of two substances (with air still being treated as a pseudopure fluid), we will need three independent intensive properties to fix the state. The fluid properties for humid air can be reached in the same way as the pure/pseudopure substance, with the exception that you need to provide three independent properties to fix the state instead of two and you need to use the `humidair.Properties` class instead of the `realfluid.Properties` class.
###Code
from kilojoule.templates.humidair_default import *
# Start with air at 30 C, 50% relative humidity, at 1 atmosphere
T[1] = Quantity(30,'degC')
rel_hum[1] = 0.5
p[1] = Quantity(1,'atm')
T_wb[1] = humidair.T_wb(T[1],p[1],rel_hum[1])
h[1] = humidair.h(T[1],p[1],rel_hum[1])
v[1] = humidair.v(T[1],p[1],rel_hum[1])
s[1] = humidair.s(T[1],p[1],rel_hum[1])
omega[1] = humidair.omega(T[1],p[1],rel_hum[1])
# Use a simple cooling process to lower the temperature and dehumidify by cooling to 10 C
T[2] = Quantity(10,'degC')
rel_hum[2] = 1
p[2] = p[1]
states.fix(2,humidair)
states.display()
###Output
_____no_output_____
###Markdown
The `kilojoule` library provides a routine for drawing psychrometric charts to visualize humid air systems. *Note: this can be used to generate psychrometric charts for non-standard pressures and unit systems*
###Code
psych = humidair.psychrometric_chart()
psych.plot_state(states[1])
psych.plot_state(states[2],label_loc='south east')
psych.plot_process(states[1],states[2],path='simple cooling');
from kilojoule.templates.humidair_default import *
p[1] = Quantity(85,'kPa')
humidair.p = Quantity(p[1])
# Start with air at 30 C, 50% relative humidity, at 0.85 atmosphere
T[1] = Quantity(30,'degC')
rel_hum[1] = 0.5
T_wb[1] = humidair.T_wb(T[1],p[1],rel_hum[1])
h[1] = humidair.h(T[1],p[1],rel_hum[1])
v[1] = humidair.v(T[1],p[1],rel_hum[1])
s[1] = humidair.s(T[1],p[1],rel_hum[1])
omega[1] = humidair.omega(T[1],p[1],rel_hum[1])
# Use a simple cooling process to lower the temperature and dehumidify by cooling to 10 C
T[2] = Quantity(10,'degC')
rel_hum[2] = 1
p[2] = p[1]
states.fix(2,humidair)
states.display()
psych = humidair.psychrometric_chart()
psych.plot_state(states[1])
psych.plot_state(states[2],label_loc='south east')
psych.plot_process(states[1],states[2],path='simple cooling');
###Output
_____no_output_____
###Markdown
Equation FormattingSimply arriving at the correct answer for a problem is only half the battle. You then need to be able to communicate your methods and results to a range of audiences (in this class your instructor). This should be done following technical writing conventions with a narrative discussion of your process including properly formatted equations and sample calculations. It is not sufficient to simply submit your code and a final numerical answer or a long list of equations without any explanation.Throughout your academic career you have learned many different conventions (shorthand) for writing down mathematical concepts, i.e. to show a variable is being raised to a power we put that power in the superscript $x^2$. However, there is no key on your keyboard to make that 2 shrink in size and move above the variable. You'll also notice that the $x$ was not written in the same font as the rest of the text. It is convention for variables to be written in italics rather than normal font because it helps the reader quickly distinguish them from regular text (you probably already do this in your head without realizing it).There are a few ways to create properly formatted equations. While the Microsoft equation editor has improved greatly in recent years, the most powerful tool is the formatting language $\LaTeX$. $\LaTeX$ has been around for many decades and it was developed to represent complex mathematical expressions using plain text (just the keys on a regular keyboard). While there is a bit of a learning curve if you choose to start using $\LaTeX$, your efforts will pay off many times over as you will find that most scientific/mathematical software has $\LaTeX$ support built in. In fact, learning $\LaTeX$ will even make you faster when you do need to use Microsoft Equations editor because it includes support for many $\LaTeX$ symbol names.The Jupyter notebook this document is being created in has built-in $\LaTeX$ support. In some of the earlier examples you may have noticed special symbols in some of the output, such as $\Delta^\circ\text{C}$. Those were created using $\LaTeX$ formatting and the special symbols in this explanation were also created using $\LaTeX$ formatting (if you are reading this in a live notebook, double-click on this cell to see the source code written in Markdown syntax). You can include inline math, $f(x)=5x^2-3x+2$, or you can include "display" math $$f(x) = \int_0^\infty\frac{3}{2}x\ dx$$To help you convert your calculations into technical writing format, the `kilojoule` library provides a few convenience functions in its `display` module to automate the $\LaTeX$ creation process. The `display.Calculations()` class will trigger a process that attempts to convert the code in the current cell to $\LaTeX$ and show the progression of the calculations from symbolic to final numerical form.To demonstrate the use of `display.Calculations()` we'll step through the evaluation and display of the function $\psi=ax-cx^2+\frac{b}{x}$. We'll start by defining values for `a`, `b`, and `x`.
###Code
from kilojoule.display import Calculations
from kilojoule.units import Quantity
a = Quantity(3.2,'psi')
b = Quantity(1,'kPa')
x = 2
Calculations();
###Output
_____no_output_____
###Markdown
In this example, the lines defining `a`, `b`, and `x` were simple definitions involving no mathematical operations, so they are shown in simple form. By placing the line `display.Calculations();` at the end of the cell, we trigger a sequence where the code in the cell is parsed for strings resembling equations, which are then displayed with $\LaTeX$ formatting. In the next cell we will define `c` as being equal to `a`.
###Code
c = a
Calculations();
###Output
_____no_output_____
###Markdown
In this example, we see 3 terms rather than two. This line still has no mathematical operations, but there is a train of logic where we are setting $c$ equal to $a$. While it is important to show the numerical value of $c$ as being $3.2\ \mathrm{psi}$, it is also important (possibly more important) to show the process that led to $c$ having that value, so we show the symbolic form of the expression $c=a$ as well.Let's now evaluate a full equation with mathematical operations.
###Code
psi = a*x - c*x**2 + b/x
Calculations();
###Output
_____no_output_____
###Markdown
In this example the equation is expressed in 3 lines. The first line shows the symbolic form of the equation, which shows the reader the process or logic that is being applied. The second line shows numerical values in place of each symbol, which shows the propagation of information from earlier calculations. Finally the third line shows the numerical value resulting from the calculation. *Note: this is the form you should use when writing out calculations by hand as well.* Also, notice that the variable name `psi` was recognized as being a Greek letter and converted to the $\LaTeX$ equivalent of `\psi`. This will work for most cases if you define your variable names carefully.Using the `display.Calculations()` command will allow you to properly format your calculations, but you will still need to provide a narrative discussion to describe your process to the reader. You can do this in a Jupyter notebook by interspersing `Markdown` cells like this one between your equations, or you can place your narrative in your code as comments that will be shown in your output using the `comments=True` (by default) option for the `display.Calculations()` class.
###Code
# You can place comments in your code to describe your process.
# The comments will be processed as `Markdown` so you can apply **formatting** if desired
# For instance, let's calculate the amount of heat transfer required to decrease the temperature of air from 400 K to 300 K in a constant pressure process assuming constant specific heat. We can start by defining some known parameters,
T_1 = Quantity(400,'K')
T_2 = Quantity(300,'K')
c_p = Quantity(1.005,'kJ/kg/K')
# We can then solve the first law for $Q$ and substitute $c_p\Delta T$ for $\Delta h$
Q_1_to_2 = c_p*(T_2-T_1)
calcs = Calculations(comments=True);
###Output
_____no_output_____
###Markdown
You may have noticed that in the example above, we stored the result of `Calculations()` in the variable `calcs`. This gives us access to the $\LaTeX$ used to generate the output, which can be accessed at `calcs.output`. This can be useful if you are learning $\LaTeX$ and want to see how an equation was created or if you want to export the $\LaTeX$ code for inclusion in another document.
###Code
print(calcs.output)
###Output
You can place comments in your code to describe your process. <br/> The comments will be processed as `Markdown` so you can apply **formatting** if desired<br/> For instance, let's calculate the amount of heat transfer required to decrease the temperature of air from 400 K to 300 K in a constant pressure process assuming constant specific heat. We can start by defining some known parameters, <br/>\[
\begin{aligned}
{ T_{1} }&={ \left( 400\,\mathrm{K} \right) } = { 400\ \mathrm{K} }
\end{aligned}
\]
\[
\begin{aligned}
{ T_{2} }&={ \left( 300\,\mathrm{K} \right) } = { 300\ \mathrm{K} }
\end{aligned}
\]
\[
\begin{aligned}
{ c_{p} }&={ \left( 1.005\,\frac{\mathrm{kJ}}{\left(\mathrm{K} \cdot \mathrm{kg}\right)} \right) } = { 1.005\ \frac{\mathrm{kJ}}{\left(\mathrm{K} \cdot \mathrm{kg}\right)} }
\end{aligned}
\]
We can then solve the first law for $Q$ and substitute $c_p\Delta T$ for $\Delta h$<br/>\[
\begin{aligned}{ Q_{1\to{}2} }&={ c_{p} \left(T_{2} - T_{1}\right) }\\
&={ \left( 1.005\ \frac{\mathrm{kJ}}{\left(\mathrm{K} \cdot \mathrm{kg}\right)} \right) \left(\left( 300\ \mathrm{K} \right) - \left( 400\ \mathrm{K} \right)\right) }\\
&={ -100.5\ \frac{\mathrm{kJ}}{\mathrm{kg}} }
\end{aligned}
\]
###Markdown
The `kilojoule` library also provides a quick way to show the current value of all the quantities and property tables defined in the local namespace using the `display.Summary()` class, just the quantities using `display.Quantities()`, or just the property tables using `display.QuantityTables()`
###Code
import kilojoule as kj
kj.display.Summary(n_col=4);
kj.display.Quantities(n_col=6);
kj.display.QuantityTables();
###Output
_____no_output_____ |
tutorial-contents-notebooks/202_variable.ipynb | ###Markdown
202 VariableView more, visit my tutorial page: https://morvanzhou.github.io/tutorials/My Youtube Channel: https://www.youtube.com/user/MorvanZhouDependencies:* torch: 0.1.11Variable in torch is used to build a computational graph, but this graph is dynamic compared with the static graphs in Tensorflow or Theano. So torch does not have placeholders; torch can just pass variables to the computational graph.
###Code
import torch
from torch.autograd import Variable
tensor = torch.FloatTensor([[1,2],[3,4]]) # build a tensor
variable = Variable(tensor, requires_grad=True) # build a variable, usually for compute gradients
print(tensor) # [torch.FloatTensor of size 2x2]
print(variable) # [torch.FloatTensor of size 2x2]
###Output
1 2
3 4
[torch.FloatTensor of size 2x2]
Variable containing:
1 2
3 4
[torch.FloatTensor of size 2x2]
###Markdown
So far the tensor and the variable seem the same. However, the variable is a part of the graph; it is a part of the automatic gradient (autograd) machinery.
###Code
t_out = torch.mean(tensor*tensor) # x^2
v_out = torch.mean(variable*variable) # x^2
print(t_out)
print(v_out)
v_out.backward() # backpropagation from v_out
###Output
_____no_output_____
###Markdown
$$ v_{out} = {{1} \over {4}} sum(variable^2) $$the gradients w.r.t the variable, $$ {d(v_{out}) \over d(variable)} = {{1} \over {4}} 2 variable = {variable \over 2}$$let's check the result pytorch calculated for us below:
###Code
variable.grad
variable # this is data in variable format
variable.data # this is data in tensor format
variable.data.numpy() # numpy format
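# Quick sanity check (a sketch): since v_out = mean(variable**2) over 4 elements,
# the analytic gradient is variable/2, so variable.data/2 should match variable.grad above.
# print(variable.data / 2)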
###Output
_____no_output_____
###Markdown
Note that we did `.backward()` on `v_out` but `variable` has been assigned new values on its `grad`. This is because the line ```v_out = torch.mean(variable*variable)``` makes a new variable `v_out` and connects it with `variable` in the computation graph.
###Code
type(v_out)
type(v_out.data)
###Output
_____no_output_____
###Markdown
202 VariableView more, visit my tutorial page: https://morvanzhou.github.io/tutorials/My Youtube Channel: https://www.youtube.com/user/MorvanZhouDependencies:* torch: 1.4.0Variable in torch is used to build a computational graph, but this graph is dynamic compared with the static graphs in Tensorflow or Theano. So torch does not have placeholders; torch can just pass variables to the computational graph.
###Code
import torch
from torch.autograd import Variable
tensor = torch.FloatTensor([[1,2],[3,4]]) # build a tensor
variable = Variable(tensor, requires_grad=True) # build a variable, usually for compute gradients
print(tensor) # [torch.FloatTensor of size 2x2]
print(variable) # [torch.FloatTensor of size 2x2]
tensor = torch.FloatTensor([[1,2],[3,4]])
variable = Variable(tensor, requires_grad=True)
print(tensor)
print(variable)
###Output
tensor([[1., 2.],
[3., 4.]])
tensor([[1., 2.],
[3., 4.]], requires_grad=True)
tensor([[1., 2.],
[3., 4.]])
tensor([[1., 2.],
[3., 4.]], requires_grad=True)
###Markdown
So far the tensor and the variable seem the same. However, the variable is a part of the graph; it is a part of the automatic gradient (autograd) machinery.
###Code
t_out = torch.mean(tensor*tensor) # x^2
v_out = torch.mean(variable*variable) # x^2
print(t_out)
print(v_out)
t_out = torch.mean(tensor*tensor)
v_out = torch.mean(variable*variable)
print(t_out)
print(v_out)
v_out.backward() # backpropagation from v_out
###Output
_____no_output_____
###Markdown
$$ v_{out} = {{1} \over {4}} sum(variable^2) $$the gradients w.r.t the variable, $$ {d(v_{out}) \over d(variable)} = {{1} \over {4}} 2 variable = {variable \over 2}$$let's check the result pytorch calculated for us below:
###Code
variable.grad
variable # this is data in variable format
variable.data # this is data in tensor format
variable.data.numpy() # numpy format
###Output
_____no_output_____
###Markdown
Note that we did `.backward()` on `v_out` but `variable` has been assigned new values on its `grad`. This is because the line ```v_out = torch.mean(variable*variable)``` makes a new variable `v_out` and connects it with `variable` in the computation graph.
###Code
type(v_out)
type(v_out.data)
###Output
_____no_output_____
###Markdown
202 VariableView more, visit my tutorial page: https://mofanpy.com/tutorials/My Youtube Channel: https://www.youtube.com/user/MorvanZhouDependencies:* torch: 0.1.11Variable in torch is used to build a computational graph, but this graph is dynamic compared with the static graphs in Tensorflow or Theano. So torch does not have placeholders; torch can just pass variables to the computational graph.
###Code
import torch
from torch.autograd import Variable
tensor = torch.FloatTensor([[1,2],[3,4]]) # build a tensor
variable = Variable(tensor, requires_grad=True) # build a variable, usually for compute gradients
print(tensor) # [torch.FloatTensor of size 2x2]
print(variable) # [torch.FloatTensor of size 2x2]
###Output
1 2
3 4
[torch.FloatTensor of size 2x2]
Variable containing:
1 2
3 4
[torch.FloatTensor of size 2x2]
###Markdown
So far the tensor and the variable seem the same. However, the variable is a part of the graph; it is a part of the automatic gradient (autograd) machinery.
###Code
t_out = torch.mean(tensor*tensor) # x^2
v_out = torch.mean(variable*variable) # x^2
print(t_out)
print(v_out)
v_out.backward() # backpropagation from v_out
###Output
_____no_output_____
###Markdown
$$ v_{out} = {{1} \over {4}} sum(variable^2) $$the gradients w.r.t the variable, $$ {d(v_{out}) \over d(variable)} = {{1} \over {4}} 2 variable = {variable \over 2}$$let's check the result pytorch calculated for us below:
###Code
variable.grad
variable # this is data in variable format
variable.data # this is data in tensor format
variable.data.numpy() # numpy format
###Output
_____no_output_____
###Markdown
Note that we did `.backward()` on `v_out` but `variable` has been assigned new values on its `grad`. This is because the line ```v_out = torch.mean(variable*variable)``` makes a new variable `v_out` and connects it with `variable` in the computation graph.
###Code
type(v_out)
type(v_out.data)
###Output
_____no_output_____
###Markdown
202 VariableView more, visit my tutorial page: https://morvanzhou.github.io/tutorials/My Youtube Channel: https://www.youtube.com/user/MorvanZhouVariable in torch is used to build a computational graph, but this graph is dynamic compared with the static graphs in Tensorflow or Theano. So torch does not have placeholders; torch can just pass variables to the computational graph.
###Code
import torch
from torch.autograd import Variable
tensor = torch.FloatTensor([[1,2],[3,4]]) # build a tensor
variable = Variable(tensor, requires_grad=True) # build a variable, usually for compute gradients
print(tensor) # [torch.FloatTensor of size 2x2]
print(variable) # [torch.FloatTensor of size 2x2]
###Output
tensor([[1., 2.],
[3., 4.]])
tensor([[1., 2.],
[3., 4.]], requires_grad=True)
###Markdown
So far the tensor and the variable seem the same. However, the variable is a part of the graph; it is a part of the automatic gradient (autograd) machinery.
###Code
t_out = torch.mean(tensor*tensor) # x^2
v_out = torch.mean(variable*variable) # x^2
print(t_out)
print(v_out)
v_out.backward() # backpropagation from v_out
###Output
_____no_output_____
###Markdown
$$ v_{out} = {{1} \over {4}} sum(variable^2) $$the gradients w.r.t the variable, $$ {d(v_{out}) \over d(variable)} = {{1} \over {4}} 2 variable = {variable \over 2}$$let's check the result pytorch calculated for us below:
###Code
variable.grad
variable # this is data in variable format
variable.data # this is data in tensor format
variable.data.numpy() # numpy format
###Output
_____no_output_____
###Markdown
Note that we did `.backward()` on `v_out` but `variable` has been assigned new values on its `grad`. This is because the line ```v_out = torch.mean(variable*variable)``` makes a new variable `v_out` and connects it with `variable` in the computation graph.
###Code
type(v_out)
type(v_out.data)
###Output
_____no_output_____
###Markdown
202 VariableView more, visit my tutorial page: https://morvanzhou.github.io/tutorials/My Youtube Channel: https://www.youtube.com/user/MorvanZhouDependencies:* torch: 0.5.0a0 (0.4.1+)Variable in torch is used to build a computational graph, but this graph is dynamic compared with the static graphs in Tensorflow or Theano. So torch does not have placeholders; torch can just pass variables to the computational graph.
###Code
import torch
from torch.autograd import Variable
tensor = torch.FloatTensor([[1,2],[3,4]]) # build a tensor
variable = Variable(tensor, requires_grad=True) # build a variable, usually for compute gradients
# Varialbe() can only accept tensor as its argument, it cannot accept list or np.array
# TypeError: Variable data has to be a tensor, but got list
# TypeError: Variable data has to be a tensor, but got numpy.ndarray
print(tensor) # [torch.FloatTensor of size 2x2]
print(variable) # [torch.FloatTensor of size 2x2]
###Output
tensor([[1., 2.],
[3., 4.]])
tensor([[1., 2.],
[3., 4.]], requires_grad=True)
###Markdown
So far the tensor and the variable seem the same. However, the variable is a part of the graph; it is a part of the automatic gradient (autograd) machinery.
###Code
t_out = torch.mean(tensor*tensor) # x^2
v_out = torch.mean(variable*variable) # x^2
print(t_out)
print(v_out)
v_out.backward() # backpropagation from v_out
###Output
_____no_output_____
###Markdown
$$ v_{out} = {{1} \over {4}} sum(variable^2) $$the gradients w.r.t the variable, $$ {d(v_{out}) \over d(variable)} = {{1} \over {4}} 2 variable = {variable \over 2}$$let's check the result pytorch calculated for us below:
###Code
variable.grad
variable # this is data in variable format
variable.data # this is data in tensor format
variable.data.numpy() # numpy format
###Output
_____no_output_____
###Markdown
Note that we did `.backward()` on `v_out` but `variable` has been assigned new values on its `grad`. This is because the line ```v_out = torch.mean(variable*variable)``` makes a new variable `v_out` and connects it with `variable` in the computation graph.
###Code
type(v_out)
type(v_out.data)
###Output
_____no_output_____ |
Xanadu4.ipynb | ###Markdown
###Code
pip install pennylane
pip install pennylane-qchem
import pennylane as qml
from pennylane import numpy as np
###Output
_____no_output_____
###Markdown
Quantum Chemistry with PennyLane
###Code
# Specify the molecule and its parameters
geometry = 'h2.xyz' #atomic species + x,y,z coordinates
charge = 0 # net charge of the molecule (0 for neutral H2)
multiplicity = 1 # spin multiplicity (2S+1) of the molecular state
basis_set = 'sto-3g'
name = 'h2'
h, nr_qubits = qml.qchem.generate_hamiltonian(name,
geometry,
charge,
multiplicity,
basis_set,
n_active_electrons = 2,
n_active_orbitals = 2,
mapping = 'jordan_wigner'
)
print("Number of qubits = ", nr_qubits)
print("Hamiltonian is ", h)
dev = qml.device('default.qubit', wires = nr_qubits)
def circuit(params, wires):
qml.BasisState(np.array([1,1,0,0]), wires=wires)
for i in wires:
qml.Rot(*params[i], wires=i)
qml.CNOT(wires=[2,3])
qml.CNOT(wires=[2,0])
qml.CNOT(wires=[3,1])
cost_fn = qml.VQECost(circuit, h ,dev)
opt = qml.GradientDescentOptimizer(stepsize=0.4)
params = np.random.normal(0, np.pi, (nr_qubits, 3))
max_iterations = 200  # assumed value; the original notebook does not define max_iterations
for n in range(max_iterations):
params = opt.step(cost_fn, params)
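    # (Sketch) Optionally monitor convergence by evaluating the cost every few steps, e.g.:
    # if n % 20 == 0:
    #     print('Step', n, 'energy =', cost_fn(params))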
###Output
_____no_output_____ |
notebooks/layers/wrappers/TimeDistributed.ipynb | ###Markdown
TimeDistributed **[wrappers.TimeDistributed.0] wrap a Dense layer with units 4 (input: 3 x 6)**
###Code
data_in_shape = (3, 6)
layer_0 = Input(shape=data_in_shape)
layer_1 = TimeDistributed(Dense(4))(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
np.random.seed(4000 + i)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
weight_names = ['W', 'b']
for w_i, w_name in enumerate(weight_names):
print('{} shape:'.format(w_name), weights[w_i].shape)
print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['wrappers.TimeDistributed.0'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
###Output
W shape: (6, 4)
W: [0.317596, 0.688515, -0.688309, -0.48247, 0.387223, -0.718263, 0.281673, -0.106311, 0.576861, -0.083926, 0.631691, 0.92647, 0.579655, -0.024215, -0.805793, -0.842947, -0.955415, 0.656415, 0.44667, 0.633739, 0.701525, 0.917507, -0.185671, -0.105247]
b shape: (4,)
b: [-0.332867, 0.650317, 0.995501, -0.458367]
in shape: (3, 6)
in: [-0.30351, 0.37881, -0.248093, 0.372204, -0.698964, -0.408058, -0.103801, 0.376217, -0.724015, 0.708616, -0.513219, -0.46074, -0.125163, -0.76111, -0.153798, 0.729255, 0.556458, -0.671966]
out shape: (3, 4)
out: [0.171595, -0.652137, 0.618031, -1.295817, -0.05994, -0.407387, 0.000875, -1.993142, -1.33639, 0.854801, 0.555804, -0.650907]
###Markdown
**[wrappers.TimeDistributed.1] wrap a Conv2D layer with 6 3x3 filters (input: 5x4x4x2)**
###Code
data_in_shape = (5, 4, 4, 2)
layer_0 = Input(shape=data_in_shape)
layer_1 = TimeDistributed(Conv2D(6, (3,3), data_format='channels_last'))(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
np.random.seed(4010 + i)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
weight_names = ['W', 'b']
for w_i, w_name in enumerate(weight_names):
print('{} shape:'.format(w_name), weights[w_i].shape)
print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['wrappers.TimeDistributed.1'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
###Output
W shape: (3, 3, 2, 6)
W: [0.971827, -0.898904, -0.987921, 0.529589, 0.043586, -0.541366, 0.316759, 0.351387, -0.292323, 0.445466, -0.922655, 0.437413, -0.483267, -0.478014, 0.7408, -0.595028, -0.718381, 0.349594, -0.091293, 0.14291, 0.633818, -0.686841, -0.925272, -0.740397, 0.070594, 0.67408, 0.455314, -0.402251, 0.288807, 0.001378, 0.42892, -0.251869, 0.06113, -0.703784, 0.002676, 0.965023, 0.758788, 0.1193, 0.749321, -0.017408, -0.004115, 0.18981, -0.91507, 0.132792, -0.219057, 0.19682, -0.512841, 0.954544, 0.794403, -0.663179, -0.05377, -0.855038, -0.486641, 0.625844, -0.945869, -0.474979, 0.922345, -0.334843, -0.469456, -0.394364, 0.543681, -0.817676, 0.6093, -0.77635, -0.508683, 0.22456, 0.696262, 0.079806, -0.182646, -0.718939, 0.962504, -0.386231, 0.860488, -0.918945, -0.800484, -0.590285, 0.409804, -0.822098, 0.3489, -0.4508, 0.913208, -0.414455, 0.97663, 0.956314, -0.55547, 0.594094, -0.552044, -0.137467, 0.539049, -0.320055, -0.335577, 0.974746, -0.634747, 0.085161, -0.127183, -0.061717, -0.411844, 0.774181, 0.223395, 0.163937, -0.606967, 0.178549, -0.005153, 0.452476, 0.373127, -0.726827, -0.395458, -0.769671]
b shape: (6,)
b: [0.180389, 0.629217, -0.656262, -0.476575, -0.36398, 0.987756]
in shape: (5, 4, 4, 2)
in: [-0.579677, 0.883193, 0.651172, -0.820251, -0.64795, 0.857328, -0.4689, 0.356044, -0.641528, -0.531973, -0.33586, -0.438823, 0.682186, 0.215781, -0.401735, 0.169171, 0.869358, -0.204078, -0.661876, -0.616139, -0.453943, -0.569439, -0.25218, 0.156473, 0.194797, -0.923921, 0.652204, -0.11765, 0.86293, 0.314218, -0.878496, -0.364761, -0.647821, 0.296841, 0.280105, 0.2753, -0.959741, -0.148037, -0.489424, -0.88939, 0.704443, 0.08354, 0.930112, -0.87023, -0.212285, 0.750133, 0.343506, -0.82568, 0.391491, 0.149626, 0.003594, -0.181464, -0.499632, 0.20694, 0.1007, 0.39826, 0.609736, -0.765775, -0.728474, -0.011711, 0.543543, 0.174309, 0.105794, -0.009876, -0.694421, -0.157031, 0.670853, -0.581331, 0.739486, -0.886014, -0.637039, 0.725753, 0.61919, 0.447635, 0.167298, 0.164242, -0.615436, -0.503061, 0.981698, -0.392795, 0.532215, 0.761817, 0.735562, -0.236234, -0.856381, 0.22419, -0.221125, 0.133757, -0.011162, -0.88018, -0.433047, -0.825617, 0.693626, -0.185243, -0.824829, 0.07932, 0.336478, 0.370138, -0.685905, -0.462037, 0.563862, 0.490274, 0.934239, -0.129323, 0.717792, -0.73658, -0.939587, 0.796637, -0.131382, -0.79957, -0.271279, 0.816961, -0.082096, 0.64553, -0.106661, 0.651369, -0.843208, -0.221077, 0.758074, 0.156006, -0.429501, 0.191698, 0.988067, -0.277344, 0.757645, -0.877824, 0.053841, 0.394075, 0.786359, 0.735302, 0.247852, -0.310899, 0.703408, -0.848404, 0.455067, 0.295289, -0.629316, 0.626332, -0.075289, -0.442735, -0.219408, -0.766048, 0.303257, 0.142211, 0.910002, -0.780858, 0.333242, -0.533434, 0.572575, 0.355883, -0.671924, 0.22028, -0.505951, -0.317892, 0.609641, -0.360548, 0.490007, 0.441024, 0.660294, 0.850007]
out shape: (5, 2, 2, 6)
out: [2.089554, -2.186939, -1.436176, -0.951733, -0.212962, 2.449681, 1.053569, -0.592297, -0.875753, -0.803289, -0.834779, -0.568349, -0.842922, 3.976765, -1.054281, 0.581773, 0.235047, 0.103039, -0.079684, 0.225164, -2.408352, -1.116154, 1.561833, -0.491674, 2.43274, -0.158393, -0.874487, -1.96851, -0.106465, 1.602375, 0.941225, 0.480547, 0.002478, 1.246195, -1.388929, -1.133004, 1.476556, -0.459852, -2.130519, -0.126113, -1.162246, 1.398016, -0.61384, 1.539333, -0.466156, 0.0395, 0.506595, -1.590958, -1.044266, 0.736233, 0.61792, -0.923799, 1.275832, 1.491487, 1.903216, -2.385963, -1.553725, -0.554848, -0.456638, 1.645426, 0.690056, 0.190637, -2.015925, 1.143469, -2.530136, 1.025159, -0.150503, 2.627801, -1.352068, 1.245647, 1.235627, -0.915363, 0.682646, 0.854592, -0.030856, 0.949627, 1.204568, 1.052329, -0.942961, 2.039314, 0.892454, -1.925232, 0.046332, 2.315713, -2.358422, 1.724373, -1.528506, 1.794933, 0.342617, -0.191888, -0.026605, 0.475714, -1.332559, -1.158213, 0.028725, 1.890396, -0.305622, 0.890336, -3.426138, 1.245994, -2.027975, -0.505022, 1.32001, 0.477823, -2.460816, -0.984189, 1.221664, 0.339475, 1.26535, 2.228118, 0.207158, -0.455112, -0.64988, 0.688864, 0.574933, 1.911587, -1.642423, -1.385077, 0.744757, -0.567276]
###Markdown
export for Keras.js tests
###Code
print(json.dumps(DATA))
###Output
{"wrappers.TimeDistributed.0": {"expected": {"data": [0.171595, -0.652137, 0.618031, -1.295817, -0.05994, -0.407387, 0.000875, -1.993142, -1.33639, 0.854801, 0.555804, -0.650907], "shape": [3, 4]}, "input": {"data": [-0.30351, 0.37881, -0.248093, 0.372204, -0.698964, -0.408058, -0.103801, 0.376217, -0.724015, 0.708616, -0.513219, -0.46074, -0.125163, -0.76111, -0.153798, 0.729255, 0.556458, -0.671966], "shape": [3, 6]}, "weights": [{"data": [0.317596, 0.688515, -0.688309, -0.48247, 0.387223, -0.718263, 0.281673, -0.106311, 0.576861, -0.083926, 0.631691, 0.92647, 0.579655, -0.024215, -0.805793, -0.842947, -0.955415, 0.656415, 0.44667, 0.633739, 0.701525, 0.917507, -0.185671, -0.105247], "shape": [6, 4]}, {"data": [-0.332867, 0.650317, 0.995501, -0.458367], "shape": [4]}]}, "wrappers.TimeDistributed.1": {"expected": {"data": [2.089554, -2.186939, -1.436176, -0.951733, -0.212962, 2.449681, 1.053569, -0.592297, -0.875753, -0.803289, -0.834779, -0.568349, -0.842922, 3.976765, -1.054281, 0.581773, 0.235047, 0.103039, -0.079684, 0.225164, -2.408352, -1.116154, 1.561833, -0.491674, 2.43274, -0.158393, -0.874487, -1.96851, -0.106465, 1.602375, 0.941225, 0.480547, 0.002478, 1.246195, -1.388929, -1.133004, 1.476556, -0.459852, -2.130519, -0.126113, -1.162246, 1.398016, -0.61384, 1.539333, -0.466156, 0.0395, 0.506595, -1.590958, -1.044266, 0.736233, 0.61792, -0.923799, 1.275832, 1.491487, 1.903216, -2.385963, -1.553725, -0.554848, -0.456638, 1.645426, 0.690056, 0.190637, -2.015925, 1.143469, -2.530136, 1.025159, -0.150503, 2.627801, -1.352068, 1.245647, 1.235627, -0.915363, 0.682646, 0.854592, -0.030856, 0.949627, 1.204568, 1.052329, -0.942961, 2.039314, 0.892454, -1.925232, 0.046332, 2.315713, -2.358422, 1.724373, -1.528506, 1.794933, 0.342617, -0.191888, -0.026605, 0.475714, -1.332559, -1.158213, 0.028725, 1.890396, -0.305622, 0.890336, -3.426138, 1.245994, -2.027975, -0.505022, 1.32001, 0.477823, -2.460816, -0.984189, 1.221664, 0.339475, 1.26535, 2.228118, 0.207158, -0.455112, -0.64988, 0.688864, 0.574933, 1.911587, -1.642423, -1.385077, 0.744757, -0.567276], "shape": [5, 2, 2, 6]}, "input": {"data": [-0.579677, 0.883193, 0.651172, -0.820251, -0.64795, 0.857328, -0.4689, 0.356044, -0.641528, -0.531973, -0.33586, -0.438823, 0.682186, 0.215781, -0.401735, 0.169171, 0.869358, -0.204078, -0.661876, -0.616139, -0.453943, -0.569439, -0.25218, 0.156473, 0.194797, -0.923921, 0.652204, -0.11765, 0.86293, 0.314218, -0.878496, -0.364761, -0.647821, 0.296841, 0.280105, 0.2753, -0.959741, -0.148037, -0.489424, -0.88939, 0.704443, 0.08354, 0.930112, -0.87023, -0.212285, 0.750133, 0.343506, -0.82568, 0.391491, 0.149626, 0.003594, -0.181464, -0.499632, 0.20694, 0.1007, 0.39826, 0.609736, -0.765775, -0.728474, -0.011711, 0.543543, 0.174309, 0.105794, -0.009876, -0.694421, -0.157031, 0.670853, -0.581331, 0.739486, -0.886014, -0.637039, 0.725753, 0.61919, 0.447635, 0.167298, 0.164242, -0.615436, -0.503061, 0.981698, -0.392795, 0.532215, 0.761817, 0.735562, -0.236234, -0.856381, 0.22419, -0.221125, 0.133757, -0.011162, -0.88018, -0.433047, -0.825617, 0.693626, -0.185243, -0.824829, 0.07932, 0.336478, 0.370138, -0.685905, -0.462037, 0.563862, 0.490274, 0.934239, -0.129323, 0.717792, -0.73658, -0.939587, 0.796637, -0.131382, -0.79957, -0.271279, 0.816961, -0.082096, 0.64553, -0.106661, 0.651369, -0.843208, -0.221077, 0.758074, 0.156006, -0.429501, 0.191698, 0.988067, -0.277344, 0.757645, -0.877824, 0.053841, 0.394075, 0.786359, 0.735302, 0.247852, -0.310899, 0.703408, -0.848404, 0.455067, 0.295289, -0.629316, 0.626332, 
-0.075289, -0.442735, -0.219408, -0.766048, 0.303257, 0.142211, 0.910002, -0.780858, 0.333242, -0.533434, 0.572575, 0.355883, -0.671924, 0.22028, -0.505951, -0.317892, 0.609641, -0.360548, 0.490007, 0.441024, 0.660294, 0.850007], "shape": [5, 4, 4, 2]}, "weights": [{"data": [0.971827, -0.898904, -0.987921, 0.529589, 0.043586, -0.541366, 0.316759, 0.351387, -0.292323, 0.445466, -0.922655, 0.437413, -0.483267, -0.478014, 0.7408, -0.595028, -0.718381, 0.349594, -0.091293, 0.14291, 0.633818, -0.686841, -0.925272, -0.740397, 0.070594, 0.67408, 0.455314, -0.402251, 0.288807, 0.001378, 0.42892, -0.251869, 0.06113, -0.703784, 0.002676, 0.965023, 0.758788, 0.1193, 0.749321, -0.017408, -0.004115, 0.18981, -0.91507, 0.132792, -0.219057, 0.19682, -0.512841, 0.954544, 0.794403, -0.663179, -0.05377, -0.855038, -0.486641, 0.625844, -0.945869, -0.474979, 0.922345, -0.334843, -0.469456, -0.394364, 0.543681, -0.817676, 0.6093, -0.77635, -0.508683, 0.22456, 0.696262, 0.079806, -0.182646, -0.718939, 0.962504, -0.386231, 0.860488, -0.918945, -0.800484, -0.590285, 0.409804, -0.822098, 0.3489, -0.4508, 0.913208, -0.414455, 0.97663, 0.956314, -0.55547, 0.594094, -0.552044, -0.137467, 0.539049, -0.320055, -0.335577, 0.974746, -0.634747, 0.085161, -0.127183, -0.061717, -0.411844, 0.774181, 0.223395, 0.163937, -0.606967, 0.178549, -0.005153, 0.452476, 0.373127, -0.726827, -0.395458, -0.769671], "shape": [3, 3, 2, 6]}, {"data": [0.180389, 0.629217, -0.656262, -0.476575, -0.36398, 0.987756], "shape": [6]}]}}
###Markdown
TimeDistributed **[wrappers.TimeDistributed.0] wrap a Dense layer with units 4 (input: 3 x 6)**
###Code
random_seed = 2000
data_in_shape = (3, 6)
layer_0 = Input(shape=data_in_shape)
layer_1 = TimeDistributed(Dense(4))(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
np.random.seed(random_seed)
data_in = 2 * np.random.random(data_in_shape) - 1
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
np.random.seed(random_seed + i)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
DATA['wrappers.TimeDistributed.0'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
###Output
_____no_output_____
###Markdown
**[wrappers.TimeDistributed.1] wrap a Conv2D layer with 6 3x3 filters (input: 5x4x4x2)**
###Code
random_seed = 2000
data_in_shape = (5, 4, 4, 2)
layer_0 = Input(shape=data_in_shape)
layer_1 = TimeDistributed(Conv2D(6, (3,3), data_format='channels_last'))(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
np.random.seed(random_seed)
data_in = 2 * np.random.random(data_in_shape) - 1
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
np.random.seed(random_seed + i)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
DATA['wrappers.TimeDistributed.1'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
###Output
_____no_output_____
###Markdown
export for Keras.js tests
###Code
import os
filename = '../../../test/data/layers/wrappers/TimeDistributed.json'
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
json.dump(DATA, f)
print(json.dumps(DATA))
###Output
{"wrappers.TimeDistributed.0": {"input": {"data": [0.141035, 0.129058, -0.023116, -0.327044, -0.248264, 0.064072, -0.863787, 0.169058, -0.524204, -0.678487, -0.695762, -0.745862, -0.345118, 0.388308, -0.282067, 0.782731, -0.59624, -0.778795], "shape": [3, 6]}, "weights": [{"data": [0.141035, 0.129058, -0.023116, -0.327044, -0.248264, 0.064072, -0.863787, 0.169058, -0.524204, -0.678487, -0.695762, -0.745862, -0.345118, 0.388308, -0.282067, 0.782731, -0.59624, -0.778795, 0.055114, 0.735311, -0.476251, -0.00121, -0.142871, 0.060008], "shape": [6, 4]}, {"data": [-0.665749, -0.838004, 0.920451, 0.8769], "shape": [4]}], "expected": {"data": [-0.435401, -0.729574, 0.891208, 0.435142, 0.449463, -0.303688, 1.418705, 0.491531, -0.206694, 0.102946, 0.646889, 1.393312], "shape": [3, 4]}}, "wrappers.TimeDistributed.1": {"input": {"data": [0.141035, 0.129058, -0.023116, -0.327044, -0.248264, 0.064072, -0.863787, 0.169058, -0.524204, -0.678487, -0.695762, -0.745862, -0.345118, 0.388308, -0.282067, 0.782731, -0.59624, -0.778795, 0.055114, 0.735311, -0.476251, -0.00121, -0.142871, 0.060008, 0.147894, -0.216289, -0.840972, 0.734562, -0.670993, 0.606963, -0.424144, -0.462858, 0.434956, 0.762811, 0.98424, -0.0833, 0.570259, 0.477388, -0.052834, -0.030331, 0.86601, 0.505308, -0.681422, -0.730379, -0.178646, 0.513073, -0.574974, -0.371941, -0.597453, 0.87685, 0.00883, 0.207463, 0.675097, 0.220365, 0.471146, -0.180468, -0.02072, 0.017849, 0.012965, 0.236682, 0.66921, 0.173131, -0.957385, 0.471247, 0.841267, 0.511354, -0.430488, 0.899198, 0.679766, 0.6299, 0.487356, 0.829739, 0.792468, -0.759192, -0.25087, -0.47263, -0.357234, 0.453933, 0.475895, -0.071184, 0.528075, -0.516599, 0.807998, 0.143212, -0.373437, -0.952796, 0.040104, 0.160803, -0.743346, 0.534637, 0.92295, -0.646198, 0.692993, 0.776042, -0.474673, 0.98543, 0.184755, -0.30293, -0.022748, 0.135056, 0.643602, 0.502182, 0.218371, -0.033313, 0.643199, 0.824619, -0.750012, 0.913739, 0.493227, -0.223239, 0.962789, -0.051118, -0.649982, 0.028419, 0.749459, 0.525479, -0.045743, -0.979086, -0.669124, -0.35604, -0.444705, 0.763697, 0.780956, 0.99184, 0.253779, -0.425107, 0.913943, 0.032233, 0.04514, 0.302513, 0.234363, -0.133738, 0.062949, -0.420645, 0.063063, 0.515796, -0.982415, 0.2082, 0.685465, 0.948766, 0.572355, 0.526904, -0.070458, 0.363551, 0.61422, 0.201742, 0.831489, 0.011985, -0.426671, 0.597698, -0.259001, 0.138813, 0.482189, -0.30467, 0.396641, 0.487531, 0.455809, 0.803298, 0.753098, 0.123971], "shape": [5, 4, 4, 2]}, "weights": [{"data": [0.141035, 0.129058, -0.023116, -0.327044, -0.248264, 0.064072, -0.863787, 0.169058, -0.524204, -0.678487, -0.695762, -0.745862, -0.345118, 0.388308, -0.282067, 0.782731, -0.59624, -0.778795, 0.055114, 0.735311, -0.476251, -0.00121, -0.142871, 0.060008, 0.147894, -0.216289, -0.840972, 0.734562, -0.670993, 0.606963, -0.424144, -0.462858, 0.434956, 0.762811, 0.98424, -0.0833, 0.570259, 0.477388, -0.052834, -0.030331, 0.86601, 0.505308, -0.681422, -0.730379, -0.178646, 0.513073, -0.574974, -0.371941, -0.597453, 0.87685, 0.00883, 0.207463, 0.675097, 0.220365, 0.471146, -0.180468, -0.02072, 0.017849, 0.012965, 0.236682, 0.66921, 0.173131, -0.957385, 0.471247, 0.841267, 0.511354, -0.430488, 0.899198, 0.679766, 0.6299, 0.487356, 0.829739, 0.792468, -0.759192, -0.25087, -0.47263, -0.357234, 0.453933, 0.475895, -0.071184, 0.528075, -0.516599, 0.807998, 0.143212, -0.373437, -0.952796, 0.040104, 0.160803, -0.743346, 0.534637, 0.92295, -0.646198, 0.692993, 0.776042, -0.474673, 0.98543, 0.184755, -0.30293, -0.022748, 0.135056, 
0.643602, 0.502182, 0.218371, -0.033313, 0.643199, 0.824619, -0.750012, 0.913739], "shape": [3, 3, 2, 6]}, {"data": [-0.665749, -0.838004, 0.920451, 0.8769, 0.241718, -0.148796], "shape": [6]}], "expected": {"data": [-1.275021, -0.839414, 2.261197, 1.382413, -1.349136, -0.458723, 0.035198, 0.059575, 3.289305, -0.112961, 1.894473, 0.056991, 1.030516, -1.41503, 3.639513, 1.312105, 0.7178, 1.037027, -0.345448, -1.125389, 2.561248, 1.919182, 2.434602, -0.628632, -1.548767, -0.63035, 1.276973, 2.352928, 0.291083, -0.290137, -0.282871, -1.546028, 1.196641, 0.191184, -1.299763, -0.378339, -1.150905, -2.640953, 1.198463, 1.328465, 0.673755, 0.544141, -0.052356, -0.622702, 1.430426, 1.480166, 0.264735, 0.941609, -0.76932, -0.524748, -0.097613, -0.037621, 0.036755, 0.66339, -1.016433, -0.324838, -1.200336, 0.691055, 0.364328, -1.434147, -0.663102, -1.222518, 2.413622, 0.589465, 2.182118, 0.722914, 1.059836, -2.253524, 1.283919, 2.72919, -1.923285, 3.112732, 0.110403, -2.895395, -0.192991, 2.223259, 0.511342, 0.309826, -1.961463, -0.378234, -1.644435, 0.665466, -0.049178, -2.035013, -0.420975, -2.059799, 0.095355, -0.333549, -1.101064, 0.390433, -1.458754, -2.171488, -0.077547, 0.216551, -0.499852, -0.68166, -0.994649, -1.396949, 0.72207, 1.646976, -1.717063, 1.138718, 0.033565, -1.549827, 1.732043, 2.246031, 0.799031, 1.133765, -1.329821, -0.199264, 1.268272, 3.943292, -0.412988, 1.989369, 0.331454, -1.667789, 1.287873, 1.011695, -0.393542, 0.922799], "shape": [5, 2, 2, 6]}}}
###Markdown
TimeDistributed **[wrappers.TimeDistributed.0] wrap a Dense layer with output_dim 4 (input: 3 x 6)**
###Code
data_in_shape = (3, 6)
layer_0 = Input(shape=data_in_shape)
layer_1 = TimeDistributed(Dense(4))(layer_0)
model = Model(input=layer_0, output=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
np.random.seed(4000 + i)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
weight_names = ['W', 'b']
for w_i, w_name in enumerate(weight_names):
print('{} shape:'.format(w_name), weights[w_i].shape)
print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
print('')
print('in shape:', data_in_shape)
print('in:', format_decimal(data_in.ravel().tolist()))
result = model.predict(np.array([data_in]))
print('out shape:', result[0].shape)
print('out:', format_decimal(result[0].ravel().tolist()))
###Output
W shape: (6, 4)
W: [0.317596, 0.688515, -0.688309, -0.48247, 0.387223, -0.718263, 0.281673, -0.106311, 0.576861, -0.083926, 0.631691, 0.92647, 0.579655, -0.024215, -0.805793, -0.842947, -0.955415, 0.656415, 0.44667, 0.633739, 0.701525, 0.917507, -0.185671, -0.105247]
b shape: (4,)
b: [-0.332867, 0.650317, 0.995501, -0.458367]
in shape: (3, 6)
in: [-0.30351, 0.37881, -0.248093, 0.372204, -0.698964, -0.408058, -0.103801, 0.376217, -0.724015, 0.708616, -0.513219, -0.46074, -0.125163, -0.76111, -0.153798, 0.729255, 0.556458, -0.671966]
out shape: (3, 4)
out: [0.171595, -0.652137, 0.618031, -1.295817, -0.05994, -0.407387, 0.000875, -1.993142, -1.33639, 0.854801, 0.555804, -0.650907]
###Markdown
**[wrappers.TimeDistributed.1] wrap a Convolution2D layer with 6 3x3 filters (input: 5x4x4x2)**
###Code
data_in_shape = (5, 4, 4, 2)
layer_0 = Input(shape=data_in_shape)
layer_1 = TimeDistributed(Convolution2D(6, 3, 3, dim_ordering='tf'))(layer_0)
model = Model(input=layer_0, output=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
np.random.seed(4010 + i)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
weight_names = ['W', 'b']
for w_i, w_name in enumerate(weight_names):
print('{} shape:'.format(w_name), weights[w_i].shape)
print('{}:'.format(w_name), format_decimal(weights[w_i].ravel().tolist()))
data_in = 2 * np.random.random(data_in_shape) - 1
print('')
print('in shape:', data_in_shape)
print('in:', format_decimal(data_in.ravel().tolist()))
result = model.predict(np.array([data_in]))
print('out shape:', result[0].shape)
print('out:', format_decimal(result[0].ravel().tolist()))
###Output
W shape: (3, 3, 2, 6)
W: [0.971827, -0.898904, -0.987921, 0.529589, 0.043586, -0.541366, 0.316759, 0.351387, -0.292323, 0.445466, -0.922655, 0.437413, -0.483267, -0.478014, 0.7408, -0.595028, -0.718381, 0.349594, -0.091293, 0.14291, 0.633818, -0.686841, -0.925272, -0.740397, 0.070594, 0.67408, 0.455314, -0.402251, 0.288807, 0.001378, 0.42892, -0.251869, 0.06113, -0.703784, 0.002676, 0.965023, 0.758788, 0.1193, 0.749321, -0.017408, -0.004115, 0.18981, -0.91507, 0.132792, -0.219057, 0.19682, -0.512841, 0.954544, 0.794403, -0.663179, -0.05377, -0.855038, -0.486641, 0.625844, -0.945869, -0.474979, 0.922345, -0.334843, -0.469456, -0.394364, 0.543681, -0.817676, 0.6093, -0.77635, -0.508683, 0.22456, 0.696262, 0.079806, -0.182646, -0.718939, 0.962504, -0.386231, 0.860488, -0.918945, -0.800484, -0.590285, 0.409804, -0.822098, 0.3489, -0.4508, 0.913208, -0.414455, 0.97663, 0.956314, -0.55547, 0.594094, -0.552044, -0.137467, 0.539049, -0.320055, -0.335577, 0.974746, -0.634747, 0.085161, -0.127183, -0.061717, -0.411844, 0.774181, 0.223395, 0.163937, -0.606967, 0.178549, -0.005153, 0.452476, 0.373127, -0.726827, -0.395458, -0.769671]
b shape: (6,)
b: [0.180389, 0.629217, -0.656262, -0.476575, -0.36398, 0.987756]
in shape: (5, 4, 4, 2)
in: [-0.579677, 0.883193, 0.651172, -0.820251, -0.64795, 0.857328, -0.4689, 0.356044, -0.641528, -0.531973, -0.33586, -0.438823, 0.682186, 0.215781, -0.401735, 0.169171, 0.869358, -0.204078, -0.661876, -0.616139, -0.453943, -0.569439, -0.25218, 0.156473, 0.194797, -0.923921, 0.652204, -0.11765, 0.86293, 0.314218, -0.878496, -0.364761, -0.647821, 0.296841, 0.280105, 0.2753, -0.959741, -0.148037, -0.489424, -0.88939, 0.704443, 0.08354, 0.930112, -0.87023, -0.212285, 0.750133, 0.343506, -0.82568, 0.391491, 0.149626, 0.003594, -0.181464, -0.499632, 0.20694, 0.1007, 0.39826, 0.609736, -0.765775, -0.728474, -0.011711, 0.543543, 0.174309, 0.105794, -0.009876, -0.694421, -0.157031, 0.670853, -0.581331, 0.739486, -0.886014, -0.637039, 0.725753, 0.61919, 0.447635, 0.167298, 0.164242, -0.615436, -0.503061, 0.981698, -0.392795, 0.532215, 0.761817, 0.735562, -0.236234, -0.856381, 0.22419, -0.221125, 0.133757, -0.011162, -0.88018, -0.433047, -0.825617, 0.693626, -0.185243, -0.824829, 0.07932, 0.336478, 0.370138, -0.685905, -0.462037, 0.563862, 0.490274, 0.934239, -0.129323, 0.717792, -0.73658, -0.939587, 0.796637, -0.131382, -0.79957, -0.271279, 0.816961, -0.082096, 0.64553, -0.106661, 0.651369, -0.843208, -0.221077, 0.758074, 0.156006, -0.429501, 0.191698, 0.988067, -0.277344, 0.757645, -0.877824, 0.053841, 0.394075, 0.786359, 0.735302, 0.247852, -0.310899, 0.703408, -0.848404, 0.455067, 0.295289, -0.629316, 0.626332, -0.075289, -0.442735, -0.219408, -0.766048, 0.303257, 0.142211, 0.910002, -0.780858, 0.333242, -0.533434, 0.572575, 0.355883, -0.671924, 0.22028, -0.505951, -0.317892, 0.609641, -0.360548, 0.490007, 0.441024, 0.660294, 0.850007]
out shape: (5, 2, 2, 6)
out: [2.089554, -2.186939, -1.436176, -0.951733, -0.212962, 2.449681, 1.053569, -0.592297, -0.875753, -0.803289, -0.834779, -0.56835, -0.842922, 3.976766, -1.054281, 0.581773, 0.235047, 0.10304, -0.079684, 0.225164, -2.408352, -1.116154, 1.561833, -0.491674, 2.43274, -0.158394, -0.874487, -1.968509, -0.106465, 1.602375, 0.941225, 0.480547, 0.002478, 1.246196, -1.388929, -1.133004, 1.476556, -0.459852, -2.130519, -0.126113, -1.162246, 1.398016, -0.61384, 1.539333, -0.466156, 0.0395, 0.506595, -1.590957, -1.044266, 0.736233, 0.61792, -0.923799, 1.275832, 1.491487, 1.903215, -2.385962, -1.553725, -0.554848, -0.456638, 1.645426, 0.690055, 0.190637, -2.015925, 1.143469, -2.530135, 1.025159, -0.150503, 2.627801, -1.352068, 1.245647, 1.235627, -0.915363, 0.682647, 0.854592, -0.030856, 0.949627, 1.204568, 1.052329, -0.942961, 2.039315, 0.892454, -1.925232, 0.046332, 2.315713, -2.358421, 1.724373, -1.528506, 1.794933, 0.342617, -0.191888, -0.026605, 0.475714, -1.332559, -1.158213, 0.028725, 1.890396, -0.305622, 0.890336, -3.426138, 1.245994, -2.027975, -0.505022, 1.32001, 0.477822, -2.460816, -0.984189, 1.221664, 0.339474, 1.26535, 2.228118, 0.207158, -0.455113, -0.64988, 0.688864, 0.574933, 1.911588, -1.642422, -1.385078, 0.744757, -0.567276]
|
DEEP_NLP_resources/3-Deep-Learning for NLP/GloVe-Yelp-Comments-Classification.ipynb | ###Markdown
GloVe-Yelp-Comments-Classification
###Code
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = True
sess = tf.Session(config=config)
set_session(sess)
from keras.models import Sequential
from keras.layers import Embedding, Flatten, Dense, Dropout
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import EarlyStopping, ModelCheckpoint
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
import re
import nltk
import string
import plotly.offline as py
import plotly.graph_objs as go
py.init_notebook_mode(connected=True)
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from sklearn.manifold import TSNE
%matplotlib inline
###Output
_____no_output_____
###Markdown
Load the data:
###Code
df = pd.read_csv('../data/yelp.csv')
df.head()
df= df.dropna()
df=df[['text','stars']]
df.head()
labels = df['stars'].map(lambda x : 1 if int(x) > 3 else 0)
print(labels[10:20])
def clean_text(text):
## Remove puncuation
text = text.translate(string.punctuation)
## Convert words to lower case and split them
text = text.lower().split()
## Remove stop words
stops = set(stopwords.words("english"))
text = [w for w in text if not w in stops and len(w) >= 3]
text = " ".join(text)
# Clean the text
text = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", text)
text = re.sub(r"what's", "what is ", text)
text = re.sub(r"\'s", " ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"i'm", "i am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r",", " ", text)
text = re.sub(r"\.", " ", text)
text = re.sub(r"!", " ! ", text)
text = re.sub(r"\/", " ", text)
text = re.sub(r"\^", " ^ ", text)
text = re.sub(r"\+", " + ", text)
text = re.sub(r"\-", " - ", text)
text = re.sub(r"\=", " = ", text)
text = re.sub(r"'", " ", text)
text = re.sub(r"(\d+)(k)", r"\g<1>000", text)
text = re.sub(r":", " : ", text)
text = re.sub(r" e g ", " eg ", text)
text = re.sub(r" b g ", " bg ", text)
text = re.sub(r" u s ", " american ", text)
text = re.sub(r"\0s", "0", text)
text = re.sub(r" 9 11 ", "911", text)
text = re.sub(r"e - mail", "email", text)
text = re.sub(r"j k", "jk", text)
text = re.sub(r"\s{2,}", " ", text)
text = text.split()
stemmer = SnowballStemmer('english')
stemmed_words = [stemmer.stem(word) for word in text]
text = " ".join(stemmed_words)
return text
df['text'] = df['text'].map(lambda x: clean_text(x))
df.head(10)
maxlen = 50
embed_dim = 100
max_words = 20000
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(df['text'])
sequences = tokenizer.texts_to_sequences(df['text'])
data = pad_sequences(sequences, maxlen=maxlen, padding='post')
data[0]
vocab_size = len(tokenizer.word_index) + 1
vocab_size
labels = np.asarray(labels)
print('Shape of data:', data.shape)
print('Shape of label:', labels.shape)
###Output
_____no_output_____
###Markdown
Creating datasets:
###Code
validation_split = .2
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
val_samples = int(validation_split * data.shape[0])
X_train = data[:-val_samples]
y_train = labels[:-val_samples]
x_val = data[-val_samples:]
y_val = labels[-val_samples:]
###Output
_____no_output_____
###Markdown
Load the GloVe embeddings
###Code
dir = '../data/GloVe/glove.6B'
embed_index = dict()
f = open(os.path.join(dir, 'glove.6B.100d.txt'), encoding="utf8")
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embed_index[word] = coefs
f.close()
print('%s Word vectors' % len(embed_index))
###Output
_____no_output_____
###Markdown
Create a weight matrix:
###Code
embed_matrix = np.zeros((max_words, embed_dim))
for word, i in tokenizer.word_index.items():
if i < max_words:
embed_vector = embed_index.get(word)
if embed_vector is not None:
embed_matrix[i] = embed_vector
###Output
_____no_output_____
###Markdown
Creating the model:
###Code
model = Sequential()
model.add(Embedding(max_words,
embed_dim,
weights=[embed_matrix],
input_length=maxlen))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=5, mode='min')
save_best = ModelCheckpoint('../data/yelp_comments.hdf', save_best_only=True,
monitor='val_loss', mode='min')
%%time
model.fit(X_train, y_train,
epochs=20,
validation_data=(x_val, y_val),
batch_size=128,
verbose=1,
callbacks=[early_stopping, save_best])
###Output
_____no_output_____
###Markdown
Making predictions:
###Code
model.load_weights(filepath = '../data/yelp_comments.hdf')
pred = model.predict(x_val)
###Output
_____no_output_____
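Since the final layer is a sigmoid, `pred` contains probabilities rather than class labels; a minimal sketch (using the `pred` and `y_val` arrays defined above) for thresholding them at 0.5 and checking validation accuracy:

```python
pred_labels = (pred.ravel() > 0.5).astype(int)  # threshold the sigmoid outputs
accuracy = np.mean(pred_labels == y_val)
print('Validation accuracy: %.3f' % accuracy)
```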
###Markdown
Word embeddings visualization:
###Code
glove_embds = model.layers[0].get_weights()[0]
words = []
for word, i in tokenizer.word_index.items():
words.append(word)
###Output
_____no_output_____
###Markdown
Visualizing words:
###Code
def plot_words(data, start, stop, step):
trace = go.Scatter(
x = data[start:stop:step,0],
y = data[start:stop:step, 1],
mode = 'markers',
text= words[start:stop:step]
)
layout = dict(title= 't-SNE_factor1 vs t-SNE_factor2',
yaxis = dict(title='t-SNE_factor2'),
xaxis = dict(title='t-SNE_factor1'),
hovermode= 'closest')
fig = dict(data = [trace], layout= layout)
py.iplot(fig)
%%time
glove_tsne_embds = TSNE(n_components=2).fit_transform(glove_embds)
plot_words(glove_tsne_embds, 0, 100, 1)
###Output
_____no_output_____ |
Queries Elasticsearch.ipynb | ###Markdown
This report explains the queries run against the `wine`, `inn`, `maraton`, and `norte` indices, which are indexed into the Elasticsearch server with the `document-indexing.py` script. It shows both the code needed to run each query through the Elasticsearch REST API (in Python) and the results obtained.
###Code
import json
import requests
from datetime import datetime
ELASTIC = "http://localhost:9200" # address where the Elasticsearch server is running
###Output
_____no_output_____
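For reference, a minimal sketch of how a single document could be indexed through the same REST API, reusing the `requests` import and the `ELASTIC` address defined above (the index name, document id and field values here are illustrative and are not taken from `document-indexing.py`; it also assumes an Elasticsearch version that exposes the `_doc` endpoint):

```python
doc = {"Name": "Example Wine", "Color": "Red", "Grape": "Merlot", "Score": 90, "Price": 25.0, "Year": 2008}
# PUT creates (or replaces) the document with id 1 in the 'wine' index
resp = requests.put(ELASTIC + "/wine/_doc/1", json=doc)
print(resp.status_code, resp.json().get("result"))
```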
###Markdown
Wine index This index contains data about different wines. Top 5 wines of each color This query finds the 5 best-scoring wines of each color (*White* and *Red*). It is implemented as two queries, one covering the red wines and another covering the whites. The results are limited to the first 5 elements sorted by the **Score** field.
###Code
query = {
"size" : 5,
"query":{
"match" : { "Color" : "Red" }
},
"sort" : {
"Score" : "desc"
}
}
reds = json.loads(requests.get(ELASTIC+"/wine/_search",json=query).text)["hits"]["hits"]
query = {
"size" : 5,
"query": {
"match" : { "Color" : "White" }
},
"sort" : {
"Score" : "desc"
}
}
whites = json.loads(requests.get(ELASTIC+"/wine/_search",json=query).text)["hits"]["hits"]
print("Vinos tintos:")
print("----------------")
for i, w in enumerate(reds):
wine = w["_source"]
print("#%d: %s, %d/100" % (i+1, wine["Name"], wine["Score"]))
print("")
print("Vinos blancos:")
print("----------------")
for i, w in enumerate(whites):
wine = w["_source"]
print("#%d: %s, %d/100" % (i+1, wine["Name"], wine["Score"]))
print("")
###Output
Vinos tintos:
----------------
#1: A Shot in the Dark Eleven Confessions Vineyard, 98/100
#2: Papas Block, 98/100
#3: In the Crosshairs Eleven Confessions Vineyard, 97/100
#4: Cabernet Sauvignon, 97/100
#5: Beckstoffer Dr. Crane Vineyard, 97/100
Vinos blancos:
----------------
#1: Green River Isobel, 95/100
#2: Boonflys Hill, 95/100
#3: McCrea Vineyard, 95/100
#4: Ma Belle-Fille, 95/100
#5: Durell Vineyard, 95/100
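As an aside, the same result could likely be obtained in a single request by combining a `terms` aggregation on the color with a `top_hits` sub-aggregation; a sketch, under the assumption that the mapping exposes a `Color.keyword` sub-field (as the next query does for `Grape.keyword`):

```python
query = {
    "size": 0,
    "aggs": {
        "colores": {
            "terms": {"field": "Color.keyword"},
            "aggs": {
                "top5": {"top_hits": {"size": 5, "sort": [{"Score": {"order": "desc"}}]}}
            }
        }
    }
}
buckets = json.loads(requests.get(ELASTIC + "/wine/_search", json=query).text)["aggregations"]["colores"]["buckets"]
for b in buckets:
    print(b["key"])
    for i, w in enumerate(b["top5"]["hits"]["hits"]):
        print("#%d: %s, %d/100" % (i + 1, w["_source"]["Name"], w["_source"]["Score"]))
```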
###Markdown
Average price and maximum score per grape variety This query groups the wines by grape variety and, for each grape, computes the average price and the maximum score obtained by the wines of that grape. It is implemented with an *aggregation* that first buckets the documents by the **Grape** field and, inside each bucket, runs two *aggregations*: one computes the *avg* of the **Price** field and the other the *max* of the **Score** field.
###Code
query = {
"aggs" : {
"uvas" : {
"terms" : {
"field" : "Grape.keyword"
},
"aggs":{
"precio-medio-por-uva": {
"avg" : {
"field" : "Price"
}
},
"puntuacion-maxima-por-uva" : {
"max" : {
"field" : "Score"
}
}
}
}
}
}
uvas = json.loads(requests.get(ELASTIC+"/wine/_search",json=query).text)["aggregations"]["uvas"]["buckets"]
for obj in uvas:
print("%s: %d vinos" % (obj["key"], obj["doc_count"]))
print("Precio medio: $%0.2f" % (obj["precio-medio-por-uva"]["value"]))
print("Puntuación máxima: %d/100" % (obj["puntuacion-maxima-por-uva"]["value"]))
print("")
###Output
Pinot Noir: 109 vinos
Precio medio: $46.98
Puntuación máxima: 95/100
Chardonnay: 104 vinos
Precio medio: $41.93
Puntuación máxima: 95/100
Zinfandel: 78 vinos
Precio medio: $28.94
Puntuación máxima: 95/100
Cabernet Sauvingnon: 68 vinos
Precio medio: $81.29
Puntuación máxima: 97/100
Syrah: 61 vinos
Precio medio: $44.95
Puntuación máxima: 98/100
Sauvignon Blanc: 46 vinos
Precio medio: $24.02
Puntuación máxima: 93/100
Grenache: 10 vinos
Precio medio: $68.80
Puntuación máxima: 97/100
Merlot: 10 vinos
Precio medio: $30.20
Puntuación máxima: 89/100
Petite Sirah: 6 vinos
Precio medio: $41.83
Puntuación máxima: 91/100
Barbera: 1 vinos
Precio medio: $17.00
Puntuación máxima: 87/100
###Markdown
Average price of wines produced before 2007 This query finds the wines produced in years before 2007 and computes their average price. It is implemented with a *query* that restricts the **Year** field to values lower than 2007 and an *aggregation* that computes the average of the **Price** field.
###Code
query = {
"query":{
"range" : {
"Year" : {
"lt" : 2007
}
}
},
"aggs":{
"precio-medio": {
"avg" : {
"field" : "Price"
}
}
}
}
precio = json.loads(requests.get(ELASTIC+"/wine/_search",json=query).text)["aggregations"]["precio-medio"]["value"]
print("Precio medio de los vinos anteriores a 2007: $%0.2f" % precio)
###Output
Precio medio de los vinos anteriores a 2007: $53.37
###Markdown
Inn index This index contains information about reservations made at a motel. Reservations longer than 10 days This query finds the reservations that lasted more than a week and a half (rounded to 10 days). It uses a script query, a special kind of query that runs code returning true or false; the condition is evaluated against every document and only the documents where it is true are returned. In this case the condition is that the difference between the **CheckOut** and **CheckIn** dates, expressed in days, is greater than 10. Elasticsearch does not operate directly on dates, so the values are converted to milliseconds and, after the subtraction, converted back to days.
###Code
query = {
"size": 10000,
"query":
{
"script" : {
"script" : {
"source": "return (doc['CheckOut'].value.getMillis() - doc['CheckIn'].value.getMillis())/(1000*60*60*24) > 10"
}
}
}
}
res = json.loads(requests.get(ELASTIC+"/inn/_search",json=query).text)["hits"]["hits"]
for r in res:
reservation = r["_source"]
ckin = datetime.strptime(reservation["CheckIn"], "%d-%b-%y")
ckout = datetime.strptime(reservation["CheckOut"], "%d-%b-%y")
print("%s %s: Entró %s, Salió %s, duración = %d días" % (reservation["FirstName"], reservation["LastName"],reservation["CheckIn"],
reservation["CheckOut"],(ckout-ckin).days))
print("")
###Output
LEWIS TRUDEN: Entró 21-Jun-10, Salió 02-Jul-10, duración = 11 días
GUS DERKAS: Entró 01-Oct-10, Salió 13-Oct-10, duración = 12 días
OLYMPIA ALBROUGH: Entró 14-Oct-10, Salió 25-Oct-10, duración = 11 días
LESTER PANOS: Entró 22-Nov-10, Salió 04-Dec-10, duración = 12 días
DENNY PERRINO: Entró 07-Mar-10, Salió 19-Mar-10, duración = 12 días
ELMIRA ATTEBURG: Entró 21-Nov-10, Salió 02-Dec-10, duración = 11 días
ISREAL BISHOFF: Entró 01-Jun-10, Salió 12-Jun-10, duración = 11 días
MAXIE STEBNER: Entró 15-Jul-10, Salió 28-Jul-10, duración = 13 días
PORTER STRACK: Entró 02-Sep-10, Salió 14-Sep-10, duración = 12 días
HERB CRACE: Entró 06-Jan-10, Salió 18-Jan-10, duración = 12 días
MARINE CASMORE: Entró 22-May-10, Salió 03-Jun-10, duración = 12 días
RODERICK BRODOWSKI: Entró 12-Jan-10, Salió 25-Jan-10, duración = 13 días
ASUNCION TIPPIN: Entró 24-Apr-10, Salió 06-May-10, duración = 12 días
FRANCISCO BOSE: Entró 03-Oct-10, Salió 14-Oct-10, duración = 11 días
GRANT KNERIEN: Entró 12-Dec-10, Salió 25-Dec-10, duración = 13 días
AUNDREA EASTLING: Entró 30-May-10, Salió 12-Jun-10, duración = 13 días
CODY EERKES: Entró 18-Aug-10, Salió 30-Aug-10, duración = 12 días
BLAINE WILCUTT: Entró 30-Dec-10, Salió 12-Jan-11, duración = 13 días
GILBERT HUIZINGA: Entró 14-Feb-10, Salió 25-Feb-10, duración = 11 días
STEPHEN PLYMEL: Entró 19-Mar-10, Salió 01-Apr-10, duración = 13 días
REINALDO MUHLESTEIN: Entró 23-Jun-10, Salió 05-Jul-10, duración = 12 días
BERNITA SCARPINO: Entró 01-Aug-10, Salió 14-Aug-10, duración = 13 días
MIREYA NEUZIL: Entró 17-May-10, Salió 29-May-10, duración = 12 días
FAVIOLA MORRISSETTE: Entró 26-Feb-10, Salió 09-Mar-10, duración = 11 días
SUZANNE DUB: Entró 19-Jun-10, Salió 02-Jul-10, duración = 13 días
###Markdown
Guests who have stayed more than once This query finds people with more than one reservation in the system. It uses an *aggregation* that groups the documents by full name (built with a script that joins the **FirstName** and **LastName** fields into one), and for each group it applies a *bucket_selector* *aggregation*, which exposes the document count of each bucket and keeps only those buckets where the count is greater than 1.
###Code
query = {
"size":0,
"aggs": {
"persona":{
"terms" : {
"script": "params['_source'].FirstName+' '+params['_source'].LastName"
},
"aggs":{
"reservas":{
"bucket_selector": {
"buckets_path": {
"cuenta" : "_count"
},
"script": {
"source" : "params.cuenta > 1"
}
}
}
}
}
}
}
res = json.loads(requests.get(ELASTIC+"/inn/_search",json=query).text)["aggregations"]["persona"]["buckets"]
for r in res:
print("%s se ha alojado %d veces" % (r["key"], r["doc_count"]))
###Output
EMERY VOLANTE se ha alojado 3 veces
GLEN DONIGAN se ha alojado 3 veces
GRANT KNERIEN se ha alojado 3 veces
ALEXIS FINEFROCK se ha alojado 2 veces
AMIEE PENEZ se ha alojado 2 veces
ARON KEBEDE se ha alojado 2 veces
AUGUST MAEWEATHER se ha alojado 2 veces
BENTON AKHTAR se ha alojado 2 veces
BO DURAN se ha alojado 2 veces
CATHERIN KUDRON se ha alojado 2 veces
###Markdown
Number of reservations per month for *Queen* beds This query returns the number of reservations per month for rooms with a *Queen* bed. It first searches for reservations whose room has the *Queen* bed type via the **bedType** field inside the **Room** object, and then applies an *aggregation* that groups the results by the month of the reservation, obtained with a script that calls the *getMonth* method on the **CheckIn** date field.
###Code
query = {
"size" : 0,
"query" : {
"match" : { "Room.bedType" : "Queen" }
},
"aggs" : {
"reservas-al-mes" : {
"terms" : {
"script" : "doc['CheckIn'].value.getMonth()"
}
}
}
}
res = json.loads(requests.get(ELASTIC+"/inn/_search",json=query).text)["aggregations"]["reservas-al-mes"]["buckets"]
for r in res:
print("En el mes %s: %d reservas de camas de tipo Queen" % (r["key"],r["doc_count"]))
###Output
En el mes AUGUST: 25 reservas de camas de tipo Queen
En el mes DECEMBER: 24 reservas de camas de tipo Queen
En el mes APRIL: 21 reservas de camas de tipo Queen
En el mes JULY: 21 reservas de camas de tipo Queen
En el mes OCTOBER: 21 reservas de camas de tipo Queen
En el mes NOVEMBER: 19 reservas de camas de tipo Queen
En el mes SEPTEMBER: 19 reservas de camas de tipo Queen
En el mes JUNE: 18 reservas de camas de tipo Queen
En el mes MAY: 18 reservas de camas de tipo Queen
En el mes JANUARY: 16 reservas de camas de tipo Queen
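As an aside, the same monthly breakdown could likely also be expressed with a `date_histogram` aggregation on **CheckIn** instead of a script (the exact interval parameter name depends on the Elasticsearch version, and `date_histogram` buckets by calendar month within each year, which only matches the script version here because all reservations fall in the same year):

```python
query = {
    "size": 0,
    "query": {"match": {"Room.bedType": "Queen"}},
    "aggs": {
        "reservas-al-mes": {
            "date_histogram": {"field": "CheckIn", "interval": "month"}
        }
    }
}
```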
###Markdown
Total price of the reservations made by *EMERY VOLANTE* This query computes the total price of the reservations of the customer EMERY VOLANTE, who, as seen above, has stayed 3 times. It searches for the reservations whose **FirstName** field is EMERY and whose **LastName** field is VOLANTE, and applies an *aggregation* over the result that sums the cost of each reservation, computed with a script that multiplies the **Rate** field (room price per day) by the number of days of the stay (calculated in the same way as in the first query).
###Code
query = {
"query" : {
"bool" : {
"must" : [{"match" : { "FirstName" : "EMERY"}}, {"match" : { "LastName" : "VOLANTE"}}]
}
},
"aggs" : {
"precio-total" : {
"sum" : {
"script" : {
"source" : "(doc.CheckOut.value.getMillis() - doc.CheckIn.value.getMillis())/(1000*60*60*24) * doc.Rate.value"
}
}
}
}
}
res = json.loads(requests.get(ELASTIC+"/inn/_search",json=query).text)["aggregations"]
print("Precio total de todas las reservas realizadas por EMERY VOLANTE: $%0.2f" % res["precio-total"]["value"])
print("")
###Output
Precio total de todas las reservas realizadas por EMERY VOLANTE: $843.75
###Markdown
Maraton index This index contains data about the runners of a marathon. Average, best and worst time per age group This query returns, for each age group, the average, minimum, and maximum time obtained. It uses an *aggregation* that buckets the documents by age group and, for each group, applies the *avg*, *min* and *max* operations to the **Time** field.
###Code
query = {
"size" : 0,
"aggs" : {
"tiempos_por_grupo" : {
"terms" : {
"field" : "Group.keyword"
},
"aggs" : {
"tiempos" : {
"avg" : {
"field" : "Time"
}
},
"mejor-tiempo" : {
"min" : {
"field" : "Time"
}
},
"peor-tiempo" : {
"max" : {
"field" : "Time"
}
}
}
}
}
}
res = json.loads(requests.get(ELASTIC+"/maraton/_search",json=query).text)["aggregations"]["tiempos_por_grupo"]["buckets"]
for r in res:
print("Grupo de edad %s: %d corredores, tiempo medio %s, \
tiempo perdedor: %s, tiempo ganador: %s" % (r["key"], r["doc_count"],
r["tiempos"]["value_as_string"],
r["peor-tiempo"]["value_as_string"],
r["mejor-tiempo"]["value_as_string"]))
###Output
Grupo de edad 20-39: 272 corredores, tiempo medio 1:47:28, tiempo perdedor: 2:41:04, tiempo ganador: 1:09:47
Grupo de edad 40-49: 139 corredores, tiempo medio 1:46:51, tiempo perdedor: 2:37:23, tiempo ganador: 1:18:36
Grupo de edad 50-59: 38 corredores, tiempo medio 1:51:09, tiempo perdedor: 2:27:23, tiempo ganador: 1:26:51
Grupo de edad 60-98: 7 corredores, tiempo medio 1:54:26, tiempo perdedor: 2:05:21, tiempo ganador: 1:37:56
Grupo de edad 01-19: 5 corredores, tiempo medio 1:44:14, tiempo perdedor: 2:04:02, tiempo ganador: 1:21:24
Grupo de edad 99-+: 2 corredores, tiempo medio 2:01:10, tiempo perdedor: 2:22:11, tiempo ganador: 1:40:09
###Markdown
Best place per state This query returns, for each state, the best (minimum) place obtained. It uses an *aggregation* that buckets the documents by state and, for each bucket, computes the *min* of the **Place** field.
###Code
query = {
"size" : 0,
"aggs" : {
"estados" : {
"terms" : {
"field" : "State.keyword"
},
"aggs" : {
"mejor" : {
"min" : {
"field" : "Place"
}
},
}
}
}
}
res = json.loads(requests.get(ELASTIC+"/maraton/_search",json=query).text)["aggregations"]["estados"]["buckets"]
for r in res:
print("%s: %d corredores, mejor posición %d" % (r["key"], r["doc_count"], r["mejor"]["value"]))
###Output
RI: 190 corredores, mejor posición 3
MA: 182 corredores, mejor posición 1
CT: 32 corredores, mejor posición 20
NH: 20 corredores, mejor posición 12
FL: 9 corredores, mejor posición 17
NC: 6 corredores, mejor posición 54
MO: 5 corredores, mejor posición 7
IN: 4 corredores, mejor posición 58
NJ: 4 corredores, mejor posición 69
PA: 4 corredores, mejor posición 100
###Markdown
Runners with a pace between 6 and 8 minutes/km who finished in the top 5 of their group This query finds the runners whose pace is in the 6-to-8-minute range and who also finished in the top 5 of their age group. The search uses two conditions: the **Pace** field must be greater than or equal to 6 minutes and less than or equal to 8 minutes, and **GroupPlace** must be less than or equal to 5.
###Code
query = {
"size": 10000,
"query" : {
"bool" : {
"must" : [{
"range" : {
"Pace" : {
"gte" : "0:6:00",
"lt" : "0:8:00"
}
}},
{"range" : {
"GroupPlace" : {
"lte" : 5
}
}
}]
}
}
}
res = json.loads(requests.get(ELASTIC+"/maraton/_search",json=query).text)["hits"]["hits"]
for re in res:
r = re["_source"]
print("%s, Pace: %s, Puesto en su grupo(%s) %d" % (r["FirstName"]+" "+r["LasName"], r["Pace"], r["Group"], r["GroupPlace"]))
###Output
CRUZ TAILOR, Pace: 0:6:00, Puesto en su grupo(40-49) 1
BRENTON FAGO, Pace: 0:6:03, Puesto en su grupo(40-49) 2
CHARLES FARLESS, Pace: 0:6:03, Puesto en su grupo(40-49) 3
WILBER VANORDER, Pace: 0:6:04, Puesto en su grupo(40-49) 4
KENDRICK HOLZ, Pace: 0:6:13, Puesto en su grupo(01-19) 1
JAMA PEAD, Pace: 0:6:16, Puesto en su grupo(20-39) 1
ROMEO UNVARSKY, Pace: 0:6:18, Puesto en su grupo(40-49) 5
ALEXIS HABERMANN, Pace: 0:6:38, Puesto en su grupo(50-59) 1
MAGGIE NASR, Pace: 0:6:39, Puesto en su grupo(20-39) 2
FRANCIS LAMSON, Pace: 0:6:42, Puesto en su grupo(50-59) 2
HEATH KIRVIN, Pace: 0:7:02, Puesto en su grupo(50-59) 3
NEDRA NASSIMI, Pace: 0:7:03, Puesto en su grupo(20-39) 3
FLORIDA AYTES, Pace: 0:7:03, Puesto en su grupo(40-49) 1
LETTIE MCKAGUE, Pace: 0:7:05, Puesto en su grupo(20-39) 4
MAIRA ROUTHIER, Pace: 0:7:11, Puesto en su grupo(20-39) 5
GIOVANNI FYLES, Pace: 0:7:17, Puesto en su grupo(50-59) 4
STEPHANI FERTITTA, Pace: 0:7:18, Puesto en su grupo(40-49) 2
WESLEY BECKWORTH, Pace: 0:7:19, Puesto en su grupo(50-59) 5
CEDRIC FROILAND, Pace: 0:7:29, Puesto en su grupo(60-98) 1
JOSUE WOODAL, Pace: 0:7:39, Puesto en su grupo(99-+) 1
REUBEN JOHNSON, Pace: 0:7:51, Puesto en su grupo(01-19) 2
MIGUELINA BOWLAND, Pace: 0:7:56, Puesto en su grupo(40-49) 3
DANELLE TROIA, Pace: 0:8:00, Puesto en su grupo(40-49) 4
STANFORD RIVERIA, Pace: 0:8:00, Puesto en su grupo(01-19) 3
ANTONY CRAGIN, Pace: 0:8:00, Puesto en su grupo(60-98) 2
###Markdown
BIB number of the top ten runners This query returns the BIB number of the 10 best runners. It searches for the documents whose **Place** field is less than or equal to 10.
###Code
query = {
"size": 10,
"query" : {
"range" : {
"Place" : {
"lte" : "10"
}
}
}
}
res = json.loads(requests.get(ELASTIC+"/maraton/_search",json=query).text)["hits"]["hits"]
for re in res:
r = re["_source"]
print("%s, puesto %s, BIB: %s" % (r["FirstName"]+" "+r["LasName"], r["Place"], r["BIBNumber"]))
###Output
ANDRE PELLAM, puesto 1, BIB: 340
HORACE KARPOWICZ, puesto 2, BIB: 34
MILES ROSELLA, puesto 3, BIB: 399
FRANKLYN GIRARDIN, puesto 4, BIB: 18
AL LEMASTER, puesto 5, BIB: 262
CRUZ TAILOR, puesto 6, BIB: 308
DEWEY HEAIVILIN, puesto 7, BIB: 45
HYMAN CURIE, puesto 8, BIB: 16
BRENTON FAGO, puesto 9, BIB: 42
CHARLES FARLESS, puesto 10, BIB: 420
###Markdown
Norte index This index contains news articles published by El Norte de Castilla during 2006. Articles from the "Internacional" section containing the phrase "crisis económica" This query finds the articles whose section is Internacional and whose body contains the phrase "crisis económica" ("economic crisis"). The search has two conditions: the **seccion** field must be Internacional, and the **cuerpo** field must contain the phrase "crisis económica".
###Code
query = {
"size" : 10000,
"query" : {
"bool" : {
"must" : [
{"match" : {"seccion" : "Internacional"}},
{"match_phrase" : {"cuerpo" : "crisis económica"}}
]
}
}
}
res = json.loads(requests.get(ELASTIC+"/norte/_search",json=query).text)["hits"]["hits"]
for re in res:
r = re["_source"]
print('"%s" [score %0.2f]' % (r["titulo"],re["_score"]))
if "resumen" in r:
print("\t"+r["resumen"])
print("")
###Output
"Los argentinos reciben con indiferencia la orden por la que los bancos devolverán en pesos los ahorros retenidos" [score 10.76]
Los afectados, insatisfechos con la solución, insistirán en sus demandas
"Las empresas españolas en América Latina temen nuevos cambios legales" [score 9.60]
La inseguridad jurídica es la principal razón que alegan las compañías para paralizar o reconsiderar sus inversiones en estos países
"Berlusconi acusa a los empresarios de aliarse con la oposición" [score 9.14]
La patronal italiana califica las palabras del primer ministro de «antidemocráticas»
"Ferviente nacionalista crítico con WashingtonEl peor dirigente de la historia del país" [score 8.15]
"Perú da otra oportunidad a Alan García, que dejó el país en bancarrota en 1990" [score 8.15]
El futuro presidente pide perdón por sus errores del pasado, de los que culpa al «apetito de poder», y arremete contra la injerencia de Chávez
"Una segunda oleada de vandalismo y enfrentamientos sacude Budapest" [score 7.10]
Cuatro policías se encuentran en estado grave, entre un total de 69 heridos Una de las teorías sobre el origen de la filtración apunta al propio Gyurcsány
"Italia vota con un resultado incierto y el riesgo de empate e ingobernabilidadAlgunas claves del voto" [score 7.10]
Berlusconi depende de los cuatro millones de electores que ha perdido y Prodi confía en superarle por la reunificación de la izquierda
###Markdown
Articles published on this day in 2006 in the Televisión section This query finds the articles published on dd-MM-2006, where dd and MM are the current day and month, and that belong to the Televisión section. The current day and month are obtained first, and then a search with two conditions is run: the **seccion** field must be Televisión and the **fecha** field must be dd-MM-2006.
###Code
day = datetime.now().date().day
month = datetime.now().date().month
query = {
"size" : 10000,
"query" : {
"bool" : {
"must" : [
{"match" : { "seccion" : "Televisión" }},
{"match" : { "fecha" : str(day).zfill(2)+"-"+str(month).zfill(2)+"-2006" }}
]
}
}
}
res = json.loads(requests.get(ELASTIC+"/norte/_search",json=query).text)["hits"]["hits"]
for re in res:
r = re["_source"]
print('"%s"' % (r["titulo"]))
if "resumen" in r:
print("\t"+r["resumen"])
print("")
###Output
"«Aída es la mujer común de nuestra sociedad»"
La cadena de Mediaset estrena esta noche una nueva temporada de la comedia, que vuelve con novedades
"Superhuman"
"Finaliza el espacio 'Esta es mi tierra'"
"Debuta en Cuatro la versión española de 'Matrimonio con hijos'"
"'Actualidad en 2D' trata el islamismo"
###Markdown
Articles published during May containing the word "Eurovisión" This query finds, among the articles published in May, those that contain the word "eurovisión". The search has two conditions: the **cuerpo** field must contain the word eurovisión, and the date must be greater than or equal to 05-2006, which translates to on or after May 1, 2006, and lower than 06-2006, that is, before June 1, 2006.
###Code
query = {
"size" : 10000,
"query" : {
"bool" : {
"must" : [
{"match" : { "cuerpo" : "eurovisión" }},
{"range" : {
"fecha" : {
"gte" : "05-2006",
"lt" : "06-2006",
"format" : "MM-yyyy"
}
}}
]
}
}
}
res = json.loads(requests.get(ELASTIC+"/norte/_search",json=query).text)["hits"]["hits"]
for re in res:
r = re["_source"]
print('"%s"' % (r["titulo"]))
if "resumen" in r:
print("\t"+r["resumen"])
print("")
###Output
"«Lo único que queremos es hacerlo muy bien»"
Las cuatro hermanas se muestran sorprendidas por el montaje del festival en la capital griega
"Todas la canciones de 'Eurovisión'"
"Las Ketchup visitan a Buenafuente"
"Bochorno"
"Casi cinco millones de espectadores vieron Eurovisión"
"Eurovisión"
"Carlos Lozano presentará en TVE1 el día 20 la gala del festival de Eurovisión"
El cuarteto cordobés Las Ketchup parte mañana hacia Atenas para defender 'Bloody Mary'
"«Intento disfrutar de cada momento, no miro más allá de mi sombra»"
La cantante recibe esta noche en el Museo Patio Herreriano de Valladolid un homenaje de la Academia de los Grammy Latinos
"Azehos impulsa la gastronomía con el primer festival de la tapa"
Un total de 36 establecimientos se inscriben en el concurso para potenciar los productos de la tierra Con el aperitivo como excusa, la organización invita a zamoranos y visitantes a seguir la ruta gastronómica
|
AI for Medical Prognosis/Week 4/.ipynb_checkpoints/C2_W4_lecture-checkpoint.ipynb | ###Markdown
AI4M Course 2 Week 4 lecture notebook Outline[One-hot encode categorical variables](one-hot-encoding)[Hazard function](hazard-function)[Permissible pairs with censoring and time](permissible-pairs) One-hot encode categorical variables
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Which features are categorical?
###Code
df = pd.DataFrame({'ascites': [0,1,0,1],
'edema': [0.5,0,1,0.5],
'stage': [3,4,3,4],
'cholesterol': [200.5,180.2,190.5,210.3]
})
df
###Output
_____no_output_____
###Markdown
In this small sample dataset, 'ascites', 'edema', and 'stage' are categorical variables- ascites: value is either 0 or 1- edema: value is either 0, 0.5 or 1- stage: is either 3 or 4'cholesterol' is a continuous variable, since it can be any decimal value greater than zero. Which categorical variables to one-hot encode?Of the categorical variables, which one should be one-hot encoded (turned into dummy variables)?- ascites: is already 0 or 1, so there is no need to one-hot encode it. - We could one-hot encode ascites, but it is not necessary when there are just two possible values that are 0 or 1. - When values are 0 or 1, 1 means a disease is present, and 0 means normal (no disease).- edema: Edema is swelling in any part of the body. This data set's 'edema' feature has 3 categories, so we will want to one-hot encode it so that there is one feature column for each of the three possible values. - 0: No edema - 0.5: Patient has edema, but did not receive diuretic therapy (which is used to treat edema) - 1: Patient has edema, despite also receiving diuretic therapy (so the condition may be more severe).- stage: has values of 3 and 4. We will want to one-hot encode these because they are not values of 0 or 1. - the "stage" of cancer is either 0, 1, 2, 3 or 4. - Stage 0 means there is no cancer. - Stage 1 is cancer that is limited to a small area of the body, also known as "early stage cancer" - Stage 2 is cancer that has spread to nearby tissues - stage 3 is cancer that has spread to nearby tissues, but more so than stage 2 - stage 4 is cancer that has spread to distant parts of the body, also known as "metastatic cancer". - We could convert stage 3 to 0 and stage 4 to 1 for the sake of training a model. This may be confusing for anyone reviewing our code and data. We will one-hot encode the 'stage'. - You'll actually see that we end up with 0 representing stage 3 and 1 representing stage 4 (see the next section). Multi-collinearity of one-hot encoded featuresLet's see what happens when we one-hot encode the 'stage' feature. We'll use [pandas.get_dummies](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.get_dummies.html)
###Code
df_stage = pd.get_dummies(data=df,
columns=['stage']
)
df_stage[['stage_3','stage_4']]
###Output
_____no_output_____
###Markdown
What do you notice about the 'stage_3' and 'stage_4' features?Given that stage 3 and stage 4 are the only possible values for stage, if you know that patient 0 (row 0) has stage_3 set to 1, what can you say about that same patient's value for the stage_4 feature?- When stage_3 is 1, then stage_4 must be 0- When stage_3 is 0, then stage_4 must be 1This means that one of the feature columns is actually redundant. We should drop one of these features to avoid multicollinearity (where one feature can predict another feature).
###Code
df_stage
df_stage_drop_first = df_stage.drop(columns='stage_3')
df_stage_drop_first
###Output
_____no_output_____
###Markdown
Note, there's actually a parameter of pandas.get_dummies() that lets you drop the first one-hot encoded column. You'll practice doing this in this week's assignment! Make the numbers decimalsWe can cast the one-hot encoded values as floats by setting the data type to numpy.float64.- This is helpful if we are feeding data into a model, where the model expects a certain data type (such as a 64-bit float, 32-bit float etc.)
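A minimal sketch of that `drop_first` parameter, reusing the `df` defined above (the next cell then continues with the `dtype` example):

```python
# Equivalent to dropping 'stage_3' by hand: only 'stage_4' is kept
df_stage_auto = pd.get_dummies(data=df, columns=['stage'], drop_first=True)
df_stage_auto
```

The result should match `df_stage_drop_first` from the earlier cell.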
###Code
import numpy as np
df_stage = pd.get_dummies(data=df,
columns=['stage'],
)
df_stage[['stage_4']]
df_stage_float64 = pd.get_dummies(data=df,
columns=['stage'],
dtype=np.float64
)
df_stage_float64[['stage_4']]
###Output
_____no_output_____
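###Markdown
As noted above, `pandas.get_dummies` can also drop the redundant column for us through its `drop_first` argument. A minimal sketch using the same `df` as above (shown here only as an illustration):
###Code
df_stage_dropfirst = pd.get_dummies(data=df,
                                    columns=['stage'],
                                    drop_first=True
                                   )
df_stage_dropfirst
###Output
_____no_output_____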
###Markdown
This is the end of this practice section.Please continue on with the lecture videos!--- Hazard function Let's say we fit the hazard function$$\lambda(t, x) = \lambda_0(t)e^{\theta^T X_i}$$So that we have the coefficients $\theta$ for the features in $X_i$If you have a new patient, let's predict their hazard $\lambda(t,x)$
###Code
import numpy as np
import pandas as pd
lambda_0 = 1
coef = np.array([0.5,2.])
coef
X = pd.DataFrame({'age': [20,30,40],
'cholesterol': [180,220,170]
})
X
###Output
_____no_output_____
###Markdown
- First, let's multiply the coefficients to the features.- Check the shapes of the coefficients and the features to decide which one to transpose
###Code
coef.shape
X.shape
###Output
_____no_output_____
###Markdown
It looks like the coefficient is a 1D array, so transposing it won't do anything. - We can transpose the X so that we're multiplying a (2,) array by a (2,3) dataframe.So the formula looks more like this (transpose $X_i$ instead of $\theta$)$$\lambda(t, x) = \lambda_0(t)e^{\theta X_i^T}$$- Let's multiply $\theta X_i^T$
###Code
np.dot(coef,X.T)
###Output
_____no_output_____
###Markdown
Calculate the hazard for the three patients (there are 3 rows in X)
###Code
lambdas = lambda_0 * np.exp(np.dot(coef,X.T))
patients_df = X.copy()
patients_df['hazards'] = lambdas
patients_df
###Output
_____no_output_____
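###Markdown
A small follow-up sketch: sorting by the predicted hazard ranks the patients from highest to lowest risk (this simply reuses `patients_df` from the previous cell).
###Code
patients_df.sort_values(by='hazards', ascending=False)
###Output
_____no_output_____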
###Markdown
This is the end of this practice section.Please continue on with the lecture videos!--- Permissible pairs with censoring and time
###Code
import pandas as pd
df = pd.DataFrame({'time': [2,4,2,4,2,4,2,4],
'event': [1,1,1,1,0,1,1,0],
'risk_score': [20,40,40,20,20,40,40,20]
})
df
###Output
_____no_output_____
###Markdown
We made this data sample so that you can compare pairs of patients visually. When at least one patient is not censored- A pair may be permissible if at least one patient is not censored.- If both patients are censored, then they are definitely not a permissible pair.
###Code
pd.concat([df.iloc[0:1],df.iloc[1:2]],axis=0)
if df['event'][0] == 1 or df['event'][1] == 1:
print(f"May be a permissible pair: 0 and 1")
else:
print(f"Definitely not permissible pair: 0 and 1")
pd.concat([df.iloc[4:5],df.iloc[7:8]],axis=0)
if df['event'][4] == 1 or df['event'][7] == 1:
print(f"May be a permissible pair: 4 and 7")
else:
print(f"Definitely not permissible pair: 4 and 7")
###Output
_____no_output_____
###Markdown
If neither patient was censored:- If both patients had an event (neither one was censored). This is definitely a permissible pair.
###Code
pd.concat([df.iloc[0:1],df.iloc[1:2]],axis=0)
if df['event'][0] == 1 and df['event'][1] == 1:
print(f"Definitely a permissible pair: 0 and 1")
else:
print(f"May be a permissible pair: 0 and 1")
###Output
_____no_output_____
###Markdown
When one patient is censored:- If we know that one patient was censored and one had an event, then we can check if the censored patient's time is at least as great as the uncensored patient's time. If so, it's a permissible pair as well.
###Code
pd.concat([df.iloc[6:7],df.iloc[7:8]],axis=0)
if df['time'][7] >= df['time'][6]:
print(f"Permissible pair: Censored patient 7 lasted at least as long as uncensored patient 6")
else:
print("Not a permisible pair")
pd.concat([df.iloc[4:5],df.iloc[5:6]],axis=0)
if df['time'][4] >= df['time'][5]:
print(f"Permissible pair")
else:
print("Not a permisible pair: censored patient 4 was censored before patient 5 had their event")
###Output
_____no_output_____ |
.ipynb_checkpoints/04 - Strings and Dictionaries-checkpoint.ipynb | ###Markdown
Strings Strings are ordered text based data which are represented by enclosing the same in single/double/triple quotes.
###Code
String0 = 'Taj Mahal is beautiful'
String1 = "Taj Mahal is beautiful"
String2 = '''Taj Mahal
is
beautiful'''
print String0 , type(String0)
print String1, type(String1)
print String2, type(String2)
###Output
Taj Mahal is beautiful <type 'str'>
Taj Mahal is beautiful <type 'str'>
Taj Mahal
is
beautiful <type 'str'>
###Markdown
String Indexing and Slicing are similar to Lists which was explained in detail earlier.
###Code
print String0[4]
print String0[4:]
###Output
M
Mahal is beautiful
###Markdown
Built-in Functions **find( )** function returns the index value of the given data that is to be found in the string. If it is not found it returns **-1**. Remember not to confuse the returned -1 with the reverse indexing value.
###Code
print String0.find('al')
print String0.find('am')
###Output
7
-1
###Markdown
The index value returned is the index of the first element in the input data.
###Code
print String0[7]
###Output
a
###Markdown
One can also tell the **find( )** function between which index values it has to search.
###Code
print String0.find('j',1)
print String0.find('j',1,3)
###Output
2
2
###Markdown
**capitalize( )** is used to capitalize the first element in the string.
###Code
String3 = 'observe the first letter in this sentence.'
print String3.capitalize()
###Output
Observe the first letter in this sentence.
###Markdown
**center( )** is used to center align the string by specifying the field width.
###Code
String0.center(70)
###Output
_____no_output_____
###Markdown
One can also fill the left out spaces with any other character.
###Code
String0.center(70,'-')
###Output
_____no_output_____
###Markdown
**zfill( )** is used for zero padding by specifying the field width.
###Code
String0.zfill(30)
###Output
_____no_output_____
###Markdown
**expandtabs( )** allows you to change the spacing of the tab character. '\t' which is by default set to 8 spaces.
###Code
s = 'h\te\tl\tl\to'
print s
print s.expandtabs(1)
print s.expandtabs()
###Output
h e l l o
h e l l o
h e l l o
###Markdown
**index( )** works the same way as the **find( )** function; the only difference is that find returns '-1' when the input element is not found in the string, but the **index( )** function throws a ValueError
###Code
print String0.index('Taj')
print String0.index('Mahal',0)
###Output
0
4
###Markdown
**endswith( )** function is used to check if the given string ends with the particular char which is given as input.
###Code
print String0.endswith('y')
###Output
_____no_output_____
###Markdown
The start and stop index values can also be specified.
###Code
print String0.endswith('l',0)
print String0.endswith('M',0,5)
###Output
_____no_output_____
###Markdown
**count( )** function counts the number of char in the given string. The start and the stop index can also be specified or left blank. (These are Implicit arguments which will be dealt in functions)
###Code
print String0.count('a',0)
print String0.count('a',5,10)
###Output
_____no_output_____
###Markdown
**join( )** function is used to add a char in between the elements of the input string.
###Code
'a'.join('*_-')
###Output
_____no_output_____
###Markdown
'*_-' is the input string and the char 'a' is added in between each element. The **join( )** function can also be used to convert a list into a string.
###Code
a = list(String0)
print a
b = ''.join(a)
print b
###Output
_____no_output_____
###Markdown
Before converting it into a string **join( )** function can be used to insert any char in between the list elements.
###Code
c = '/'.join(a)[18:]
print c
###Output
_____no_output_____
###Markdown
**split( )** function is used to convert a string back to a list. Think of it as the opposite of the **join()** function.
###Code
d = c.split('/')
print d
###Output
_____no_output_____
###Markdown
In the **split( )** function one can also specify the number of times you want to split the string, or the number of elements the new returned list should contain. The number of elements is always one more than the specified number; this is because it is split the number of times specified.
###Code
e = c.split('/',3)
print e
print len(e)
###Output
_____no_output_____
###Markdown
**lower( )** converts any capital letter to small letter.
###Code
print String0
print String0.lower()
###Output
_____no_output_____
###Markdown
**upper( )** converts any small letter to capital letter.
###Code
String0.upper()
###Output
_____no_output_____
###Markdown
**replace( )** function replaces the element with another element.
###Code
String0.replace('Taj Mahal','Bengaluru')
###Output
_____no_output_____
###Markdown
**strip( )** function is used to delete elements from the right end and the left end which is not required.
###Code
f = ' hello '
###Output
_____no_output_____
###Markdown
If no char is specified then it will delete all the spaces that is present in the right and left hand side of the data.
###Code
f.strip()
###Output
_____no_output_____
###Markdown
**strip( )** function, when a char is specified then it deletes that char if it is present in the two ends of the specified string.
###Code
f = ' ***----hello---******* '
f.strip('*')
###Output
_____no_output_____
###Markdown
The asterisks had to be deleted but are not. This is because there is a space on both the right and the left hand side. So in the strip function, the characters need to be input in the specific order in which they are present.
###Code
print f.strip(' *')
print f.strip(' *-')
###Output
_____no_output_____
###Markdown
**lstrip( )** and **rstrip( )** function have the same functionality as strip function but the only difference is **lstrip( )** deletes only towards the left side and **rstrip( )** towards the right.
###Code
print f.lstrip(' *')
print f.rstrip(' *')
###Output
_____no_output_____
###Markdown
Dictionaries Dictionaries are more used like a database because here you can index a particular sequence with your user defined string. To define a dictionary, equate a variable to { } or dict()
###Code
d0 = {}
d1 = dict()
print type(d0), type(d1)
###Output
<type 'dict'> <type 'dict'>
###Markdown
Dictionary works somewhat like a list but with an added capability of assigning its own index style.
###Code
d0['One'] = 1
d0['OneTwo'] = 12
print d0
###Output
{'OneTwo': 12, 'One': 1}
###Markdown
That is how a dictionary looks like. Now you are able to access '1' by the index value set at 'One'
###Code
print d0['One']
###Output
1
###Markdown
Two lists which are related can be merged to form a dictionary.
###Code
names = ['One', 'Two', 'Three', 'Four', 'Five']
numbers = [1, 2, 3, 4, 5]
###Output
_____no_output_____
###Markdown
**zip( )** function is used to combine two lists
###Code
d2 = zip(names,numbers)
print d2
###Output
[('One', 1), ('Two', 2), ('Three', 3), ('Four', 4), ('Five', 5)]
###Markdown
The two lists are combined to form a single list, and each element is clubbed with its respective element from the other list inside a tuple. Tuples are used because that is what is assigned and the value should not change.Further, to convert the above into a dictionary, the **dict( )** function is used.
###Code
a1 = dict(d2)
print a1
###Output
{'Four': 4, 'Five': 5, 'Three': 3, 'Two': 2, 'One': 1}
###Markdown
Built-in Functions **clear( )** function is used to erase the entire database that was created.
###Code
a1.clear()
print a1
###Output
{}
###Markdown
Dictionary can also be built using loops.
###Code
for i in range(len(names)):
a1[names[i]] = numbers[i]
print a1
###Output
{'Four': 4, 'Five': 5, 'Three': 3, 'Two': 2, 'One': 1}
###Markdown
**values( )** function returns a list with all the assigned values in the dictionary.
###Code
a1.values()
###Output
_____no_output_____
###Markdown
**keys( )** function returns all the indices, or keys, which contain the values that were assigned to them.
###Code
a1.keys()
###Output
_____no_output_____
###Markdown
**items( )** returns a list containing both lists, with each key-value pair of the dictionary inside a tuple. This is the same as the result that was obtained when the zip function was used.
###Code
a1.items()
###Output
_____no_output_____
###Markdown
**pop( )** function is used to remove that particular element, and this removed element can be assigned to a new variable. But remember, only the value is returned and not the key, because the key is just an index value.
###Code
a2 = a1.pop('Four')
print a1
print a2
###Output
{'Five': 5, 'Three': 3, 'Two': 2, 'One': 1}
4
|
mx_viz_examples.ipynb | ###Markdown
ExamplesBelow we show three examples of the mx_viz functions. Please see the `slide_figures.ipynb` and the OHBM 2020 multilayer network educational presentation for more details.
###Code
## Import packages
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
import multinetx as mx
import scipy.io as sio
import pandas as pd
import os
import sys
import mx_viz
print("Done importing packages :)")
###Output
finished defining functions
Done importing packages :)
###Markdown
Create a multilayer networkUsing multinetx, we will create a small multilayer (mx) network for plotting later. Note that the visualization functions will work best with _small_ mx networks. For larger mx networks, consider alternative plotting strategies found in this [2019 paper](https://onlinelibrary.wiley.com/doi/full/10.1111/cgf.13610).Note below we will assume that all nodes exist in every layer and that nodes only connect to their counterparts in other layers.
###Code
# Define number of nodes (number of nodes in largest layer).
nNodes = 10
# Define number of levels.
nLayers = 3
# Use multinetx to generate three graphs each on nNodes nodes.
g1 = mx.generators.erdos_renyi_graph(nNodes,0.5,seed=216)
g2 = mx.generators.erdos_renyi_graph(nNodes,0.5,seed=130)
g3 = mx.generators.erdos_renyi_graph(nNodes,0.5,seed=81)
# Define adjacency between layers. Here we only assign nodes to themselves in each layer.
adj_block = mx.lil_matrix(np.zeros((nNodes*nLayers,nNodes*nLayers)))
for i in np.arange(nLayers-1):
for l in np.arange(i+1,nLayers):
adj_block[i*nNodes:(i+1)*nNodes, (l)*nNodes:(l+1)*nNodes] = np.identity(nNodes)
adj_block += adj_block.T
# Create multilayer graph with mx.
mg = mx.MultilayerGraph(list_of_layers=[g1,g2,g3],
inter_adjacency_matrix=adj_block)
# Here we can set the edge weights to different values just so we can see which are inter- and intra-layer edges.
mg.set_edges_weights(intra_layer_edges_weight=2,
inter_layer_edges_weight=1)
## Plot the supra-adjacency matrix to check that we actually made a multilayer network.
fig = plt.figure(figsize=(6,5))
sns.heatmap(mx.adjacency_matrix(mg,weight='weight').todense())
plt.title('supra adjacency matrix');
###Output
_____no_output_____
###Markdown
Write to json and create visualization.The first mx_viz function writes the graph to a json file, while the second reads the file and creates an html file with the visualization. Below we will create two html files - one using the `theme="light"` flag and the other with `theme="dark"`.
###Code
# We'll use networkx positioning to get nice layouts. However pos is changed (for example by
# using a different nx.layout function), it should remain a dictionary mapping nodes to coordinate
# arrays with at least an x and y position.
pos = nx.layout.fruchterman_reingold_layout(g1, dim=3, k=2)
filename_json = "data/example1.json"
G2 = mx_viz.write_mx_to_json(filename_json,mg, nNodes, pos, nLayers)
filename_html_light = "visualization_output_example_light.html"
mx_viz.visualize(G2,theme="light",path_html=filename_html_light)
filename_html_dark = "visualization_output_example_dark.html"
mx_viz.visualize(G2,theme="dark",path_html=filename_html_dark);
###Output
_____no_output_____
###Markdown
Create a temporal networkTemporal networks are special types of multilayer networks in which the layers correspond to timepoints. Time has a natural ordering, so we create a slightly different visualization that respects the ordered layers.Again, we assume that all nodes exist in every layer and that nodes connect only to their counterparts in every layer.
###Code
# Define number of nodes (number of nodes in largest layer)
nNodes = 10
# Define number of timepoints (levels)
nLayers = 14
# Use multinetx to generate fourteen graphs each on nNodes nodes
graph_layers = [mx.generators.erdos_renyi_graph(nNodes,((i+1)/(nLayers*2+2)),seed=np.random.randint(1,300)) for i in np.arange(nLayers)]
# Define adjacency between layers. Here, again, we only assign nodes to themselves in each layer.
adj_block = mx.lil_matrix(np.zeros((nNodes*nLayers,nNodes*nLayers)))
for i in np.arange(nLayers-1):
for l in np.arange(i+1,nLayers):
adj_block[i*nNodes:(i+1)*nNodes, (l)*nNodes:(l+1)*nNodes] = np.identity(nNodes)
adj_block += adj_block.T
# Create multilayer graph
mg = mx.MultilayerGraph(list_of_layers=graph_layers,
inter_adjacency_matrix=adj_block)
# Set edge weights
mg.set_edges_weights(intra_layer_edges_weight=2,
inter_layer_edges_weight=1)
## Plot the supra-adjacency matrix
fig = plt.figure(figsize=(6,5))
sns.heatmap(mx.adjacency_matrix(mg,weight='weight').todense())
plt.title('supra adjacency matrix');
# As before, generate positions for nodes in the first layer.
pos = nx.layout.fruchterman_reingold_layout(graph_layers[0], dim=3)
filename_json = "data/example2.json"
G2 = mx_viz.write_mx_to_json(filename_json,mg, nNodes, pos, nLayers)
filename_html = "visualization_output_example_timeseries.html"
mx_viz.visualize_timeseries(G2, path_html=filename_html);
###Output
done writing mx to data/example2.json
Wrote visualization to: visualization_output_example_timeseries.html
|
src/notebooks/Mevon_AI_Speech_Emotion_Recognition_Demo.ipynb | ###Markdown
** Emotion Recognizer ** ◢ Mevon-AI - Recognize Emotions in SpeechThis program is for recognizing emotions from audio files generated in a customer care call center. A customer care call center of any company receives many calls from customers every day. Every call is recorded for analysis purposes. The program aims to analyse the emotions of the customer and employees from these recordings. The emotions are classified into 6 categories: 'Neutral', 'Happy', 'Sad', 'Angry', 'Fearful', 'Disgusted', 'Surprised'Analysing the emotions of the customer after they have spoken with the company's employee in the call center can allow the company to understand the customer's behaviour and rate the performance of its employees accordingly.**Credits:*** [Speech Emotion Recognition from Saaket Agashe's Github](https://github.com/saa1605/speech-emotion-recognition)* [Speech Emotion Recognition with CNN](https://towardsdatascience.com/speech-emotion-recognition-with-convolution-neural-network-1e6bb7130ce3)* [MFCCs Tutorial](http://practicalcryptography.com/miscellaneous/machine-learning/guide-mel-frequency-cepstral-coefficients-mfccs/)* [UIS-RNN Fully Supervised Speaker Diarization](https://github.com/google/uis-rnn)* [uis-rnn and speaker embedding by vgg-speaker-recognition by taylorlu](https://github.com/taylorlu/Speaker-Diarization) ---◢ Verify Correct Runtime Settings** IMPORTANT **In the "Runtime" menu for the notebook window, select "Change runtime type." Ensure that the following are selected:* Runtime Type = Python 3* Hardware Accelerator = GPU ◢ Git clone and install Mevon-AI Speech Emotion Recognition
###Code
!git clone https://github.com/SuyashMore/MevonAI-Speech-Emotion-Recognition.git
cd MevonAI-Speech-Emotion-Recognition/src
###Output
_____no_output_____
###Markdown
◢ Setup
###Code
!chmod +x setup.sh
!./setup.sh
###Output
_____no_output_____
###Markdown
◢ Instructions Add Audio FilesYou can add audio files in any language inside input/ folder.For eg. currently, there are 3 folders for 3 different Employees inside the input/ directory. Each folder contains 1 audio file of conversation between that employee with a customer. You can add many more files in each of the employee's folder. If you have 5 employees, then create 5 folders inside the **input/** directory. Then add audio files of conversation with customer of each employee in the respective folders. Run Mevon_AIDemo for running the main program is given in the next section. Diarization OutputSince each audio file has 2 speakers: customer and employee of the customer care call center, we split the audio file into 2 such that one audio file contains the audio of customer and other contains the audio of employee.These splitted audio files are stored in **output**/ folderPredicted EmotionsThe audio file of each customer is analysed by the CNN model and a **.csv** file is generated which contains the predicted emotion ◢ Recognize Emotions!!
###Code
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
!python3 speechEmotionRecognition.py
###Output
_____no_output_____ |
Dimensionality_Reduction/LDA/linear_discriminant_analysis.ipynb | ###Markdown
Linear Discriminant Analysis (LDA) Importing the libraries
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
###Output
_____no_output_____
###Markdown
Importing the dataset
###Code
dataset = pd.read_csv('Wine.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
###Output
_____no_output_____
###Markdown
Splitting the dataset into the Training set and Test set
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
###Output
_____no_output_____
###Markdown
Feature Scaling
###Code
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
###Output
_____no_output_____
###Markdown
Applying LDA
###Code
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
lda = LDA(n_components = 2)
X_train = lda.fit_transform(X_train, y_train)
X_test = lda.transform(X_test)
###Output
_____no_output_____
###Markdown
Training the Logistic Regression model on the Training set
###Code
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Making the Confusion Matrix
###Code
from sklearn.metrics import confusion_matrix, accuracy_score
y_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)
accuracy_score(y_test, y_pred)
###Output
[[14 0 0]
[ 0 16 0]
[ 0 0 6]]
###Markdown
Visualising the Training set results
###Code
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green', 'blue'))(i), label = j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('LD1')
plt.ylabel('LD2')
plt.legend()
plt.show()
###Output
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points.
###Markdown
Visualising the Test set results
###Code
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green', 'blue'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('LD1')
plt.ylabel('LD2')
plt.legend()
plt.show()
###Output
_____no_output_____ |
notebooks/lammps_slurm_cluster.ipynb | ###Markdown
`PyLammpsMPI` Running on queueing systems `pylammpsmpi` integrates with [dask distributed](https://distributed.dask.org/en/latest/) and [dask jobqueue](https://jobqueue.dask.org/en/latest/) to enable running lammps interactively on queueing systems. For this example, [slurm cluster](https://jobqueue.dask.org/en/latest/generated/dask_jobqueue.SLURMCluster.html) provided by dask-jobqueue will be used to set up a cluster on which pylammpsmpi will be run.
###Code
from dask.distributed import Client
from dask_jobqueue import SLURMCluster
###Output
_____no_output_____
###Markdown
Create a slurm cluster with one worker process and ten cores per job. The options would need to be tweaked according to the SLURM specifications.
###Code
cluster = SLURMCluster(queue='shorttime', cores=10, processes=1, job_cpu=10, memory="3GB", walltime="05:59:00")
###Output
_____no_output_____
###Markdown
Create a client and connect to the cluster
###Code
client = Client(cluster)
client
###Output
_____no_output_____
###Markdown
By default the cluster has no workers. We add a worker.
###Code
cluster.scale(1)
###Output
_____no_output_____
###Markdown
A visual dashboard of jobs running on the cluster is also available. Once a cluster is set up, it can be provided to pylammpsmpi
###Code
from pylammpsmpi import LammpsLibrary
###Output
_____no_output_____
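###Markdown
The dashboard mentioned earlier can usually be reached through the client object; `dashboard_link` is a standard attribute of the dask `Client` (shown here as a small aside).
###Code
print(client.dashboard_link)
###Output
_____no_output_____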
###Markdown
Specify ten cores for the job (since we created a cluster worker with 10 cores), choose the `mode` as `dask`, and pass the `client` to the pylammpsmpi object
###Code
lmp = LammpsLibrary(cores=10, mode='dask', client=client)
###Output
_____no_output_____
###Markdown
The rest is similar as you would run on a local machine - except the calculations are run on the cluster. Read an input file
###Code
lmp.file("../tests/in.simple")
###Output
_____no_output_____
###Markdown
Check version of lammps
###Code
lmp.version
###Output
_____no_output_____
###Markdown
Check number of atoms
###Code
lmp.natoms
###Output
_____no_output_____
###Markdown
Run commands
###Code
lmp.command("run 1")
lmp.command(["run 1", "run 1"])
###Output
_____no_output_____
###Markdown
Commands can also be called directly
###Code
lmp.run(10)
lmp.mass(1, 20)
###Output
_____no_output_____
###Markdown
Extract a global property
###Code
lmp.extract_global("boxxhi")
###Output
_____no_output_____
###Markdown
Access thermo quantities
###Code
lmp.get_thermo("temp")
###Output
_____no_output_____
###Markdown
Thermo quantities can also be accessed directly,
###Code
lmp.temp
lmp.press
###Output
_____no_output_____
###Markdown
Accessing simulation box
###Code
lmp.extract_box()
###Output
_____no_output_____
###Markdown
Accessing and changing atom properties Get individual atom properties, for example the force on each atom
###Code
ff = lmp.gather_atoms("f")
print(type(ff))
print(len(ff))
###Output
<class 'numpy.ndarray'>
256
###Markdown
Get atom properties by their ids
###Code
ids = lmp.gather_atoms("id")
ff = lmp.gather_atoms("f", ids=ids[:10])
len(ff)
###Output
_____no_output_____
###Markdown
Change atom properties
###Code
ff = ff*0.5
lmp.scatter_atoms("f", ff, ids=ids[:10])
###Output
_____no_output_____
###Markdown
Access value of variables
###Code
temp = lmp.extract_variable("tt", "all", 0)
temp
###Output
_____no_output_____
###Markdown
Access value of computes
###Code
ke = lmp.extract_compute("ke", 1, 1)
len(ke)
v = lmp.extract_compute("v", 1, 2, width=3)
v.shape
lmp.extract_compute("1", 0, 0)
msd = lmp.extract_compute("msd", 0, 1, length=4)
msd[0]
###Output
_____no_output_____
###Markdown
Access values from fix
###Code
x = lmp.extract_fix("2", 0, 1, 1)
x
###Output
_____no_output_____
###Markdown
Change the simulation box
###Code
lmp.delete_atoms("group", "all")
lmp.reset_box([0.0,0.0,0.0], [8.0,8.0,8.0], 0.0,0.0,0.0)
###Output
_____no_output_____
###Markdown
Finally, the cluster is closed.
###Code
client.close()
cluster.close()
###Output
_____no_output_____ |
notebook/.ipynb_checkpoints/Milestone2-checkpoint.ipynb | ###Markdown
DSCI 525 - Web and Cloud Computing Milestone 2: Your team is planning to migrate to the cloud. AWS gave 400$ (100$ each) to your team to support this. As part of this initiative, your team needs to set up a server in the cloud, a collaborative environment for your team, and later move your data to the cloud. After that, your team can wrangle the data in preparation for machine learning. Milestone 2 checklist You will have mainly 2 tasks. Here is the checklist...- To set up a collaborative environment - Setup your EC2 instance with JupyterHub. - Install all necessary things needed in your UNIX server (amazon ec2 instance). - Set up your S3 bucket. - Move the data that you wrangled in your last milestone to s3. - To move data from s3.- Wrangle the data in preparation for machine learning - Get the data from S3 in your notebook and make data ready for machine learning. **Keep in mind:**- _All services you use are in region us-west-2._- _Don't store anything in these servers or storage that represents your identity as a student (like your student ID number) ._- _Use only default VPC and subnet._ - _No IP addresses are visible when you provide the screenshot._- _You do proper budgeting so that you don't run out of credits._ - _We want one single notebook for grading, and it's up to your discretion on how you do it. ***So only one person in your group needs to spin up a big instance and a ```t2.xlarge``` is of decent size.***_- _Please stop the instance when not in use. This can save you some bucks, but it's again up to you and how you budget your money. Maybe stop it if you or your team won't use it for the next 5 hours?- _Your AWS lab will shut down after 3 hours 30 min. When you start it again, your AWS credentials (***access key***,***secret***, and ***session token***) will change, and you want to update your credentials file with the new one. _- _Say something went wrong and you want to spin up another EC2 instance, then make sure you terminate the previous one._- _We will be choosing the storage to be ```Delete on Termination```, which means that stored data in your instance will be lost upon termination. Make sure you save any data to S3 and download the notebooks to your laptop so that next time you have your jupyterHub in a different instance, you can upload your notebook there.__***Outside of Milestone:*** If you are working as an individual just to practice setting up EC2 instances, make sure you select ```t2.large``` instance (not anything bigger than that as it can cost you money). I strongly recommend you spin up your own instance and experiment with the s3 bucket in doing something (there are many things that we learned and practical work from additional instructions and video series) to get comfortable with AWS. But we won't be looking at it for a grading purpose._***NOTE:*** Everything you want for this notebook is discussed in lecture 3, lecture 4, and setup instructions. 1. Setup your EC2 instance rubric={correctness:20} Please attach this screen shots from your group for grading.https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone2/image/1_result.png
###Code
from PIL import Image
Image.open("img/525_m2_1.png")
###Output
_____no_output_____
###Markdown
2. Setup your JupyterHub rubric={correctness:20} Please attach this screen shots from your group for gradingI want to see all the group members here in this screenshot https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone2/image/2_result.png
###Code
Image.open("img/525_m2_2.png")
###Output
_____no_output_____
###Markdown
3. Setup the server rubric={correctness:20} 3.1) Add your team members to EC2 instance.3.2) Setup a common data folder to download data, and this folder should be accessible by all users in the JupyterHub. 3.3)(***OPTIONAL***) Setup a sharing notebook environment.3.4) Install and configure AWS CLI. Please attach this screen shots from your group for gradingMake sure you mask the IP address refer [here](https://www.anysoftwaretools.com/blur-part-picture-mac/).https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone2/image/3_result.png
###Code
Image.open("img/525_m2_3.png")
###Output
_____no_output_____
###Markdown
4. Get the data what we wrangled in our first milestone. You have to install the packages that are needed. Refer this TLJH [document]( https://tljh.jupyter.org/en/latest/howto/env/user-environment.html).Refer ```pip``` section.Don't forget to add option -E. This way, all packages that you install will be available to other users in your JupyterHub.These packages you must install and install other packages needed for your wrangling. sudo -E pip install pandas sudo -E pip install pyarrow sudo -E pip install s3fs As in the last milestone, we looked at getting the data transferred from Python to R, and we have different solutions. Henceforth, I uploaded the parquet file format, which we can use moving forward.
###Code
import re
import os
import glob
import zipfile
import requests
from urllib.request import urlretrieve
import json
import pandas as pd
###Output
_____no_output_____
###Markdown
Remember, here we gave the folder that we created in Step 3.2, as we made it available for all the users in a group.
###Code
# Necessary metadata
article_id = 14226968 # this is the unique identifier of the article on figshare
url = f"https://api.figshare.com/v2/articles/{article_id}"
headers = {"Content-Type": "application/json"}
output_directory = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) + "/data/shared/"
print(output_directory)
response = requests.request("GET", url, headers=headers)
data = json.loads(response.text) # this contains all the articles data, feel free to check it out
files = data["files"] # this is just the data about the files, which is what we want
files
files_to_dl = ["combined_model_data_parti.parquet.zip"] ## Please download the partitioned
for file in files:
if file["name"] in files_to_dl:
os.makedirs(output_directory, exist_ok=True)
urlretrieve(file["download_url"], output_directory + file["name"])
with zipfile.ZipFile(os.path.join(output_directory, "combined_model_data_parti.parquet.zip"), 'r') as f:
f.extractall(output_directory)
###Output
_____no_output_____
###Markdown
5. Setup your S3 bucket and move data rubric={correctness:20} 5.1) Create a bucket name should be mds-s3-xxx. Replace xxx with your "groupnumber".5.2) Create your first folder called "output".5.3) Move the "observed_daily_rainfall_SYD.csv" file from the Milestone1 data folder to your s3 bucket from your local computer.5.4) Moving the parquet file we downloaded(combined_model_data_parti.parquet) in step 4 to S3 using the cli what we installed in step 3.4.
###Code
!aws configure set aws_access_key_id "ASIAQ3IZ36HLY57PWNMW"
!aws configure set aws_secret_access_key "Nb0yGCyeulDzAB1m63fMmTzMlC2mACanVAocKh8Z"
!aws configure set aws_session_token "FwoGZXIvYXdzED8aDBTLHtDXFXdTWw5kTSLCAXez7u1RgejRbj7BQd7WDBocFWz3poxbjQsI763iVGpuL7MGSILMIQyBKLKVBB0mmgUmXHLiX7SChfnTaAogOhQTfqCe3TQz8u2K7gQF1Jj1CIyz7Qc33YuhJq11Y6g+IOeb1ODx+cMG6/XvwHa1xRK3pSL9bLu8qa3tD2d9ZP2xNCwMmp1QfoZp2GRzlM6qOmnSZKZd9of5MoTYjpWVm/is3JFDr0f/UAeZIdBNDAaurrhkEaZtZUpptZ+u0mfUyLuDKISrvZIGMi1mwYvNdwAnS7MFF4HQ2doPpou/ssFZFht2NeMsEiYSwmH7SgcAMQ0FzNs+9ho="
!aws s3 cp ../data/shared/ "s3://mds-s3-17" --recursive
!aws s3 cp ../data/raw/observed_daily_rainfall_SYD.csv "s3://mds-s3-17" --recursive
###Output
The user-provided path ../data/raw/observed_daily_rainfall_SYD.csv does not exist.
###Markdown
Please attach this screen shots from your group for gradingMake sure it has 3 objects.https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone2/image/4_result.png 6. Wrangle the data in preparation for machine learning rubric={correctness:20} Our data currently covers all of NSW, but say that our client wants us to create a machine learning model to predict rainfall over Sydney only. There's a bit of wrangling that needs to be done for that:1. We need to query our data for only the rows that contain information covering Sydney2. We need to wrangle our data into a format suitable for training a machine learning model. That will require pivoting, resampling, grouping, etc.To train an ML algorithm we need it to look like this:||model-1_rainfall|model-2_rainfall|model-3_rainfall|...|observed_rainfall||---|---|---|---|---|---||0|0.12|0.43|0.35|...|0.31||1|1.22|0.91|1.68|...|1.34||2|0.68|0.29|0.41|...|0.57| 6.1) Get the data from s3 (```combined_model_data_parti.parquet``` and ```observed_daily_rainfall_SYD.csv```)6.2) First query for Sydney data and then drop the lat and lon columns (we don't need them).```syd_lat = -33.86syd_lon = 151.21```Expected shape ```(1150049, 2)```.6.3) Save this processed file to s3 for later use: Save as a csv file ```ml_data_SYD.csv``` to ```s3://mds-s3-xxx/output/``` expected shape ```(46020,26)``` - This includes all the models as columns and also adding additional column ```Observed``` loaded from ```observed_daily_rainfall_SYD.csv``` from s3.
###Code
### Do all your coding here
import pandas as pd
import pyarrow.parquet as pq
aws_credentials ={"key": "ASIAQ3IZ36HLY57PWNMW","secret": "Nb0yGCyeulDzAB1m63fMmTzMlC2mACanVAocKh8Z", "token": "FwoGZXIvYXdzED8aDBTLHtDXFXdTWw5kTSLCAXez7u1RgejRbj7BQd7WDBocFWz3poxbjQsI763iVGpuL7MGSILMIQyBKLKVBB0mmgUmXHLiX7SChfnTaAogOhQTfqCe3TQz8u2K7gQF1Jj1CIyz7Qc33YuhJq11Y6g+IOeb1ODx+cMG6/XvwHa1xRK3pSL9bLu8qa3tD2d9ZP2xNCwMmp1QfoZp2GRzlM6qOmnSZKZd9of5MoTYjpWVm/is3JFDr0f/UAeZIdBNDAaurrhkEaZtZUpptZ+u0mfUyLuDKISrvZIGMi1mwYvNdwAnS7MFF4HQ2doPpou/ssFZFht2NeMsEiYSwmH7SgcAMQ0FzNs+9ho="}
df = pd.read_parquet("s3://mds-s3-17/combined_model_data_parti.parquet/", storage_options=aws_credentials)
# Examining the file:
df.head()
# Using the provided coordinates to filter Sydney / observed model:
syd_lat = -33.86
syd_lon = 151.21
df = df[((df["lat_min"] < syd_lat) & (df["lat_max"] > syd_lat))]
df = df[((df["lon_min"] < syd_lon) & (df["lon_max"] > syd_lon))]
df_sydney = df
# Drop the lat and lon columns as instructed:
df_sydney = df_sydney.drop(columns = ["lat_min", "lat_max", "lon_min", "lon_max"])
# df_sydney = df_sydney.reset_index(drop=True).set_index('time')
df_sydney.head()
# Check the shape of Sydney data
df_sydney_ti = df_sydney.reset_index(drop=True).set_index('time')
df_sydney_ti.shape
# Load observed data:
df_observed = pd.read_csv("s3://mds-s3-17/observed_daily_rainfall_SYD.csv", storage_options=aws_credentials)
df_observed["model"] = "observed_rainfall"
df_observed.head()
# combine the two df:
df_sydney = pd.concat([df_sydney, df_observed])
# Set the index properly:
df_sydney["time"] = pd.to_datetime(df_sydney["time"]).dt.date
df_sydney.set_index("time", inplace=True)
df_sydney.head()
# Pivot the new df
df_final = df_sydney.reset_index().pivot(index = 'time', columns = "model", values = "rain (mm/day)")
df_final = df_final.reset_index(drop=True)
df_final.head()
df_final.shape
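# Quick guard based on the expected shape quoted in the milestone instructions above
assert df_final.shape == (46020, 26), df_final.shape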
# save the file
df_final.to_csv("s3://mds-s3-17/output/ml_data_SYD.csv", storage_options=aws_credentials, index=False)
###Output
_____no_output_____ |
knn_Coffee_0.1band_COUNT10.ipynb | ###Markdown
KNN & DTW
###Code
# -*- coding: utf-8 -*-
class Dtw(object):
def __init__(self, seq1, seq2,
patterns = [(-1,-1), (-1,0), (0,-1)],
weights = [{(0,0):2}, {(0,0):1}, {(0,0):1}],
band_r=0.05):
self._seq1 = seq1
self._seq2 = seq2
self.len_seq1 = len(seq1)
self.len_seq2 = len(seq2)
self.len_pattern = len(patterns)
self.sum_w = [sum(ws.values()) for ws in weights]
self._r = int(len(seq1)*band_r)
assert len(patterns) == len(weights)
self._patterns = patterns
self._weights = weights
def get_distance(self, i1, i2):
return abs(self._seq1[i1] - self._seq2[i2])
def calculate(self):
g = list([float('inf')]*self.len_seq2 for i in range(self.len_seq1))
cost = list([0]*self.len_seq2 for i in range(self.len_seq1))
g[0][0] = 2*self.get_distance(0, 0)
for i in range(self.len_seq1):
for j in range(max(0,i-self._r), min(i+self._r+1, self.len_seq2)):
for pat_i in range(self.len_pattern):
coor = (i+self._patterns[pat_i][0], j+self._patterns[pat_i][1])
if coor[0]<0 or coor[1]<0:
continue
dist = 0
for w_coor_offset, d_w in self._weights[pat_i].items():
w_coor = (i+w_coor_offset[0], j+w_coor_offset[1])
dist += d_w*self.get_distance(w_coor[0], w_coor[1])
this_val = g[coor[0]][coor[1]] + dist
this_cost = cost[coor[0]][coor[1]] + self.sum_w[pat_i]
if this_val < g[i][j]:
g[i][j] = this_val
cost[i][j] = this_cost
return g[self.len_seq1-1][self.len_seq2-1]/cost[self.len_seq1-1][self.len_seq2-1], g, cost
def print_table(self, tb):
print(' '+' '.join(["{:^7d}".format(i) for i in range(self.len_seq2)]))
for i in range(self.len_seq1):
str = "{:^4d}: ".format(i)
for j in range(self.len_seq2):
str += "{:^7.3f} ".format(tb[i][j])
print (str)
def print_g_matrix(self):
_, tb, _ = self.calculate()
self.print_table(tb)
def print_cost_matrix(self):
_, _, tb = self.calculate()
self.print_table(tb)
def get_dtw(self):
ans, _, _ = self.calculate()
return ans
import csv
import random
import math
import operator
import numpy as np
def loadDataset(filename, data=[]):
with open(filename, 'rb') as csvfile:
lines = csv.reader(csvfile,delimiter=' ')
dataset = list(lines)
for x in range(len(dataset)):
dataset[x] = filter(None, dataset[x])
dataset[x] = list(map(float, dataset[x]))
data.append(dataset[x])
def euclideanDistance(instance1, instance2, length):
distance = 0
for x in range(length):
if x == 0:
continue
distance += pow((instance1[x] - instance2[x]), 2)
return math.sqrt(distance)
def getNeighbors(trainingSet, testInstance, k, pattern, weight):
distances = []
length = len(testInstance)
for x in range(len(trainingSet)):
        # z-normalization (computed here but not actually applied below)
        new_testInstance = (np.array(testInstance)-np.mean(testInstance))/np.std(testInstance)
        new_trainingSet = (np.array(trainingSet[x])-np.mean(trainingSet[x]))/np.std(trainingSet[x])
        # compare with the x-th training instance, skipping the class label in column 0
        d = Dtw(testInstance[1:], trainingSet[x][1:], pattern, weight)
dist = d.get_dtw()
# dist = euclideanDistance(testInstance, trainingSet[x], length)
distances.append((trainingSet[x], dist))
distances.sort(key=operator.itemgetter(1))
# print "dist >>>> ",distances
neighbors = []
for x in range(k):
neighbors.append(distances[x][0])
return neighbors
def getResponse(neighbors):
classVotes = {}
for x in range(len(neighbors)):
response = neighbors[x][0]
if response in classVotes:
classVotes[response] += 1
else:
classVotes[response] = 1
sortedVotes = sorted(classVotes.iteritems(), key=operator.itemgetter(1), reverse=True)
return sortedVotes[0][0]
def getAccuracy(testSet, predictions):
correct = 0
for x in range(len(testSet)):
if testSet[x][0] == predictions[x]:
correct += 1
return (correct/float(len(testSet))) * 100.0
def knn(train_data, test_data, k, pattern, weight):
# prepare data
trainingSet=[]
testSet=[]
loadDataset(train_data, trainingSet)
loadDataset(test_data, testSet)
# print 'Train set: ' + repr(len(trainingSet))
# print trainingSet
# print 'Test set: ' + repr(len(testSet))
# print testSet
# generate predictions
predictions=[]
for x in range(len(testSet)):
# print ">>",testSet[x]
neighbors = getNeighbors(trainingSet, testSet[x], k, pattern, weight)
# print "neighbors >>", neighbors
result = getResponse(neighbors)
# print "result >>", result
predictions.append(result)
# print('> predicted=' + repr(result) + ', actual=' + repr(testSet[x][0]))
accuracy = getAccuracy(testSet, predictions)
return accuracy
###Output
_____no_output_____
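###Markdown
Before running the full experiment, here is a quick sanity check of the `Dtw` class on two toy sequences (an illustrative sketch; the sequences and the wide band value are made up for demonstration):
###Code
toy_a = [1, 2, 3, 4, 3, 2, 1]
toy_b = [1, 1, 2, 3, 4, 3, 2]
# wide band so the warping path is essentially unconstrained
d = Dtw(toy_a, toy_b, band_r=1.0)
print (d.get_dtw())   # normalized DTW distance between the two toy sequences
d.print_g_matrix()    # accumulated cost matrix
###Output
_____no_output_____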
###Markdown
Main
###Code
PATTERNS_1 = [(0,-1), (-1,-1), (-1,0)]
WEIGHTS_SYM_1 = [{(0,0):1}, {(0,0):2}, {(0,0):1}]
COUNT = 10
weights = []
for i in range(COUNT+1):
for j in range(COUNT-i+1):
k = COUNT - j - i
weights.append([{(0,0):i}, {(0,0):j}, {(0,0):k}])
TRAIN_DATA = 'dataset/Coffee_TRAIN'
TEST_DATA = 'dataset/Coffee_TEST'
OUTPUT_FILE = 'COUNT10_acc_coffee_0.01band.csv'
knn(TRAIN_DATA, TEST_DATA, 1, PATTERNS_1, WEIGHTS_SYM_1)
with open(OUTPUT_FILE, "w") as myfile:
myfile.write("i,j,k,accuracy\n")
for weight in weights:
i = weight[0][(0,0)]
j = weight[1][(0,0)]
k = weight[2][(0,0)]
print "i:", i, "j:", j,"k:", k
acc = knn(TRAIN_DATA, TEST_DATA, 1, PATTERNS_1, weight)
print acc
with open(OUTPUT_FILE, "a") as myfile:
myfile.write(str(i)+","+str(j)+","+str(k)+","+str(acc)+"\n")
###Output
i: 0 j: 0 k: 10
|
GOLEM database analysis.ipynb | ###Markdown
In this notebook, we explore the GOLEM tokamak database using the _pygolem_ Python module
###Code
# Python 3
import configparser as cp
import numpy as np
import matplotlib.pyplot as plt
from urllib.request import urlopen, HTTPError
%matplotlib inline
def get_shot_config(shot):
"""
Get the GOLEM shot configuration.
Parameters
----------
shot : int
GOLEM shot number
Returns
-------
dict
GOLEM shot configuration dictionnary
"""
url = 'http://golem.fjfi.cvut.cz/shots/{}/data_configuration.cfg'.format(shot)
try:
with urlopen(url) as response:
config_str = response.read().decode('utf-8')
config = cp.RawConfigParser()
data_types = dict()
config.read_string(config_str)
for data_type in config.sections():
data_types[data_type] = dict(config.items(data_type))
return data_types
except HTTPError:
print('Problem with the network? Can''t open the config file')
return None
def get_shot_data_dict(shot, signame):
"""
Returns the data dictionnary of a signal for given shot.
"""
baseURL = "http://golem.fjfi.cvut.cz/utils/data/"
url = baseURL + str(shot) +'/' + signame + '.npz'
print('Openning {} ...'.format(url))
# The source file gets downloaded and saved into a temporary directory
ds = np.DataSource()
    return np.load(ds.open(url, mode='br'))  # Python 3 needs to open the file in binary mode
def dict_to_y(data_dict):
y = data_dict['data']
t = np.linspace(data_dict['t_start'], data_dict['t_end'], len(y))
return t, y
cfg = get_shot_config(22668)
irog = get_shot_data_dict(22668, 'irog')
t, _irog = dict_to_y(irog)
plt.plot(t, _irog)
###Output
Openning http://golem.fjfi.cvut.cz/utils/data/22668/irog.npz ...
###Markdown
Basic Parameters
###Code
shot = 22667
# Gas Pressure
pressure = get_shot_data_dict(shot, 'pressure')['data'] # mPa
print(pressure)
# Gas Specie
gas = get_shot_data_dict(shot, 'working_gas')['data']
print(gas)
# Plasma lifetime
is_plasma = get_shot_data_dict(shot, 'plasma')['data'] # 1 or 0
t_plasma = get_shot_data_dict(shot, 'plasma_life')['data']
ub, ubd, ucd, ust = get_shot_data_dict(shot, 'ub')['data'], get_shot_data_dict(shot, 'ubd')['data'], get_shot_data_dict(shot, 'ucd')['data'], get_shot_data_dict(shot, 'ust')['data']
tb, tbd, tcd, tst = get_shot_data_dict(shot, 'tb')['data'], get_shot_data_dict(shot, 'tbd')['data'], get_shot_data_dict(shot, 'tcd')['data'], get_shot_data_dict(shot, 'tst')['data'],
print(pressure, gas, t_plasma)
print(ub, ubd, ucd, ust)
print(tb, tbd, tcd, tst)
###Output
20.6484 b'H' 0.00754
600.0 0.0 500.0 0.0
0.005 0.005 0.006 0.005
|
data_science/code/.ipynb_checkpoints/conversion_proteomics_conc2count-checkpoint.ipynb | ###Markdown
1. Abundance [mmol/cell] = Abundance [mmol/gDW] * ( cell volume [fL/cell] * cell density [g/fL] * dry content [gDW/g] )2. Abundance [molecules/cell] = Abundance [mmol/cell] * Na [molecules/mol] / 1000 [mmol/mol]
###Code
import pandas as pd
import re
# import data
data = pd.read_csv(f"{INTERMEDIATE}/proteomics_concentrations.csv", index_col=0)
# get cell volumes
cell_volumes = pd.read_csv(f"{RAW_INTERNAL}/proteomics/growth_conditions.csv", index_col=0)
cell_volumes = cell_volumes["Single cell volume [fl]1"]
# remove the first two rows of LB
cell_volumes = cell_volumes.loc[~cell_volumes.index.duplicated(keep='first')]
# rename the number 3 in there
cell_volumes = cell_volumes.rename({'Osmotic-stress glucose3':'Osmotic-stress glucose_uncertainty'}, axis='index')
rename_dict = {i:re.sub(r'\W+', '', i).lower() for i in cell_volumes.index}
cell_volumes = cell_volumes.rename(rename_dict, axis='index')
# Finally, convert to mmol/gDW:
water_content = 0.3       # used here as the dry content term [gDW/g] from formula 1 above
cell_density = 1.105e-12  # cell density [g/fL]
# Iterate through the dataset and multiply by the corresponding cell volume, to get mmol/fL:
for (col_name, d) in data.iteritems():
chemo_name = col_name.replace("_uncertainty", "").replace("_mean", "")
try:
data[col_name] = data[col_name] * cell_volumes.loc[chemo_name]#["cell_volume"]
except:
print(chemo_name)
data = data * cell_density * water_content
# convert into counts
data = data * 6.022e+23 / 1000
data
original_data = pd.read_csv(f"{RAW_INTERNAL}/proteomics/protein_values.csv", index_col=0)
original_data
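# Quick sanity check of the conversion chain on one made-up value
# (illustrative only; 0.001 mmol/gDW and a 3 fL cell are assumptions, not values from the dataset)
example_mmol_per_gDW = 0.001
example_cell_volume_fl = 3.0
example_mmol_per_cell = example_mmol_per_gDW * example_cell_volume_fl * cell_density * water_content
example_molecules_per_cell = example_mmol_per_cell * 6.022e+23 / 1000
print(example_mmol_per_cell, example_molecules_per_cell)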
###Output
_____no_output_____ |
notebooks/2018-03-05-Supervised-Learning-Explained-1.ipynb | ###Markdown
What is Supervised LearningThere are a few types of machine learning. Supervised Learning is one of them.The fundamental concept is letting a system learn from lots of **labeled** data. After the learning, the system will be able to predict the result when new data come.This is supervised learning. **Labeled** data means we know the meaning of our data. Examples can be, - Given facts, like house size, location, year of build, we know the price. Here the price is the label. House size, location and year of build are called features.- Given a photo, we know whether it is a cat. Here whether it is a cat is the label. And the photo is the features. How We Learn a Cat is a CatWhen we were young, someone, likely our parents, told us that it is a cat when we see a cat. So we just looked at the cat visually and labeled it as a cat.And later on, when we see a cat, we predict it. - Sometimes we are right, and our parents will say good job.- Sometimes we are wrong, and our parents will say, no, that is not a cat.Over time, we will get better predictions, super close to 100%.Our parents are our supervisor in that case. We are learning in a supervised way. Before LearningWithout previous knowledge, let a system tell whether a photo is a cat.The system is a plain cubic box; it can only guess randomly. Either yes, it's a cat. Or no, it's not a cat. Just like tossing a coin. So the accuracy will be just 50%. After LearningIf we give the cubic box enough **labeled** data, and let it learn **long enough**, the plain cubic box will become a magic box. It will have high accuracy to tell whether a photo is a cat. How Good a Magic Box isOf course we want our magic box to be able to:- classify a photo as a cat if the coming photo is a cat- classify a photo as not a cat if the coming photo is not a catWe can measure it with the following steps:1. give the magic box a few **new** labeled photos2. let the magic box do the classification3. compare the predicted result and expected result to get the accuracy. > **New** photos means that the magic box has never seen these photos before. How to Make a Good Magic BoxThere are a few general ways to get a better magic box.1. Give it more photos2. Use better algorithms3. Buy more powerful machines4. Let it learn long enoughWe can always let the box learn long enough, though it would not be an option most of the time.We could also buy more powerful machines, if we have enough money. Again, that might not be an option.So most of the time, we are spending time either getting more data or looking for better algorithms. Hello World Algorithm - Logistic RegressionJust like any other programming language, there is Hello World. In the supervised learning world, it's Logistic Regression. **Logistic Regression** is a supervised learning algorithm to solve classification problems.Yes. It's strange, **Logistic Regression** is used to solve classification problems.If we take the cat classification problem, denote- every pixel as an input feature \\(x\\), denoted as \\(x_1, x_2, x_3 ..., x_n\\)- every pixel has a weight \\(w\\), denoted as \\(w_1, w_2, w_3 ..., w_n\\)- a bias value regardless of any pixel \\(b\\)Logistic Regression uses the equations$$z=w_1x_1+w_2x_2+w_3x_3+...+w_nx_n+b$$$$y=\sigma(z)=\frac 1{1+e^{-z}}$$By training it long enough, we get a set of values \\(w_1, w_2, w_3 ..., w_n\\) and \\(b\\).We will be able to calculate the result by substituting the values \\(x_1, x_2, x_3 ..., x_n\\), \\(w_1, w_2, w_3 ..., w_n\\), \\(b\\) into the previous equation. How About Other AlgorithmsSo Logistic Regression sounds simple. 
How about other algorithms?Theoretically it's just more equations.And of course, it will take lots of effort to make it work, and make it work better.Here is an example of a slightly more complex algorithm. It is still possible to draw every connection. For modern deep learning, it will not be possible to draw every connection since there are more than a million connections. Why Machine Learning is Difficult Hard to UnderstandOne dimension is easy. We can easily figure it out. How about 2, 3, 4 dimensions?Our brain will not be able to plot it when it's over 3 dimensions. It will be difficult if we cannot make the algorithm a picture in our head. Hard to TryWe could just try every combination if - Combinations are in a finite space- Time cost is small- Financial cost is smallBut that is not the case for machine learning. It cannot be done by brute force. Hard to Get Enough Clean DataThe world is complex. Data could be here, there, in this format, in that format, correct or incorrect.It takes lots of time and effort to get **clean** data.If we cannot get enough clean data, no matter how good an algorithm we have, it will go to the idiom "garbage in, garbage out". A Little Bit on Linear AlgebraIt will not take more than 3 minutes.$$\begin{align*} 1 * 7 + 2 * 9 + 3 * 11 &= 58\\1 * 8 + 2 * 10 + 3 * 12 &= 64\end{align*} $$The first one is a [2, 3] matrix, and the second one is a [3, 2] matrix, so the result will be a [2, 2] matrix.In general, if we have an [m, n] matrix dot product an [n, o] matrix, the result will be an [m, o] matrix. Vectorize Logistic RegressionRecall the equations used in Logistic Regression $$\begin{align*} z&=w_1x_1+w_2x_2+w_3x_3+...+w_nx_n+b \\\hat y=a&=\sigma(z)=\frac 1{1+e^{-z}}\end{align*} $$If we set w as a [1, dim] matrix, and x as a [dim, 1] matrix, we can rewrite the previous equation as$$\begin{align*} z&=w\cdot x \\[1, 1] &\Leftarrow [1, dim] \cdot [dim, 1] \end{align*}$$If we stack all samples of x as a [dim, m] matrix (each column is one example) and stack all labels y together as [1, m], then Z has shape [1, m]. We can write the equation for the whole dataset as$$\begin{align*} Z &= w\cdot X\\[1, m] &\Leftarrow [1, dim] \cdot [dim, m]\end{align*}$$So after vectorizing, we have the following parameters with their shapes|parameter|shape||:--------|:----|| X | [dim, m]|| Y,A,Z | [1, m]|| w | [1, dim]| Implement Logistic Regression Forward PropagationWith the equations we have, we can simply implement **Logistic Regression** with **numpy**, which is a linear algebra library in Python.We can create test data and implement logistic regression forward propagation like this
###Code
import numpy as np
## Generate test data
dim = 3 # just tested with 3 dimensions
m = 10 # just tested with 10 samples
np.random.seed(1) # set seed, so that we will get predictable value for random
X = np.random.randn(dim, m) # generate random [dim, m] matrix
Y = np.random.randint(2, size=(1, m)) # generate random int 0 or 1, matrix [1, m]
## Initialize parameter
w = np.random.randn(1, dim) # generate initial weights with random values
b = 0.0 # initial bias
## The following two lines are logistic regression forward propagation
Z = np.dot(w, X) + b # dot product w and X, then plus b. numpy will broadcast b
A = 1.0 / (1.0 + np.exp(-Z)) # sigmoid function
print(A)
###Output
[[0.05968404 0.97675612 0.66520774 0.68710686 0.12648752 0.8792792
0.54440253 0.81416813 0.4697947 0.28029121]]
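###Markdown
A quick check that the shapes match the table above (reusing `X`, `Y`, `w`, `Z`, `A` from the previous cell):
###Code
print(X.shape, Y.shape, w.shape, Z.shape, A.shape)
###Output
_____no_output_____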
###Markdown
Cost Function**Loss function** is used to define how close a predicted result is to the expected result.In logistic regression, the loss function for each example is defined as$$ \mathcal{L}(a, y) = - y \log(a) - (1-y) \log(1-a)\tag{8}$$There is an explanation by Andrew Ng about [why use this definition](https://www.coursera.org/learn/neural-networks-deep-learning/lecture/SmIbQ/explanation-of-logistic-regression-cost-function-optional).**Cost function** is used to define how close all predicted results are to the expected results (**labels**). The **cost function** is defined as the average loss over all the training examples. $$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{9}$$Recall that A and Y are both [1, m]. In order to get the sum, we can write one line to get the cost from A and Y. We will get a [1, 1] matrix just by the dot product of [1, m] and [m, 1]. $$J = \frac{1}{m} \bigg(-Y \cdot log(A.T) - (1 - Y) \cdot log(1-A.T) \bigg)$$And implemented with numpy in one line
###Code
cost = (-np.dot(Y, np.log(A.T)) - np.dot(1 - Y, np.log(1 - A.T))) / m
print(np.squeeze(cost))
###Output
1.4042604972431578
###Markdown
Gradient DescentIf I'm dropped randomly in a ski resort, it is unlikely that I'm dropped off at the lowest point. Gravity will pull you down the slope when you ski. We also need the slope when training our model: by moving along the slope in the direction of smaller cost, our model gets better and better. This slope is the gradient, i.e. the derivative of the cost function, and following it downhill is called gradient descent. We defined the loss function as $$ \mathcal{L}(\hat{y}, y^{(i)}) = - y^{(i)} \log(\hat y^{(i)}) - (1-y^{(i)} ) \log(1-\hat y^{(i)})\tag{8}$$Based on the basic calculus rules $$\begin{align*} \big(f(x)g(x)\big)' &= f(x)g'(x)+f'(x)g(x) \\\big(\log(x)\big)'&=\frac1x\\\frac{dz}{dx}&=\frac{dz}{dy}\cdot\frac{dy}{dx}=g'\big(f(x)\big)f'(x) \\J &= \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{9}\end{align*} $$we can get $$\begin{align*} \frac{\partial \mathcal{L}}{\partial z} &= \frac{\partial \mathcal{L}}{\partial \hat y}\cdot\frac{\partial \hat{y}}{\partial {z}}\\&=-\Big(\frac y{\hat y}-\frac{1-y}{1-\hat y}\Big) \cdot\hat y(1-\hat y)\\&=\hat y - y\\\\\frac{\partial J}{\partial z} &= \frac{1}{m} \sum_{i=1}^m\frac{\partial \mathcal{L}}{\partial z}\end{align*} $$ $$\begin{align*} dw&=\frac{1}{m}(A - Y)\cdot X^T\\[1, dim] &\Leftarrow [1, m] \cdot [dim, m]^T \\db&=\frac{1}{m}\sum_{i=1}^m(A-Y)\end{align*} $$ With the equations for \\(dw\\) and \\(db\\), we can easily implement them as.
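As an optional check of the derivation, you can compare the analytic \\(dw\\) against a finite-difference estimate. This is only an illustrative sketch; the toy shapes and values below are made up and unrelated to the cat data set.

```python
import numpy as np

def cost_fn(w, b, X, Y):
    # same cost as above: mean cross-entropy over the columns of X
    A = 1.0 / (1.0 + np.exp(-(np.dot(w, X) + b)))
    return ((-np.dot(Y, np.log(A.T)) - np.dot(1 - Y, np.log(1 - A.T))) / X.shape[1]).item()

np.random.seed(0)
w0 = np.random.randn(1, 3)
X0 = np.random.randn(3, 5)
Y0 = np.random.randint(2, size=(1, 5))

# analytic gradient
A0 = 1.0 / (1.0 + np.exp(-(np.dot(w0, X0) + 0.0)))
dw = np.dot(A0 - Y0, X0.T) / X0.shape[1]

# numerical gradient for w[0, 0]: (J(w + eps) - J(w - eps)) / (2 * eps)
eps = 1e-6
w_plus, w_minus = w0.copy(), w0.copy()
w_plus[0, 0] += eps
w_minus[0, 0] -= eps
num = (cost_fn(w_plus, 0.0, X0, Y0) - cost_fn(w_minus, 0.0, X0, Y0)) / (2 * eps)

print(dw[0, 0], num)   # the two numbers should agree to several decimal places
```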
###Code
dw = np.dot(A - Y, X.T) / m
db = np.sum(A - Y) / m
print(dw, db)
###Output
[[-0.33415942 -0.2731823 0.00633877]] -0.04968219501005455
###Markdown
Backward PropagationJust as when we are on a mountain, we can easily follow the slope down to the valley, since we as humans are much smaller than the mountain. How about a giant on the mountain? Taking fixed, very large steps, he might never reach the valley. The same applies to machine learning: we need to control the step size, called the learning rate \\(\alpha\\), to avoid overshooting. Knowing the gradients \\(dw\\) and \\(db\\) and controlling the learning rate \\(\alpha\\), we can update the weights \\(w\\) and bias \\(b\\) with the following code.
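As a toy illustration of overshooting (a made-up 1-D example, unrelated to the cat model): minimizing \\(f(x)=x^2\\), whose gradient is \\(2x\\), converges with a small learning rate and diverges with a large one.

```python
x_small, x_large = 5.0, 5.0
for _ in range(20):
    x_small -= 0.1 * 2 * x_small   # learning rate 0.1: x shrinks towards 0
    x_large -= 1.1 * 2 * x_large   # learning rate 1.1: each step overshoots further
print(x_small)   # close to 0
print(x_large)   # far from 0 and growing
```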
###Code
learning_rate = 0.005
w = w - learning_rate * dw
b = b - learning_rate * db
###Output
_____no_output_____
###Markdown
Whole AlgorithmJust as on a mountain, it is unlikely that we will reach the valley in one step. The same applies here: we need to iterate many times. With all the previous preparations, we can write pseudo code like this.```pythoninit weightinit biasfor i in range(number_iterations): forward_propagation calculate cost stop iterating if cost is already small enough calculate gradients update weights and bias```We can implement the previous pseudo code in two functions.
###Code
def propagate(w, b, X, Y):
"""
w: weights, [1, dim]
b: bias, scalar value
X: features, [dim, m]
Y: labels, [1, m]
"""
m = X.shape[1]
Z = np.dot(w, X) + b
A = 1.0 / (1.0 + np.exp(-Z))
dw = np.dot(A - Y, X.T) / m
db = np.sum(A - Y) / m
return dw, db, A
def logistic_regression(X, Y, num_iterations=10, learning_rate=0.01):
dim, m = X.shape
w = np.zeros((1, dim)) # Initialize weights to zero
b = 0.0 # Initialize bias to zero
costs = [] # save cost for each iteration
for i in range(num_iterations):
dw, db, A = propagate(w, b, X, Y)
cost = -(np.dot(Y, np.log(A.T)) + np.dot(1-Y, np.log(1-A.T))) / m
# update weights and bias
w = w - learning_rate * dw
b = b - learning_rate * db
if i % 100 == 0:
print(i, cost)
costs.append(cost)
return w, b, costs, A
###Output
_____no_output_____
###Markdown
Get a Data SetThere is a cat data set from the [coursera deep learning course](https://www.coursera.org/learn/neural-networks-deep-learning/notebook/zAgPl/logistic-regression-with-a-neural-network-mindset). The data set is encoded in HDF5 format, which is typically used to store numeric data. The following piece of code is copied from the deep learning course to load the cat data
###Code
import numpy as np
import h5py
train_dataset = h5py.File('../datasets/train_catvnoncat.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('../datasets/test_catvnoncat.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
###Output
/Users/rockie/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
###Markdown
The shape of each sample is [64, 64, 3]. Both height and width are 64. Each pixel has 3 values, one per channel: Red, Green and Blue. The value range for each channel is from 0 (darkest) to 255 (lightest). We can use matplotlib to plot a sample.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
plt.imshow(train_set_x_orig[10])
print(train_set_x_orig[10].shape)
print(train_set_x_orig[10][0, 0])
###Output
(64, 64, 3)
[188 180 119]
###Markdown
Preprocess the DataA photo has two dimensions, x and y, and each point is a pixel. Each pixel in an RGB photo has 3 values: Red, Green and Blue. For **Logistic Regression**, we need to flatten each photo into one dimension. **Normalizing** the data is also an important step, since machine learning typically gives better results when values are normalized to the range [-1, 1] or [0, 1]. We can pre-process with the following code, getting a feature dimension of 12288 (which is 64*64*3), with 209 samples in the training set and 50 in the test set.
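As a quick aside before the real preprocessing cell, here is what `reshape(m, -1).T` does on a tiny made-up array of 2 "images" of 2x2 pixels with 3 channels:

```python
import numpy as np

toy = np.arange(2 * 2 * 2 * 3).reshape(2, 2, 2, 3)   # [samples, height, width, channels]
flat = toy.reshape(2, -1).T                          # each sample becomes one column
print(toy.shape, flat.shape)                         # (2, 2, 2, 3) (12, 2)
```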
###Code
m_train = train_set_x_orig.shape[0] # number of train samples
m_test = test_set_x_orig.shape[0] # number of test samples
num_px = train_set_x_orig.shape[1] # number pixel on x and y dimension
train_set_x = train_set_x_orig.reshape(m_train, -1).T / 255 # normalize pixel value to [0, 1]
test_set_x = test_set_x_orig.reshape(m_test, -1).T / 255
print(train_set_x.shape, test_set_x.shape)
###Output
(12288, 209) (12288, 50)
###Markdown
Train the Model with Logistic RegressionWe can train the model on the data set to get the weights and bias.
###Code
w, b, costs, A=logistic_regression(X=train_set_x, Y=train_set_y, num_iterations = 2001, learning_rate = 0.005)
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
costs[:10]
###Output
_____no_output_____
###Markdown
How Good is the Model on the Samples it has Seen
###Code
train_predict = np.where(A >= 0.5, 1, 0)
train_accuracy = np.sum(train_predict == train_set_y) / train_set_y.shape[1]
print("train accuracy", train_accuracy)
wrong_index = np.argmax(train_predict != train_set_y)
print("wrong predict on sample ", wrong_index, " to ", train_predict[:, wrong_index], "which should be", train_set_y[:, wrong_index])
plt.imshow(train_set_x_orig[wrong_index])
###Output
train accuracy 0.9904306220095693
wrong predict on sample 41 to [0] which should be [1]
###Markdown
Would you say that is a cat? :-( How Good is the Model on the Samples it has not Seen
###Code
def predict(w, b, X):
Z = np.dot(w, X) + b
A = 1.0 / (1.0 + np.exp(-Z))
return np.where(A >= 0.5, 1, 0)
test_predict = predict(w, b, test_set_x)
test_accuracy = np.sum(test_predict == test_set_y) / test_set_y.shape[1]
print("test accuracy", test_accuracy)
wrong_index = np.argmax(test_predict != test_set_y)
print("wrong predict on sample ", wrong_index, " to ", test_predict[:, wrong_index], "which should be", test_set_y[:, wrong_index])
plt.imshow(test_set_x_orig[wrong_index])
###Output
test accuracy 0.7
wrong predict on sample 5 to [1] which should be [0]
|
Course/Data structures and algorithms/3.Basic algorithm/1.Basic algorithms/6.trie_introduction.ipynb | ###Markdown
TrieYou've learned about Trees and Binary Search Trees. In this notebook, you'll learn about a new type of Tree called Trie. Before we dive into the details, let's talk about the kind of problem Trie can help with.Let's say you want to build software that provides spell check. This software will only say if the word is valid or not. It doesn't give suggested words. From the knowledge you've already learned, how would you build this?The simplest solution is to have a hashmap of all known words. It would take O(1) to see if a word exists, but the memory size would be O(n\*m), where n is the number of words and m is the length of the word. Let's see how a Trie can help decrease the memory usage while sacrificing a little on performance. Basic TrieLet's look at a basic Trie with the following words: "a", "add", and "hi"
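Before looking at the trie itself: for comparison, the hash-based spell check described above is just a membership test on a set (a tiny sketch with a made-up word list).

```python
known_words = {"a", "add", "hi"}     # hash-based storage: one entry per word
print("add" in known_words)          # True, O(1) average lookup
print("ad" in known_words)           # False
```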
###Code
basic_trie = {
# a and add word
'a': {
'd': {
'd': {'word_end': True},
'word_end': False},
'word_end': True},
# hi word
'h': {
'i': {'word_end': True},
'word_end': False}}
print('Is "a" a word: {}'.format(basic_trie['a']['word_end']))
print('Is "ad" a word: {}'.format(basic_trie['a']['d']['word_end']))
print('Is "add" a word: {}'.format(basic_trie['a']['d']['d']['word_end']))
###Output
_____no_output_____
###Markdown
You can look up a word by checking if `word_end` is `True` after traversing all the characters in the word. Let's look at the word "hi". The first letter is "h", so you would call `basic_trie['h']`. The second letter is "i", so you would call `basic_trie['h']['i']`. Since there are no more letters left, you would see if this is a valid word by getting the value of `word_end`. Now you have `basic_trie['h']['i']['word_end']`, which is `True` or `False` depending on whether the word exists.In `basic_trie`, the words "a" and "add" overlap. This is where a Trie saves memory. Instead of storing "a" and "add" in different cells, their characters are treated like nodes in a tree. Let's see how we would check if a word exists in `basic_trie`.
###Code
def is_word(word):
"""
Look for the word in `basic_trie`
"""
current_node = basic_trie
for char in word:
if char not in current_node:
return False
current_node = current_node[char]
return current_node['word_end']
# Test words
test_words = ['ap', 'add']
for word in test_words:
if is_word(word):
print('"{}" is a word.'.format(word))
else:
print('"{}" is not a word.'.format(word))
###Output
_____no_output_____
###Markdown
The `is_word` function starts with the root node, `basic_trie`. It traverses each character (`char`) in the word (`word`). If a character doesn't exist while traversing, the word doesn't exist in the trie. Once all the characters are traversed, the function returns the value of `current_node['word_end']`.You might notice that `is_word` is similar to a binary search tree traversal. Since a Trie is a tree, it makes sense that we would use a type of tree traversal. Now that you've seen a basic example of a Trie, let's build something more familiar. Trie Using a ClassJust like most tree data structures, let's use classes to build the Trie. Implement two functions for the `Trie` class below. Implement `add` to add a word to the Trie. Implement `exists` to return `True` if the word exists in the trie and `False` if it doesn't.
###Code
class TrieNode(object):
def __init__(self):
self.is_word = False
self.children = {}
class Trie(object):
def __init__(self):
self.root = TrieNode()
def add(self, word):
"""
Add `word` to trie
"""
pass
def exists(self, word):
"""
Check if word exists in trie
"""
pass
###Output
_____no_output_____
###Markdown
Show Solution
###Code
word_list = ['apple', 'bear', 'goo', 'good', 'goodbye', 'goods', 'goodwill', 'gooses' ,'zebra']
word_trie = Trie()
# Add words
for word in word_list:
word_trie.add(word)
# Test words
test_words = ['bear', 'goo', 'good', 'goos']
for word in test_words:
if word_trie.exists(word):
print('"{}" is a word.'.format(word))
else:
print('"{}" is not a word.'.format(word))
class TrieNode(object):
def __init__(self):
self.is_word = False
self.children = {}
class Trie(object):
def __init__(self):
self.root = TrieNode()
def add(self, word):
"""
Add `word` to trie
"""
current_node = self.root
for char in word:
if char not in current_node.children:
current_node.children[char] = TrieNode()
current_node = current_node.children[char]
current_node.is_word = True
def exists(self, word):
"""
Check if word exists in trie
"""
current_node = self.root
for char in word:
if char not in current_node.children:
return False
current_node = current_node.children[char]
return current_node.is_word
###Output
_____no_output_____
###Markdown
Trie using Defaultdict (Optional)This is an optional section. Feel free to skip this and go to the next section of the classroom.A cleaner way to build a trie is with a Python default dictionary. The following `TrieNode` class uses `collections.defaultdict` instead of a normal dictionary.
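To see why `defaultdict` helps here, note that accessing a missing key creates it on the fly using the factory you pass in, so `add` no longer needs the "is this child present?" check. A small standard-library illustration:

```python
import collections

children = collections.defaultdict(dict)
node = children['a']    # no KeyError: an empty dict is created and returned
print(children)         # defaultdict(<class 'dict'>, {'a': {}})
```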
###Code
import collections
class TrieNode:
def __init__(self):
self.children = collections.defaultdict(TrieNode)
self.is_word = False
###Output
_____no_output_____
###Markdown
Implement the `add` and `exists` function below using the new `TrieNode` class.
###Code
class Trie(object):
def __init__(self):
self.root = TrieNode()
def add(self, word):
"""
Add `word` to trie
"""
pass
def exists(self, word):
"""
Check if word exists in trie
"""
pass
###Output
_____no_output_____
###Markdown
Hide Solution
###Code
class Trie(object):
def __init__(self):
self.root = TrieNode()
def add(self, word):
"""
Add `word` to trie
"""
current_node = self.root
for char in word:
current_node = current_node.children[char]
current_node.is_word = True
def exists(self, word):
"""
Check if word exists in trie
"""
current_node = self.root
for char in word:
if char not in current_node.children:
return False
current_node = current_node.children[char]
return current_node.is_word
# Add words
valid_words = ['the', 'a', 'there', 'answer', 'any', 'by', 'bye', 'their']
word_trie = Trie()
for valid_word in valid_words:
word_trie.add(valid_word)
# Tests
assert word_trie.exists('the')
assert word_trie.exists('any')
assert not word_trie.exists('these')
assert not word_trie.exists('zzz')
print('All tests passed!')
###Output
_____no_output_____ |
ReinforcementLearning/DAT257x/library/LabFiles/Module 2/Ex2.4 Thompson Beta.ipynb | ###Markdown
DAT257x: Reinforcement Learning Explained Lab 2: Bandits Exercise 2.4 Thompson Beta
###Code
import numpy as np
import sys
if "../" not in sys.path:
sys.path.append("../")
from lib.envs.bandit import BanditEnv
from lib.simulation import Experiment
#Policy interface
class Policy:
#num_actions: (int) Number of arms [indexed by 0 ... num_actions-1]
def __init__(self, num_actions):
self.num_actions = num_actions
def act(self):
pass
def feedback(self, action, reward):
pass
###Output
_____no_output_____
###Markdown
Now let's implement a Thompson Beta algorithm.
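As a quick illustration of the sampling step the policy below relies on (the success/failure counts here are made up): each arm gets one draw from its Beta posterior and the arm with the largest draw is played, so arms with better track records win more often.

```python
import numpy as np

np.random.seed(0)
successes = np.array([10, 2, 1])    # imaginary wins per arm
failures = np.array([2, 10, 1])     # imaginary losses per arm

samples = np.random.beta(successes, failures)   # one draw per arm
print(samples, np.argmax(samples))              # arm 0 is usually selected
```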
###Code
#Thompson Beta policy
class ThompsonBeta(Policy):
def __init__(self, num_actions):
Policy.__init__(self, num_actions)
#PRIOR Hyper-params: successes = 1; failures = 1
self.total_counts = np.zeros(num_actions, dtype = np.longdouble)
self.name = "Thompson Beta"
#For each arm, maintain success and failures
self.successes = np.ones(num_actions, dtype = int)
self.failures = np.ones(num_actions, dtype = int)
def act(self):
current_action = np.argmax(np.random.beta(self.successes, self.failures))
return current_action
def feedback(self, action, reward):
if reward > 0:
self.successes[action] += 1
else:
self.failures[action] += 1
self.total_counts[action] += 1
###Output
_____no_output_____
###Markdown
Now let's prepare the simulation.
###Code
evaluation_seed = 1239
num_actions = 10
trials = 10000
distribution = "bernoulli"
###Output
_____no_output_____
###Markdown
What do you think the regret graph would look like?
###Code
env = BanditEnv(num_actions, distribution, evaluation_seed)
agent = ThompsonBeta(num_actions)
experiment = Experiment(env, agent)
experiment.run_bandit(trials)
###Output
Distribution: bernoulli [0.5061565 0.74836123 0.53065236 0.37446716 0.88168477 0.83849367
0.3951277 0.13217982 0.44509856 0.03459039]
Optimal arm: 4
|
tps-2022-02/notebooks/Notebook 1 - Exploratory Analysis.ipynb | ###Markdown
Tabular Playground Series - February 2022For this month's TPS, we are predicting the class of bacteria based on the histogram of DNA bases found in 10-mers of the DNA segments. We are told that our data includes simulated measurement errors.
###Code
import pandas as pd
import numpy as np
import pyarrow
import time
import re
import math
import plotly.express as px
import matplotlib.pyplot as plt
from IPython.display import Image
import seaborn as sns; sns.set_theme()
from sklearn.preprocessing import LabelEncoder
from math import factorial
import gc
def prepare_data(path, integer = False, remove_dupes = True):
# Load Data
df = pd.read_csv(path)
df = df.drop('row_id', axis = 1)
features = [x for x in df.columns if x not in ['row_id','target']]
bias = lambda w, x, y, z: factorial(10) / (factorial(w) * factorial(x) * factorial(y) * factorial(z) * 4**10)
# Create integer data
df_i = dict()
for col in features:
w = int(col[1:col.index('T')])
x = int(col[col.index('T')+1:col.index('G')])
y = int(col[col.index('G')+1:col.index('C')])
z = int(col[col.index('C')+1:])
df_i[col] = ((df[col] + bias(w, x, y, z)) * 1000000).round().astype(int)
df_i = pd.DataFrame(df_i)
# Get GCDs
gcd = df_i[features[0]]
for col in features[1:]:
gcd = np.gcd(gcd, df_i[col])
df['gcd'] = gcd
# Return integer histograms?
if integer:
df[features] = df_i[features]
gc.collect()
# Get sample weight
if remove_dupes:
vc = df.value_counts()
df = pd.DataFrame([list(tup) for tup in vc.index.values], columns = df.columns)
df['sample_weight'] = vc.values
# Save Memory
for col, dtype in df.dtypes.iteritems():
if dtype.name.startswith('int'):
df[col] = pd.to_numeric(df[col], downcast ='integer')
elif dtype.name.startswith('float'):
df[col] = pd.to_numeric(df[col], downcast ='float')
return df
def load_data():
try:
# Successive notebook runs will load the preprocessed data locally
train = pd.read_feather('../data/train.feather')
test = pd.read_feather('../data/test.feather')
submission = pd.read_csv('../data/sample_submission.csv')
except:
# First run has to perform preprocessing
train = prepare_data('../data/train.csv')
train.to_feather('../data/train.feather')
test = prepare_data('../data/test.csv', remove_dupes = False)
test.to_feather('../data/test.feather')
submission = pd.read_csv('../data/sample_submission.csv')
encoder = LabelEncoder()
train['target'] = encoder.fit_transform(train['target'])
return train, test, submission, encoder
###Output
_____no_output_____
###Markdown
Load Data
###Code
%%time
train, test, submission, encoder = load_data()
target_bins = train['target'].astype(str) + train['gcd'].astype(str)
# Features
features = [x for x in train.columns if x not in ['row_id','target','sample_weight','gcd']]
print(f'Training Samples: {len(train)}')
###Output
Training Samples: 123993
CPU times: total: 1.67 s
Wall time: 307 ms
###Markdown
Duplicate DataOur training data contains many duplicate rows, which have been combined and given a weight based on how often each row was duplicated.
###Code
# Training Data Dupes
temp = train.groupby(['gcd','sample_weight'])['target'].count()
temp = temp.reset_index()
temp['sample_weight'] = temp['sample_weight'].astype('str')
fig, ax = plt.subplots(2, 2, figsize = (12,9))
gcd = [[1,10],[1000,10000]]
for row in range(2):
for col in range(2):
idx = 2*row + col
ax[row,col].bar(
temp[temp.gcd == gcd[row][col]]['sample_weight'],
temp[temp.gcd == gcd[row][col]]['target'],
)
ax[row,col].set_title(f'Duplicates for GCD = {gcd[row][col]} (Training)')
###Output
_____no_output_____
###Markdown
Labels
###Code
import plotly.express as px
# Training Data Dupes
temp = train.groupby(['gcd','sample_weight', 'target'])['A0T0G0C10'].count()
temp = temp.reset_index()
temp['sample_weight'] = temp['sample_weight'].astype('str')
fig = px.bar(
temp[temp.gcd == 1000], x="sample_weight", y='A0T0G0C10',
color="target", title = "Duplicates Per Label (GCD = 1000)",
labels = {'A0T0G0C10':'Counts'}
)
img_bytes = fig.to_image(format="png")
Image(img_bytes)
# Training Data Dupes
temp = train.groupby(['gcd','sample_weight', 'target'])['A0T0G0C10'].count()
temp = temp.reset_index()
temp['sample_weight'] = temp['sample_weight'].astype('str')
fig = px.bar(
temp[temp.gcd == 10000], x="sample_weight", y='A0T0G0C10',
color="target", title = "Duplicates Per Label (GCD = 10000)",
labels = {'A0T0G0C10':'Counts'}
)
img_bytes = fig.to_image(format="png")
Image(img_bytes)
###Output
_____no_output_____
###Markdown
Test Data
###Code
# Test Data dupes
temp = prepare_data('../data/test.csv')
temp = temp.groupby(['gcd','sample_weight'])['A0T0G0C10'].count()
temp = temp.reset_index()
temp['sample_weight'] = temp['sample_weight'].astype('str')
fig, ax = plt.subplots(2, 2, figsize = (12,9))
gcd = [[1,10],[1000,10000]]
for row in range(2):
for col in range(2):
idx = 2*row + col
ax[row,col].bar(
temp[temp.gcd == gcd[row][col]]['sample_weight'],
temp[temp.gcd == gcd[row][col]]['A0T0G0C10'],
)
ax[row,col].set_title(f'Duplicates for GCD = {gcd[row][col]} (Test)')
###Output
_____no_output_____
###Markdown
Original HistogramsThe training data is formed by creating histograms out of 10-mers, then subtracting off the "bias". The bias is the 10-mer count you would expect if you generated completely random DNA sequences. This debiased count is then divided by the total number of 10-mers (1 million). Each row is based on a different number of reads, and its counts are scaled by a constant so that the row sum is 1 million.
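For reference, the "bias" can be computed directly from the multinomial formula, mirroring the `bias` lambda used in `prepare_data` above (the two compositions below are arbitrary examples):

```python
from math import factorial

def expected_fraction(a, t, g, c):
    # fraction of random 10-mers containing exactly a A's, t T's, g G's and c C's
    assert a + t + g + c == 10
    return factorial(10) / (factorial(a) * factorial(t) * factorial(g) * factorial(c) * 4**10)

print(expected_fraction(10, 0, 0, 0))   # ~9.5e-07: only one 10-mer (AAAAAAAAAA) has this composition
print(expected_fraction(3, 3, 2, 2))    # ~2.4e-02: many 10-mers share this composition
```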
###Code
%%time
original_train = prepare_data('../data/train.csv', integer = True, remove_dupes = False)
original_test = prepare_data('../data/test.csv', integer = True, remove_dupes = False)
print(f'Training Samples: {len(original_train)}')
print(f'Test Samples: {len(original_test)}')
original_train = original_train[original_train.gcd == 1][features].sum(axis = 0)
original_test = original_test[original_test.gcd == 1][features].sum(axis = 0)
original_train //= 2
temp = original_train - original_test
temp.sort_values().head(20)
###Output
_____no_output_____
###Markdown
CorrelationWe should expect a good deal of correlation: consecutive 10-mers overlap heavily, and shifting the window left or right changes at most two base counts by one.
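A small sketch of that overlap argument (the sequence below is made up): consecutive windows drop one base and gain one, so their base counts differ in at most two positions, each by one.

```python
from collections import Counter

seq = "ACGTTTGCAACGT"      # a made-up DNA fragment
k = 10
for start in range(3):
    kmer = seq[start:start + k]
    print(kmer, dict(Counter(kmer)))
```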
###Code
corr_matrix = train[features].corr()
np.fill_diagonal(corr_matrix.values, 0)
fig, ax = plt.subplots(figsize=(9, 6))
sns.heatmap(corr_matrix, ax=ax)
###Output
_____no_output_____
###Markdown
Principal ComponentsWe can look at the explained variance of the principal components to see the redundancy.
###Code
from sklearn.decomposition import PCA
pca = PCA()
pca.fit(train[features])
cumsum = np.cumsum(pca.explained_variance_ratio_)
fig, ax = plt.subplots(figsize = (9,6))
ax.plot(range(1,len(cumsum)+1), cumsum)
plt.ylabel('Explained Variance')
plt.xlabel('# of Components')
# PCA projections of the training data, one panel per GCD group
fig, ax = plt.subplots(2, 2, figsize = (12,9))
GCD = [[1,10],[1000,10000]]
for row in range(2):
for col in range(2):
idx = 2*row + col
gcd = GCD[row][col]
pca = PCA(whiten = True, random_state = 0)
pca.fit(train[features][train.gcd == gcd])
train_pca = pca.transform(train[features][train.gcd == gcd])
ax[row,col].scatter(
train_pca[:,0],train_pca[:,1],
c = train[train.gcd == gcd]['target'], s = 1
)
ax[row,col].set_title(
f"{1000000 // gcd} reads ({(train['gcd'] == gcd).sum()} unique samples)"
)
plt.show()
###Output
_____no_output_____
###Markdown
Test Data w/ PCA
###Code
# PCA projections with the test data overlaid, one panel per GCD group
fig, ax = plt.subplots(2, 2, figsize = (12,9))
GCD = [[1,10],[1000,10000]]
for row in range(2):
for col in range(2):
idx = 2*row + col
gcd = GCD[row][col]
pca = PCA(whiten = True, random_state = 0)
pca.fit(train[features][train.gcd == gcd])
train_pca = pca.transform(train[features][train.gcd == gcd])
test_pca = pca.transform(test[features][test.gcd == gcd])
ax[row,col].scatter(
train_pca[:,0],train_pca[:,1],
c = train[train.gcd == gcd]['target'], s = 1
)
ax[row,col].scatter(
test_pca[:,0],test_pca[:,1],
c = 'green', s = 1
)
ax[row,col].set_title(
f"{1000000 // gcd} reads ({(train['gcd'] == gcd).sum()} unique samples)"
)
plt.show()
###Output
_____no_output_____ |
18_reinforcement_learning-nelson_changes.ipynb | ###Markdown
**Chapter 18 – Reinforcement Learning** _This notebook contains all the sample code in chapter 18_. SetupFirst, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
###Code
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Is this notebook running on Colab or Kaggle?
IS_COLAB = "google.colab" in sys.modules
IS_KAGGLE = "kaggle_secrets" in sys.modules
if IS_COLAB or IS_KAGGLE:
!apt update && apt install -y libpq-dev libsdl2-dev swig xorg-dev xvfb
%pip install -U tf-agents pyvirtualdisplay
%pip install -U gym>=0.21.0
%pip install -U gym[box2d,atari,accept-rom-license]
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
if not tf.config.list_physical_devices('GPU'):
print("No GPU was detected. CNNs can be very slow without a GPU.")
if IS_COLAB:
print("Go to Runtime > Change runtime and select a GPU hardware accelerator.")
if IS_KAGGLE:
print("Go to Settings > Accelerator and select GPU.")
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
tf.random.set_seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# To get smooth animations
import matplotlib.animation as animation
mpl.rc('animation', html='jshtml')
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
###Output
Matplotlib created a temporary config/cache directory at /tmp/matplotlib-xqle0041 because the default path (/.config/matplotlib) is not a writable directory; it is highly recommended to set the MPLCONFIGDIR environment variable to a writable directory, in particular to speed up the import of Matplotlib and to better support multiprocessing.
###Markdown
Introduction to OpenAI gym In this notebook we will be using [OpenAI gym](https://gym.openai.com/), a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning *agents* to interact with. Let's start by importing `gym`:
###Code
import gym
###Output
_____no_output_____
###Markdown
Let's list all the available environments:
###Code
gym.envs.registry.all()
###Output
_____no_output_____
###Markdown
The Cart-Pole is a very simple environment composed of a cart that can move left or right, and a pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
###Code
env = gym.make('CartPole-v1')
###Output
_____no_output_____
###Markdown
Let's initialize the environment by calling its `reset()` method. This returns an observation:
###Code
env.seed(42)
obs = env.reset()
###Output
_____no_output_____
###Markdown
Observations vary depending on the environment. In this case it is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity.
###Code
obs
###Output
_____no_output_____
###Markdown
An environment can be visualized by calling its `render()` method, and you can pick the rendering mode (the rendering options depend on the environment). **Warning**: some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even if you specify `mode="rgb_array"`. In general you can safely ignore that window. However, if Jupyter is running on a headless server (ie. without a screen) it will raise an exception. One way to avoid this is to install a fake X server like [Xvfb](http://en.wikipedia.org/wiki/Xvfb). On Debian or Ubuntu:```bash$ apt update$ apt install -y xvfb```You can then start Jupyter using the `xvfb-run` command:```bash$ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook```Alternatively, you can install the [pyvirtualdisplay](https://github.com/ponty/pyvirtualdisplay) Python library which wraps Xvfb:```bash%pip install -U pyvirtualdisplay```And run the following code:
###Code
try:
import pyvirtualdisplay
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()
except ImportError:
pass
env.render()
###Output
_____no_output_____
###Markdown
 In this example we will set `mode="rgb_array"` to get an image of the environment as a NumPy array:
###Code
img = env.render(mode="rgb_array")
img.shape
def plot_environment(env, figsize=(5,4)):
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
return img
plot_environment(env)
plt.show()
###Output
_____no_output_____
###Markdown
Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Yep, just two possible actions: accelerate towards the left or towards the right. Since the pole is leaning toward the right (`obs[2] > 0`), let's accelerate the cart toward the right:
###Code
action = 1 # accelerate right
obs, reward, done, info = env.step(action)
obs
###Output
_____no_output_____
###Markdown
 Notice that the cart is now moving toward the right (`obs[1] > 0`). The pole is still tilted toward the right (`obs[2] > 0`), but its angular velocity is now negative (`obs[3] < 0`), so it will likely be tilted toward the left after the next step.
###Code
plot_environment(env)
save_fig("cart_pole_plot")
###Output
Saving figure cart_pole_plot
###Markdown
Looks like it's doing what we're telling it to do! The environment also tells the agent how much reward it got during the last step:
###Code
reward
###Output
_____no_output_____
###Markdown
When the game is over, the environment returns `done=True`:
###Code
done
###Output
_____no_output_____
###Markdown
Finally, `info` is an environment-specific dictionary that can provide some extra information that you may find useful for debugging or for training. For example, in some games it may indicate how many lives the agent has.
###Code
info
###Output
_____no_output_____
###Markdown
The sequence of steps between the moment the environment is reset until it is done is called an "episode". At the end of an episode (i.e., when `step()` returns `done=True`), you should reset the environment before you continue to use it.
###Code
if done:
obs = env.reset()
###Output
_____no_output_____
###Markdown
Now how can we make the pole remain upright? We will need to define a _policy_ for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do. A simple hard-coded policy Let's hard code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and _vice versa_. Let's see if that works:
###Code
env.seed(42)
def basic_policy(obs):
angle = obs[2]
return 0 if angle < 0 else 1
totals = []
for episode in range(500):
episode_rewards = 0
obs = env.reset()
for step in range(200):
action = basic_policy(obs)
obs, reward, done, info = env.step(action)
episode_rewards += reward
if done:
break
totals.append(episode_rewards)
np.mean(totals), np.std(totals), np.min(totals), np.max(totals)
###Output
_____no_output_____
###Markdown
Well, as expected, this strategy is a bit too basic: the best it did was to keep the pole up for only 68 steps. This environment is considered solved when the agent keeps the pole up for 200 steps. Let's visualize one episode:
###Code
env.seed(42)
frames = []
obs = env.reset()
for step in range(200):
img = env.render(mode="rgb_array")
frames.append(img)
action = basic_policy(obs)
obs, reward, done, info = env.step(action)
if done:
break
###Output
_____no_output_____
###Markdown
Now show the animation:
###Code
def update_scene(num, frames, patch):
patch.set_data(frames[num])
return patch,
def plot_animation(frames, repeat=False, interval=40):
fig = plt.figure()
patch = plt.imshow(frames[0])
plt.axis('off')
anim = animation.FuncAnimation(
fig, update_scene, fargs=(frames, patch),
frames=len(frames), repeat=repeat, interval=interval)
plt.close()
return anim
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Clearly the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that! Neural Network Policies  Let's create a neural network that will take observations as inputs, and output the probabilities of actions to take for each observation. To choose an action, the network will estimate a probability for each action, then we will select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability `p` of the action 0 (left), and of course the probability of action 1 (right) will be `1 - p`.
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
n_inputs = 4 # == env.observation_space.shape[0]
model = keras.models.Sequential([
keras.layers.Dense(5, activation="elu", input_shape=[n_inputs]),
keras.layers.Dense(1, activation="sigmoid"),
])
###Output
_____no_output_____
###Markdown
In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state then you may need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state. You may wonder why we plan to pick a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between _exploring_ new actions and _exploiting_ the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried. Let's write a small function that will run the model to play one episode, and return the frames so we can display an animation:
###Code
def render_policy_net(model, n_max_steps=200, seed=42):
frames = []
env = gym.make("CartPole-v1")
env.seed(seed)
np.random.seed(seed)
obs = env.reset()
for step in range(n_max_steps):
frames.append(env.render(mode="rgb_array"))
left_proba = model.predict(obs.reshape(1, -1))
action = int(np.random.rand() > left_proba)
obs, reward, done, info = env.step(action)
if done:
break
env.close()
return frames
###Output
_____no_output_____
###Markdown
Now let's look at how well this randomly initialized policy network performs:
###Code
frames = render_policy_net(model)
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. We can make the same net play in 50 different environments in parallel (this will give us a diverse training batch at each step), and train for 5000 iterations. We also reset environments when they are done. We train the model using a custom training loop so we can easily use the predictions at each training step to advance the environments.
###Code
n_environments = 50
n_iterations = 5000
envs = [gym.make("CartPole-v1") for _ in range(n_environments)]
for index, env in enumerate(envs):
env.seed(index)
np.random.seed(42)
observations = [env.reset() for env in envs]
optimizer = keras.optimizers.RMSprop()
loss_fn = keras.losses.binary_crossentropy
for iteration in range(n_iterations):
# if angle < 0, we want proba(left) = 1., or else proba(left) = 0.
target_probas = np.array([([1.] if obs[2] < 0 else [0.])
for obs in observations])
with tf.GradientTape() as tape:
left_probas = model(np.array(observations))
loss = tf.reduce_mean(loss_fn(target_probas, left_probas))
print("\rIteration: {}, Loss: {:.3f}".format(iteration, loss.numpy()), end="")
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
actions = (np.random.rand(n_environments, 1) > left_probas.numpy()).astype(np.int32)
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(actions[env_index][0])
observations[env_index] = obs if not done else env.reset()
for env in envs:
env.close()
frames = render_policy_net(model)
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own. One that does not wobble as much. Policy Gradients To train this neural network we will need to define the target probabilities `y`. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in an episode, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the _credit assignment problem_.The _Policy Gradients_ algorithm tackles this problem by first playing multiple episodes, then making the actions in good episodes slightly more likely, while actions in bad episodes are made slightly less likely. First we play, then we go back and think about what we did. Let's start by creating a function to play a single step using the model. We will also pretend for now that whatever action it takes is the right one, so we can compute the loss and its gradients (we will just save these gradients for now, and modify them later depending on how good or bad the action turned out to be): 
###Code
def play_one_step(env, obs, model, loss_fn):
with tf.GradientTape() as tape:
left_proba = model(obs[np.newaxis])
action = (tf.random.uniform([1, 1]) > left_proba)
y_target = tf.constant([[1.]]) - tf.cast(action, tf.float32)
loss = tf.reduce_mean(loss_fn(y_target, left_proba))
grads = tape.gradient(loss, model.trainable_variables)
obs, reward, done, info = env.step(int(action[0, 0].numpy()))
return obs, reward, done, grads
###Output
_____no_output_____
###Markdown
If `left_proba` is high, then `action` will most likely be `False` (since a random number uniformly sampled between 0 and 1 will probably not be greater than `left_proba`). And `False` means 0 when you cast it to a number, so `y_target` would be equal to 1 - 0 = 1. In other words, we set the target to 1, meaning we pretend that the probability of going left should have been 100% (so we took the right action). Now let's create another function that will rely on the `play_one_step()` function to play multiple episodes, returning all the rewards and gradients, for each episode and each step:
###Code
def play_multiple_episodes(env, n_episodes, n_max_steps, model, loss_fn):
all_rewards = []
all_grads = []
for episode in range(n_episodes):
current_rewards = []
current_grads = []
obs = env.reset()
for step in range(n_max_steps):
obs, reward, done, grads = play_one_step(env, obs, model, loss_fn)
current_rewards.append(reward)
current_grads.append(grads)
if done:
break
all_rewards.append(current_rewards)
all_grads.append(current_grads)
return all_rewards, all_grads
###Output
_____no_output_____
###Markdown
The Policy Gradients algorithm uses the model to play the episode several times (e.g., 10 times), then it goes back and looks at all the rewards, discounts them and normalizes them. So let's create a couple of functions for that: the first will compute discounted rewards; the second will normalize the discounted rewards across many episodes.
###Code
def discount_rewards(rewards, discount_rate):
discounted = np.array(rewards)
for step in range(len(rewards) - 2, -1, -1):
discounted[step] += discounted[step + 1] * discount_rate
return discounted
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate)
for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean) / reward_std
for discounted_rewards in all_discounted_rewards]
###Output
_____no_output_____
###Markdown
Say there were 3 actions, and after each action there was a reward: first 10, then 0, then -50. If we use a discount factor of 80%, then the 3rd action will get -50 (full credit for the last reward), but the 2nd action will only get -40 (80% credit for the last reward), and the 1st action will get 80% of -40 (-32) plus full credit for the first reward (+10), which leads to a discounted reward of -22:
###Code
discount_rewards([10, 0, -50], discount_rate=0.8)
###Output
_____no_output_____
###Markdown
To normalize all discounted rewards across all episodes, we compute the mean and standard deviation of all the discounted rewards, and we subtract the mean from each discounted reward, and divide by the standard deviation:
###Code
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
n_iterations = 150
n_episodes_per_update = 10
n_max_steps = 200
discount_rate = 0.95
optimizer = keras.optimizers.Adam(learning_rate=0.01)
loss_fn = keras.losses.binary_crossentropy
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential([
keras.layers.Dense(5, activation="elu", input_shape=[4]),
keras.layers.Dense(1, activation="sigmoid"),
])
env = gym.make("CartPole-v1")
env.seed(42);
for iteration in range(n_iterations):
all_rewards, all_grads = play_multiple_episodes(
env, n_episodes_per_update, n_max_steps, model, loss_fn)
total_rewards = sum(map(sum, all_rewards)) # Not shown in the book
print("\rIteration: {}, mean rewards: {:.1f}".format( # Not shown
iteration, total_rewards / n_episodes_per_update), end="") # Not shown
all_final_rewards = discount_and_normalize_rewards(all_rewards,
discount_rate)
all_mean_grads = []
for var_index in range(len(model.trainable_variables)):
mean_grads = tf.reduce_mean(
[final_reward * all_grads[episode_index][step][var_index]
for episode_index, final_rewards in enumerate(all_final_rewards)
for step, final_reward in enumerate(final_rewards)], axis=0)
all_mean_grads.append(mean_grads)
optimizer.apply_gradients(zip(all_mean_grads, model.trainable_variables))
env.close()
frames = render_policy_net(model)
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Markov Chains 
###Code
np.random.seed(42)
transition_probabilities = [ # shape=[s, s']
[0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
[0.0, 0.0, 0.9, 0.1], # from s1 to ...
[0.0, 1.0, 0.0, 0.0], # from s2 to ...
[0.0, 0.0, 0.0, 1.0]] # from s3 to ...
n_max_steps = 50
def print_sequence():
current_state = 0
print("States:", end=" ")
for step in range(n_max_steps):
print(current_state, end=" ")
if current_state == 3:
break
current_state = np.random.choice(range(4), p=transition_probabilities[current_state])
else:
print("...", end="")
print()
for _ in range(10):
print_sequence()
###Output
States: 0 0 3
States: 0 1 2 1 2 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
States: 0 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 ...
States: 0 0 3
States: 0 0 0 1 2 1 2 1 3
States: 0 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 2 1 3
###Markdown
Markov Decision Process  Let's define some transition probabilities, rewards and possible actions. For example, in state s0, if action a0 is chosen then with proba 0.7 we will go to state s0 with reward +10, with probability 0.3 we will go to state s1 with no reward, and with never go to state s2 (so the transition probabilities are `[0.7, 0.3, 0.0]`, and the rewards are `[+10, 0, 0]`):
###Code
transition_probabilities = [ # shape=[s, a, s']
[[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]],
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None]]
rewards = [ # shape=[s, a, s']
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]]]
possible_actions = [[0, 1, 2], [0, 2], [1]]
###Output
_____no_output_____
###Markdown
  Q-Value Iteration 
###Code
Q_values = np.full((3, 3), -np.inf) # -np.inf for impossible actions
for state, actions in enumerate(possible_actions):
Q_values[state, actions] = 0.0 # for all possible actions
gamma = 0.90 # the discount factor
history1 = [] # Not shown in the book (for the figure below)
for iteration in range(50):
Q_prev = Q_values.copy()
history1.append(Q_prev) # Not shown
for s in range(3):
for a in possible_actions[s]:
Q_values[s, a] = np.sum([
transition_probabilities[s][a][sp]
* (rewards[s][a][sp] + gamma * np.max(Q_prev[sp]))
for sp in range(3)])
history1 = np.array(history1) # Not shown
Q_values
np.argmax(Q_values, axis=1)
###Output
_____no_output_____
###Markdown
The optimal policy for this MDP, when using a discount factor of 0.90, is to choose action a0 when in state s0, and choose action a0 when in state s1, and finally choose action a1 (the only possible action) when in state s2. Let's try again with a discount factor of 0.95:
###Code
Q_values = np.full((3, 3), -np.inf) # -np.inf for impossible actions
for state, actions in enumerate(possible_actions):
Q_values[state, actions] = 0.0 # for all possible actions
gamma = 0.95 # the discount factor
for iteration in range(50):
Q_prev = Q_values.copy()
for s in range(3):
for a in possible_actions[s]:
Q_values[s, a] = np.sum([
transition_probabilities[s][a][sp]
* (rewards[s][a][sp] + gamma * np.max(Q_prev[sp]))
for sp in range(3)])
Q_values
np.argmax(Q_values, axis=1)
###Output
_____no_output_____
###Markdown
Now the policy has changed! In state s1, we now prefer to go through the fire (choose action a2). This is because the discount factor is larger so the agent values the future more, and it is therefore ready to pay an immediate penalty in order to get more future rewards. Q-Learning Q-Learning works by watching an agent play (e.g., randomly) and gradually improving its estimates of the Q-Values. Once it has accurate Q-Value estimates (or close enough), then the optimal policy consists in choosing the action that has the highest Q-Value (i.e., the greedy policy). We will need to simulate an agent moving around in the environment, so let's define a function to perform some action and get the new state and a reward:
###Code
def step(state, action):
probas = transition_probabilities[state][action]
next_state = np.random.choice([0, 1, 2], p=probas)
reward = rewards[state][action][next_state]
return next_state, reward
###Output
_____no_output_____
###Markdown
We also need an exploration policy, which can be any policy, as long as it visits every possible state many times. We will just use a random policy, since the state space is very small:
###Code
def exploration_policy(state):
return np.random.choice(possible_actions[state])
###Output
_____no_output_____
###Markdown
Now let's initialize the Q-Values like earlier, and run the Q-Learning algorithm:
###Code
np.random.seed(42)
Q_values = np.full((3, 3), -np.inf)
for state, actions in enumerate(possible_actions):
Q_values[state][actions] = 0
alpha0 = 0.05 # initial learning rate
decay = 0.005 # learning rate decay
gamma = 0.90 # discount factor
state = 0 # initial state
history2 = [] # Not shown in the book
for iteration in range(10000):
history2.append(Q_values.copy()) # Not shown
action = exploration_policy(state)
next_state, reward = step(state, action)
next_value = np.max(Q_values[next_state]) # greedy policy at the next step
alpha = alpha0 / (1 + iteration * decay)
Q_values[state, action] *= 1 - alpha
Q_values[state, action] += alpha * (reward + gamma * next_value)
state = next_state
history2 = np.array(history2) # Not shown
Q_values
np.argmax(Q_values, axis=1) # optimal action for each state
true_Q_value = history1[-1, 0, 0]
fig, axes = plt.subplots(1, 2, figsize=(10, 4), sharey=True)
axes[0].set_ylabel("Q-Value$(s_0, a_0)$", fontsize=14)
axes[0].set_title("Q-Value Iteration", fontsize=14)
axes[1].set_title("Q-Learning", fontsize=14)
for ax, width, history in zip(axes, (50, 10000), (history1, history2)):
ax.plot([0, width], [true_Q_value, true_Q_value], "k--")
ax.plot(np.arange(width), history[:, 0, 0], "b-", linewidth=2)
ax.set_xlabel("Iterations", fontsize=14)
ax.axis([0, width, 0, 24])
save_fig("q_value_plot")
###Output
Saving figure q_value_plot
###Markdown
Deep Q-Network Let's build the DQN. Given a state, it will estimate, for each possible action, the sum of discounted future rewards it can expect after it plays that action (but before it sees its outcome):
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
env = gym.make("CartPole-v1")
input_shape = [4] # == env.observation_space.shape
n_outputs = 2 # == env.action_space.n
model = keras.models.Sequential([
keras.layers.Dense(32, activation="elu", input_shape=input_shape),
keras.layers.Dense(32, activation="elu"),
keras.layers.Dense(n_outputs)
])
###Output
_____no_output_____
###Markdown
To select an action using this DQN, we just pick the action with the largest predicted Q-value. However, to ensure that the agent explores the environment, we choose a random action with probability `epsilon`.
###Code
def epsilon_greedy_policy(state, epsilon=0):
if np.random.rand() < epsilon:
return np.random.randint(n_outputs)
else:
Q_values = model.predict(state[np.newaxis])
return np.argmax(Q_values[0])
###Output
_____no_output_____
###Markdown
We will also need a replay memory. It will contain the agent's experiences, in the form of tuples: `(obs, action, reward, next_obs, done)`. We can use the `deque` class for that (but make sure to check out DeepMind's excellent [Reverb library](https://github.com/deepmind/reverb) for a much more robust implementation of experience replay):
###Code
from collections import deque
replay_memory = deque(maxlen=2000)
###Output
_____no_output_____
###Markdown
And let's create a function to sample experiences from the replay memory. It will return 5 NumPy arrays: `[obs, actions, rewards, next_obs, dones]`.
###Code
def sample_experiences(batch_size):
indices = np.random.randint(len(replay_memory), size=batch_size)
batch = [replay_memory[index] for index in indices]
states, actions, rewards, next_states, dones = [
np.array([experience[field_index] for experience in batch])
for field_index in range(5)]
return states, actions, rewards, next_states, dones
###Output
_____no_output_____
###Markdown
Now we can create a function that will use the DQN to play one step, and record its experience in the replay memory:
###Code
def play_one_step(env, state, epsilon):
action = epsilon_greedy_policy(state, epsilon)
next_state, reward, done, info = env.step(action)
replay_memory.append((state, action, reward, next_state, done))
return next_state, reward, done, info
###Output
_____no_output_____
###Markdown
Lastly, let's create a function that will sample some experiences from the replay memory and perform a training step:**Notes**:* The first 3 releases of the 2nd edition were missing the `reshape()` operation which converts `target_Q_values` to a column vector (this is required by the `loss_fn()`).* The book uses a learning rate of 1e-3, but in the code below I use 1e-2, as it significantly improves training. I also tuned the learning rates of the DQN variants below.
###Code
batch_size = 32
discount_rate = 0.95
optimizer = keras.optimizers.Adam(learning_rate=1e-2)
loss_fn = keras.losses.mean_squared_error
def training_step(batch_size):
experiences = sample_experiences(batch_size)
states, actions, rewards, next_states, dones = experiences
next_Q_values = model.predict(next_states)
max_next_Q_values = np.max(next_Q_values, axis=1)
target_Q_values = (rewards +
(1 - dones) * discount_rate * max_next_Q_values)
target_Q_values = target_Q_values.reshape(-1, 1)
mask = tf.one_hot(actions, n_outputs)
with tf.GradientTape() as tape:
all_Q_values = model(states)
Q_values = tf.reduce_sum(all_Q_values * mask, axis=1, keepdims=True)
loss = tf.reduce_mean(loss_fn(target_Q_values, Q_values))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
###Output
_____no_output_____
###Markdown
And now, let's train the model!
###Code
env.seed(42)
np.random.seed(42)
tf.random.set_seed(42)
rewards = []
best_score = 0
for episode in range(600):
obs = env.reset()
for step in range(200):
epsilon = max(1 - episode / 500, 0.01)
obs, reward, done, info = play_one_step(env, obs, epsilon)
if done:
break
rewards.append(step) # Not shown in the book
if step >= best_score: # Not shown
best_weights = model.get_weights() # Not shown
best_score = step # Not shown
print("\rEpisode: {}, Steps: {}, eps: {:.3f}".format(episode, step + 1, epsilon), end="") # Not shown
if episode > 50:
training_step(batch_size)
model.set_weights(best_weights)
###Output
Episode: 599, Steps: 200, eps: 0.010
###Markdown

###Code
plt.figure(figsize=(8, 4))
plt.plot(rewards)
plt.xlabel("Episode", fontsize=14)
plt.ylabel("Sum of rewards", fontsize=14)
save_fig("dqn_rewards_plot")
plt.show()
env.seed(42)
state = env.reset()
frames = []
for step in range(200):
action = epsilon_greedy_policy(state)
state, reward, done, info = env.step(action)
if done:
break
img = env.render(mode="rgb_array")
frames.append(img)
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Not bad at all! 😀 Double DQN
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Dense(32, activation="elu", input_shape=[4]),
keras.layers.Dense(32, activation="elu"),
keras.layers.Dense(n_outputs)
])
target = keras.models.clone_model(model)
target.set_weights(model.get_weights())
batch_size = 32
discount_rate = 0.95
optimizer = keras.optimizers.Adam(learning_rate=6e-3)
loss_fn = keras.losses.Huber()
def training_step(batch_size):
experiences = sample_experiences(batch_size)
states, actions, rewards, next_states, dones = experiences
next_Q_values = model.predict(next_states)
best_next_actions = np.argmax(next_Q_values, axis=1)
next_mask = tf.one_hot(best_next_actions, n_outputs).numpy()
next_best_Q_values = (target.predict(next_states) * next_mask).sum(axis=1)
target_Q_values = (rewards +
(1 - dones) * discount_rate * next_best_Q_values)
target_Q_values = target_Q_values.reshape(-1, 1)
mask = tf.one_hot(actions, n_outputs)
with tf.GradientTape() as tape:
all_Q_values = model(states)
Q_values = tf.reduce_sum(all_Q_values * mask, axis=1, keepdims=True)
loss = tf.reduce_mean(loss_fn(target_Q_values, Q_values))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
replay_memory = deque(maxlen=2000)
env.seed(42)
np.random.seed(42)
tf.random.set_seed(42)
rewards = []
best_score = 0
for episode in range(600):
obs = env.reset()
for step in range(200):
epsilon = max(1 - episode / 500, 0.01)
obs, reward, done, info = play_one_step(env, obs, epsilon)
if done:
break
rewards.append(step)
if step >= best_score:
best_weights = model.get_weights()
best_score = step
print("\rEpisode: {}, Steps: {}, eps: {:.3f}".format(episode, step + 1, epsilon), end="")
if episode >= 50:
training_step(batch_size)
if episode % 50 == 0:
target.set_weights(model.get_weights())
# Alternatively, you can do soft updates at each step:
#if episode >= 50:
#target_weights = target.get_weights()
#online_weights = model.get_weights()
#for index in range(len(target_weights)):
# target_weights[index] = 0.99 * target_weights[index] + 0.01 * online_weights[index]
#target.set_weights(target_weights)
model.set_weights(best_weights)
plt.figure(figsize=(8, 4))
plt.plot(rewards)
plt.xlabel("Episode", fontsize=14)
plt.ylabel("Sum of rewards", fontsize=14)
save_fig("double_dqn_rewards_plot")
plt.show()
env.seed(43)
state = env.reset()
frames = []
for step in range(200):
action = epsilon_greedy_policy(state)
state, reward, done, info = env.step(action)
if done:
break
img = env.render(mode="rgb_array")
frames.append(img)
plot_animation(frames)
###Output
_____no_output_____
###Markdown
Dueling Double DQN
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
K = keras.backend
input_states = keras.layers.Input(shape=[4])
hidden1 = keras.layers.Dense(32, activation="elu")(input_states)
hidden2 = keras.layers.Dense(32, activation="elu")(hidden1)
state_values = keras.layers.Dense(1)(hidden2)
raw_advantages = keras.layers.Dense(n_outputs)(hidden2)
advantages = raw_advantages - K.max(raw_advantages, axis=1, keepdims=True)
Q_values = state_values + advantages
model = keras.models.Model(inputs=[input_states], outputs=[Q_values])
target = keras.models.clone_model(model)
target.set_weights(model.get_weights())
batch_size = 32
discount_rate = 0.95
optimizer = keras.optimizers.Adam(learning_rate=7.5e-3)
loss_fn = keras.losses.Huber()
def training_step(batch_size):
experiences = sample_experiences(batch_size)
states, actions, rewards, next_states, dones = experiences
next_Q_values = model.predict(next_states)
best_next_actions = np.argmax(next_Q_values, axis=1)
next_mask = tf.one_hot(best_next_actions, n_outputs).numpy()
next_best_Q_values = (target.predict(next_states) * next_mask).sum(axis=1)
target_Q_values = (rewards +
(1 - dones) * discount_rate * next_best_Q_values)
target_Q_values = target_Q_values.reshape(-1, 1)
mask = tf.one_hot(actions, n_outputs)
with tf.GradientTape() as tape:
all_Q_values = model(states)
Q_values = tf.reduce_sum(all_Q_values * mask, axis=1, keepdims=True)
loss = tf.reduce_mean(loss_fn(target_Q_values, Q_values))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
replay_memory = deque(maxlen=2000)
env.seed(42)
np.random.seed(42)
tf.random.set_seed(42)
rewards = []
best_score = 0
for episode in range(600):
obs = env.reset()
for step in range(200):
epsilon = max(1 - episode / 500, 0.01)
obs, reward, done, info = play_one_step(env, obs, epsilon)
if done:
break
rewards.append(step)
if step >= best_score:
best_weights = model.get_weights()
best_score = step
print("\rEpisode: {}, Steps: {}, eps: {:.3f}".format(episode, step + 1, epsilon), end="")
if episode >= 50:
training_step(batch_size)
if episode % 50 == 0:
target.set_weights(model.get_weights())
model.set_weights(best_weights)
plt.plot(rewards)
plt.xlabel("Episode")
plt.ylabel("Sum of rewards")
plt.show()
env.seed(42)
state = env.reset()
frames = []
for step in range(200):
action = epsilon_greedy_policy(state)
state, reward, done, info = env.step(action)
if done:
break
img = env.render(mode="rgb_array")
frames.append(img)
plot_animation(frames)
###Output
_____no_output_____
###Markdown
This looks like a pretty robust agent!
###Code
env.close()
###Output
_____no_output_____
###Markdown
Using TF-Agents to Beat Breakout Let's use TF-Agents to create an agent that will learn to play Breakout. We will use the Deep Q-Learning algorithm, so you can easily compare the components with the previous implementation, but TF-Agents implements many other (and more sophisticated) algorithms! TF-Agents Environments
###Code
tf.random.set_seed(42)
np.random.seed(42)
from tf_agents.environments import suite_gym
env = suite_gym.load("Breakout-v4")
env
env.gym
env.seed(42)
env.reset()
env.step(1) # Fire
img = env.render(mode="rgb_array")
plt.figure(figsize=(6, 8))
plt.imshow(img)
plt.axis("off")
save_fig("breakout_plot")
plt.show()
env.current_time_step()
###Output
_____no_output_____
###Markdown
Environment Specifications
###Code
env.observation_spec()
env.action_spec()
env.time_step_spec()
###Output
_____no_output_____
###Markdown
Environment Wrappers You can wrap a TF-Agents environment in a TF-Agents wrapper:
###Code
from tf_agents.environments.wrappers import ActionRepeat
repeating_env = ActionRepeat(env, times=4)
repeating_env
repeating_env.unwrapped
###Output
_____no_output_____
###Markdown
Here is the list of available wrappers:
###Code
import tf_agents.environments.wrappers
for name in dir(tf_agents.environments.wrappers):
obj = getattr(tf_agents.environments.wrappers, name)
if hasattr(obj, "__base__") and issubclass(obj, tf_agents.environments.wrappers.PyEnvironmentBaseWrapper):
print("{:27s} {}".format(name, obj.__doc__.split("\n")[0]))
###Output
ActionClipWrapper Wraps an environment and clips actions to spec before applying.
ActionDiscretizeWrapper Wraps an environment with continuous actions and discretizes them.
ActionOffsetWrapper Offsets actions to be zero-based.
ActionRepeat Repeates actions over n-steps while acummulating the received reward.
FlattenObservationsWrapper Wraps an environment and flattens nested multi-dimensional observations.
GoalReplayEnvWrapper Adds a goal to the observation, used for HER (Hindsight Experience Replay).
HistoryWrapper Adds observation and action history to the environment's observations.
ObservationFilterWrapper Filters observations based on an array of indexes.
OneHotActionWrapper Converts discrete action to one_hot format.
PerformanceProfiler End episodes after specified number of steps.
PyEnvironmentBaseWrapper PyEnvironment wrapper forwards calls to the given environment.
RunStats Wrapper that accumulates run statistics as the environment iterates.
TimeLimit End episodes after specified number of steps.
###Markdown
The `suite_gym.load()` function can create an env and wrap it for you, both with TF-Agents environment wrappers and Gym environment wrappers (the latter are applied first).
###Code
from functools import partial
from gym.wrappers import TimeLimit
limited_repeating_env = suite_gym.load(
"Breakout-v4",
gym_env_wrappers=[partial(TimeLimit, max_episode_steps=10000)],
env_wrappers=[partial(ActionRepeat, times=4)],
)
limited_repeating_env
limited_repeating_env.unwrapped
###Output
_____no_output_____
###Markdown
Create an Atari Breakout environment, and wrap it to apply the default Atari preprocessing steps: **Warning**: Breakout requires the player to press the FIRE button at the start of the game and after each life lost. The agent may take a very long time learning this because at first it seems that pressing FIRE just means losing faster. To speed up training considerably, we create and use a subclass of the `AtariPreprocessing` wrapper class called `AtariPreprocessingWithAutoFire` which presses FIRE (i.e., plays action 1) automatically at the start of the game and after each life lost. This is different from the book which uses the regular `AtariPreprocessing` wrapper.
###Code
from tf_agents.environments import suite_atari
from tf_agents.environments.atari_preprocessing import AtariPreprocessing
from tf_agents.environments.atari_wrappers import FrameStack4
max_episode_steps = 27000 # <=> 108k ALE frames since 1 step = 4 frames
environment_name = "BreakoutNoFrameskip-v4"
class AtariPreprocessingWithAutoFire(AtariPreprocessing):
def reset(self, **kwargs):
obs = super().reset(**kwargs)
super().step(1) # FIRE to start
return obs
def step(self, action):
lives_before_action = self.ale.lives()
obs, rewards, done, info = super().step(action)
if self.ale.lives() < lives_before_action and not done:
super().step(1) # FIRE to start after life lost
return obs, rewards, done, info
env = suite_atari.load(
environment_name,
max_episode_steps=max_episode_steps,
gym_env_wrappers=[AtariPreprocessingWithAutoFire, FrameStack4])
env
###Output
_____no_output_____
###Markdown
Play a few steps just to see what happens:
###Code
env.seed(42)
env.reset()
for _ in range(4):
time_step = env.step(3) # LEFT
def plot_observation(obs):
# Since there are only 3 color channels, you cannot display 4 frames
# with one primary color per frame. So this code computes the delta between
# the current frame and the mean of the other frames, and it adds this delta
# to the red and blue channels to get a pink color for the current frame.
obs = obs.astype(np.float32)
img = obs[..., :3]
current_frame_delta = np.maximum(obs[..., 3] - obs[..., :3].mean(axis=-1), 0.)
img[..., 0] += current_frame_delta
img[..., 2] += current_frame_delta
img = np.clip(img / 150, 0, 1)
plt.imshow(img)
plt.axis("off")
plt.figure(figsize=(6, 6))
plot_observation(time_step.observation)
save_fig("preprocessed_breakout_plot")
plt.show()
###Output
Saving figure preprocessed_breakout_plot
###Markdown
Convert the Python environment to a TF environment:
###Code
from tf_agents.environments.tf_py_environment import TFPyEnvironment
tf_env = TFPyEnvironment(env)
###Output
_____no_output_____
###Markdown
Creating the DQN Create a small class to normalize the observations. Images are stored using bytes from 0 to 255 to use less RAM, but we want to pass floats from 0.0 to 1.0 to the neural network: Create the Q-Network:
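The cell below performs this normalization with a `keras.layers.Lambda`; if you prefer the "small class" phrasing, an equivalent custom layer could look like this (an illustrative sketch, not the book's exact code):

```python
import tensorflow as tf
from tensorflow import keras

class NormalizeObservations(keras.layers.Layer):
    """Cast uint8 observations (0-255) to float32 in [0.0, 1.0]."""
    def call(self, obs):
        return tf.cast(obs, tf.float32) / 255.
```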
###Code
from tf_agents.networks.q_network import QNetwork
preprocessing_layer = keras.layers.Lambda(
lambda obs: tf.cast(obs, np.float32) / 255.)
conv_layer_params=[(32, (8, 8), 4), (64, (4, 4), 2), (64, (3, 3), 1)]
fc_layer_params=[512]
q_net = QNetwork(
tf_env.observation_spec(),
tf_env.action_spec(),
preprocessing_layers=preprocessing_layer,
conv_layer_params=conv_layer_params,
fc_layer_params=fc_layer_params)
###Output
_____no_output_____
###Markdown
Create the DQN Agent:
###Code
from tf_agents.agents.dqn.dqn_agent import DqnAgent
train_step = tf.Variable(0)
update_period = 4 # run a training step every 4 collect steps
optimizer = keras.optimizers.RMSprop(learning_rate=2.5e-4, rho=0.95, momentum=0.0,
epsilon=0.00001, centered=True)
epsilon_fn = keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=1.0, # initial ε
decay_steps=250000 // update_period, # <=> 1,000,000 ALE frames
end_learning_rate=0.01) # final ε
agent = DqnAgent(tf_env.time_step_spec(),
tf_env.action_spec(),
q_network=q_net,
optimizer=optimizer,
target_update_period=2000, # <=> 32,000 ALE frames
td_errors_loss_fn=keras.losses.Huber(reduction="none"),
gamma=0.99, # discount factor
train_step_counter=train_step,
epsilon_greedy=lambda: epsilon_fn(train_step))
agent.initialize()
###Output
_____no_output_____
###Markdown
Create the replay buffer (this will use a lot of RAM, so please reduce the buffer size if you get an out-of-memory error): **Warning**: we use a replay buffer of size 100,000 instead of 1,000,000 (as used in the book) since many people were getting OOM (Out-Of-Memory) errors.
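To put the warning in perspective, here is a rough back-of-the-envelope estimate (my own arithmetic, assuming each stored observation is an 84×84×4 `uint8` frame stack and ignoring the much smaller action/reward/step-type fields):

```python
obs_bytes = 84 * 84 * 4          # one preprocessed 4-frame stack, stored as uint8
for max_length in (100_000, 1_000_000):
    print(max_length, "->", round(obs_bytes * max_length / 1e9, 1), "GB")
# 100,000 -> ~2.8 GB, 1,000,000 -> ~28.2 GB, hence the OOM errors with the larger buffer
```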
###Code
from tf_agents.replay_buffers import tf_uniform_replay_buffer
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=agent.collect_data_spec,
batch_size=tf_env.batch_size,
max_length=100000) # reduce if OOM error
replay_buffer_observer = replay_buffer.add_batch
###Output
_____no_output_____
###Markdown
Create a simple custom observer that counts and displays the number of times it is called (except when it is passed a trajectory that represents the boundary between two episodes, as this does not count as a step):
###Code
class ShowProgress:
def __init__(self, total):
self.counter = 0
self.total = total
def __call__(self, trajectory):
if not trajectory.is_boundary():
self.counter += 1
if self.counter % 100 == 0:
print("\r{}/{}".format(self.counter, self.total), end="")
###Output
_____no_output_____
###Markdown
Let's add some training metrics:
###Code
from tf_agents.metrics import tf_metrics
train_metrics = [
tf_metrics.NumberOfEpisodes(),
tf_metrics.EnvironmentSteps(),
tf_metrics.AverageReturnMetric(),
tf_metrics.AverageEpisodeLengthMetric(),
]
train_metrics[0].result()
from tf_agents.eval.metric_utils import log_metrics
import logging
logging.getLogger().setLevel(logging.INFO)
log_metrics(train_metrics)
###Output
INFO:absl:
NumberOfEpisodes = 0
EnvironmentSteps = 0
AverageReturn = 0.0
AverageEpisodeLength = 0.0
###Markdown
Create the collect driver:
###Code
from tf_agents.drivers.dynamic_step_driver import DynamicStepDriver
collect_driver = DynamicStepDriver(
tf_env,
agent.collect_policy,
observers=[replay_buffer_observer] + train_metrics,
num_steps=update_period) # collect 4 steps for each training iteration
###Output
_____no_output_____
###Markdown
Collect the initial experiences, before training:
###Code
from tf_agents.policies.random_tf_policy import RandomTFPolicy
initial_collect_policy = RandomTFPolicy(tf_env.time_step_spec(),
tf_env.action_spec())
init_driver = DynamicStepDriver(
tf_env,
initial_collect_policy,
observers=[replay_buffer.add_batch, ShowProgress(20000)],
num_steps=20000) # <=> 80,000 ALE frames
final_time_step, final_policy_state = init_driver.run()
###Output
20000/20000
###Markdown
Let's sample 2 sub-episodes, with 3 time steps each and display them: **Note**: `replay_buffer.get_next()` is deprecated. We must use `replay_buffer.as_dataset(..., single_deterministic_pass=False)` instead.
###Code
tf.random.set_seed(9) # chosen to show an example of trajectory at the end of an episode
#trajectories, buffer_info = replay_buffer.get_next( # get_next() is deprecated
# sample_batch_size=2, num_steps=3)
trajectories, buffer_info = next(iter(replay_buffer.as_dataset(
sample_batch_size=2,
num_steps=3,
single_deterministic_pass=False)))
trajectories._fields
trajectories.observation.shape
from tf_agents.trajectories.trajectory import to_transition
time_steps, action_steps, next_time_steps = to_transition(trajectories)
time_steps.observation.shape
trajectories.step_type.numpy()
plt.figure(figsize=(10, 6.8))
for row in range(2):
for col in range(3):
plt.subplot(2, 3, row * 3 + col + 1)
plot_observation(trajectories.observation[row, col].numpy())
plt.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0, wspace=0.02)
save_fig("sub_episodes_plot")
plt.show()
###Output
Saving figure sub_episodes_plot
###Markdown
Now let's create the dataset:
###Code
dataset = replay_buffer.as_dataset(
sample_batch_size=64,
num_steps=2,
num_parallel_calls=3).prefetch(3)
###Output
_____no_output_____
###Markdown
Convert the main functions to TF Functions for better performance:
###Code
from tf_agents.utils.common import function
collect_driver.run = function(collect_driver.run)
agent.train = function(agent.train)
###Output
_____no_output_____
###Markdown
And now we are ready to run the main loop!
###Code
def train_agent(n_iterations):
time_step = None
policy_state = agent.collect_policy.get_initial_state(tf_env.batch_size)
iterator = iter(dataset)
for iteration in range(n_iterations):
time_step, policy_state = collect_driver.run(time_step, policy_state)
trajectories, buffer_info = next(iterator)
train_loss = agent.train(trajectories)
print("\r{} loss:{:.5f}".format(
iteration, train_loss.loss.numpy()), end="")
if iteration % 1000 == 0:
log_metrics(train_metrics)
###Output
_____no_output_____
###Markdown
Run the next cell to train the agent for 50,000 steps. Then look at its behavior by running the following cell. You can run these two cells as many times as you wish. The agent will keep improving! It will likely take over 200,000 iterations for the agent to become reasonably good.
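For a sense of scale (my own arithmetic, using the `update_period = 4` collect steps per training iteration defined above and 4 ALE frames per environment step):

```python
iterations = 200_000
env_steps = iterations * update_period   # one training iteration per 4 collected steps
ale_frames = env_steps * 4               # AtariPreprocessing skips 4 ALE frames per step
print(env_steps, ale_frames)             # 800,000 steps, i.e. about 3.2 million ALE frames
```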
###Code
train_agent(n_iterations=50000)
frames = []
def save_frames(trajectory):
global frames
frames.append(tf_env.pyenv.envs[0].render(mode="rgb_array"))
watch_driver = DynamicStepDriver(
tf_env,
agent.policy,
observers=[save_frames, ShowProgress(1000)],
num_steps=1000)
final_time_step, final_policy_state = watch_driver.run()
plot_animation(frames)
###Output
_____no_output_____
###Markdown
If you want to save an animated GIF to show off your agent to your friends, here's one way to do it:
###Code
import PIL
image_path = os.path.join("images", "rl", "breakout.gif")
frame_images = [PIL.Image.fromarray(frame) for frame in frames[:150]]
frame_images[0].save(image_path, format='GIF',
append_images=frame_images[1:],
save_all=True,
duration=30,
loop=0)
%%html
<img src="images/rl/breakout.gif" />
###Output
_____no_output_____
###Markdown
Extra material Deque vs Rotating List The `deque` class offers fast append, but fairly slow random access (for large replay memories):
###Code
from collections import deque
np.random.seed(42)
mem = deque(maxlen=1000000)
for i in range(1000000):
mem.append(i)
[mem[i] for i in np.random.randint(1000000, size=5)]
%timeit mem.append(1)
%timeit [mem[i] for i in np.random.randint(1000000, size=5)]
###Output
182 µs ± 6.9 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
Alternatively, you could use a rotating list like this `ReplayMemory` class. This would make random access faster for large replay memories:
###Code
class ReplayMemory:
def __init__(self, max_size):
        self.buffer = np.empty(max_size, dtype=object)  # np.object is deprecated; plain object behaves the same
self.max_size = max_size
self.index = 0
self.size = 0
def append(self, obj):
self.buffer[self.index] = obj
self.size = min(self.size + 1, self.max_size)
self.index = (self.index + 1) % self.max_size
def sample(self, batch_size):
indices = np.random.randint(self.size, size=batch_size)
return self.buffer[indices]
mem = ReplayMemory(max_size=1000000)
for i in range(1000000):
mem.append(i)
mem.sample(5)
%timeit mem.append(1)
%timeit mem.sample(5)
###Output
9.24 µs ± 227 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
###Markdown
Creating a Custom TF-Agents Environment To create a custom TF-Agent environment, you just need to write a class that inherits from the `PyEnvironment` class and implements a few methods. For example, the following minimal environment represents a simple 4x4 grid. The agent starts in one corner (0,0) and must move to the opposite corner (3,3). The episode is done if the agent reaches the goal (it gets a +10 reward) or if the agent goes out of bounds (-1 reward). The actions are up (0), down (1), left (2) and right (3).
###Code
class MyEnvironment(tf_agents.environments.py_environment.PyEnvironment):
def __init__(self, discount=1.0):
super().__init__()
self._action_spec = tf_agents.specs.BoundedArraySpec(
shape=(), dtype=np.int32, name="action", minimum=0, maximum=3)
self._observation_spec = tf_agents.specs.BoundedArraySpec(
shape=(4, 4), dtype=np.int32, name="observation", minimum=0, maximum=1)
self.discount = discount
def action_spec(self):
return self._action_spec
def observation_spec(self):
return self._observation_spec
def _reset(self):
self._state = np.zeros(2, dtype=np.int32)
obs = np.zeros((4, 4), dtype=np.int32)
obs[self._state[0], self._state[1]] = 1
return tf_agents.trajectories.time_step.restart(obs)
def _step(self, action):
self._state += [(-1, 0), (+1, 0), (0, -1), (0, +1)][action]
reward = 0
obs = np.zeros((4, 4), dtype=np.int32)
done = (self._state.min() < 0 or self._state.max() > 3)
if not done:
obs[self._state[0], self._state[1]] = 1
if done or np.all(self._state == np.array([3, 3])):
reward = -1 if done else +10
return tf_agents.trajectories.time_step.termination(obs, reward)
else:
return tf_agents.trajectories.time_step.transition(obs, reward,
self.discount)
###Output
_____no_output_____
###Markdown
The action and observation specs will generally be instances of the `ArraySpec` or `BoundedArraySpec` classes from the `tf_agents.specs` package (check out the other specs in this package as well). Optionally, you can also define a `render()` method, a `close()` method to free resources, as well as a `time_step_spec()` method if you don't want the `reward` and `discount` to be 32-bit float scalars. Note that the base class takes care of keeping track of the current time step, which is why we must implement `_reset()` and `_step()` rather than `reset()` and `step()`.
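As an illustration of those optional methods (a sketch, not part of the book's code), a `render()` method for this grid world could simply print the observation grid:

```python
class MyEnvironmentWithRender(MyEnvironment):
    def render(self, mode="human"):
        # Print the 4x4 grid with a 1 marking the agent's position
        # (all zeros if the agent has stepped out of bounds).
        obs = np.zeros((4, 4), dtype=np.int32)
        if self._state.min() >= 0 and self._state.max() <= 3:
            obs[self._state[0], self._state[1]] = 1
        print(obs)
```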
###Code
my_env = MyEnvironment()
time_step = my_env.reset()
time_step
time_step = my_env.step(1)
time_step
###Output
_____no_output_____
###Markdown
Exercise Solutions 1. to 7.See Appendix A. 8._Exercise: Use policy gradients to solve OpenAI Gym's LunarLander-v2 environment. You will need to install the Box2D dependencies (`%pip install -U gym[box2d]`)._ Let's start by creating a LunarLander-v2 environment:
###Code
env = gym.make("LunarLander-v2")
###Output
_____no_output_____
###Markdown
The inputs are 8-dimensional:
###Code
env.observation_space
env.seed(42)
obs = env.reset()
obs
###Output
_____no_output_____
###Markdown
From the [source code](https://github.com/openai/gym/blob/master/gym/envs/box2d/lunar_lander.py), we can see that each 8D observation (x, y, h, v, a, w, l, r) corresponds to:* x,y: the coordinates of the spaceship. It starts at a random location near (0, 1.4) and must land near the target at (0, 0).* h,v: the horizontal and vertical speed of the spaceship. It starts with a small random speed.* a,w: the spaceship's angle and angular velocity.* l,r: whether the left or right leg touches the ground (1.0) or not (0.0). The action space is discrete, with 4 possible actions:
###Code
env.action_space
###Output
_____no_output_____
###Markdown
Looking at the [LunarLander-v2's description](https://gym.openai.com/envs/LunarLander-v2/), these actions are:* do nothing* fire left orientation engine* fire main engine* fire right orientation engine Let's create a simple policy network with 4 output neurons (one per possible action):
###Code
keras.backend.clear_session()
np.random.seed(42)
tf.random.set_seed(42)
n_inputs = env.observation_space.shape[0]
n_outputs = env.action_space.n
model = keras.models.Sequential([
keras.layers.Dense(32, activation="relu", input_shape=[n_inputs]),
keras.layers.Dense(32, activation="relu"),
keras.layers.Dense(n_outputs, activation="softmax"),
])
###Output
_____no_output_____
###Markdown
Note that we're using the softmax activation function in the output layer, instead of the sigmoid activation function like we did for the CartPole-v1 environment. This is because we only had two possible actions for the CartPole-v1 environment, so a binary classification model worked fine. However, since we now have more than two possible actions, we need a multiclass classification model. Next, let's reuse the `play_one_step()` and `play_multiple_episodes()` functions we defined for the CartPole-v1 Policy Gradient code above, but we'll just tweak the `play_one_step()` function to account for the fact that the model is now a multiclass classification model rather than a binary classification model. We'll also tweak the `play_multiple_episodes()` function to call our tweaked `play_one_step()` function rather than the original one, and we add a big penalty if the spaceship does not land (or crash) before a maximum number of steps.
###Code
def lander_play_one_step(env, obs, model, loss_fn):
with tf.GradientTape() as tape:
probas = model(obs[np.newaxis])
logits = tf.math.log(probas + keras.backend.epsilon())
action = tf.random.categorical(logits, num_samples=1)
loss = tf.reduce_mean(loss_fn(action, probas))
grads = tape.gradient(loss, model.trainable_variables)
obs, reward, done, info = env.step(action[0, 0].numpy())
return obs, reward, done, grads
def lander_play_multiple_episodes(env, n_episodes, n_max_steps, model, loss_fn):
all_rewards = []
all_grads = []
for episode in range(n_episodes):
current_rewards = []
current_grads = []
obs = env.reset()
for step in range(n_max_steps):
obs, reward, done, grads = lander_play_one_step(env, obs, model, loss_fn)
current_rewards.append(reward)
current_grads.append(grads)
if done:
break
all_rewards.append(current_rewards)
all_grads.append(current_grads)
return all_rewards, all_grads
###Output
_____no_output_____
###Markdown
We'll keep exactly the same `discount_rewards()` and `discount_and_normalize_rewards()` functions as earlier:
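For intuition, here is a small worked example (my own numbers) of what `discount_rewards()` computes, once the cell below has run:

```python
print(discount_rewards([10, 0, -50], discount_rate=0.8))
# -> [-22 -40 -50]: the final -50 is propagated backwards
#    (0 + 0.8 * -50 = -40, then 10 + 0.8 * -40 = -22), so the first action ends up
#    with a negative return even though its own immediate reward was +10.
```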
###Code
def discount_rewards(rewards, discount_rate):
discounted = np.array(rewards)
for step in range(len(rewards) - 2, -1, -1):
discounted[step] += discounted[step + 1] * discount_rate
return discounted
def discount_and_normalize_rewards(all_rewards, discount_rate):
all_discounted_rewards = [discount_rewards(rewards, discount_rate)
for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean) / reward_std
for discounted_rewards in all_discounted_rewards]
###Output
_____no_output_____
###Markdown
Now let's define some hyperparameters:
###Code
n_iterations = 200
n_episodes_per_update = 16
n_max_steps = 1000
discount_rate = 0.99
###Output
_____no_output_____
###Markdown
Again, since the model is a multiclass classification model, we must use the categorical cross-entropy rather than the binary cross-entropy. Moreover, since the `lander_play_one_step()` function sets the targets as class indices rather than class probabilities, we must use the `sparse_categorical_crossentropy()` loss function:
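A quick side-by-side with toy numbers (my own) showing that the two losses agree and only the target format differs:

```python
import tensorflow as tf

probas = tf.constant([[0.1, 0.7, 0.1, 0.1]])   # one sample, 4 action probabilities

# Class-index target (what lander_play_one_step() produces) vs. one-hot target.
print(tf.keras.losses.sparse_categorical_crossentropy([1], probas).numpy())
print(tf.keras.losses.categorical_crossentropy([[0., 1., 0., 0.]], probas).numpy())
# Both print ~0.357 (= -log(0.7)).
```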
###Code
optimizer = keras.optimizers.Nadam(learning_rate=0.005)
loss_fn = keras.losses.sparse_categorical_crossentropy
###Output
_____no_output_____
###Markdown
We're ready to train the model. Let's go!
###Code
env.seed(42)
mean_rewards = []
for iteration in range(n_iterations):
all_rewards, all_grads = lander_play_multiple_episodes(
env, n_episodes_per_update, n_max_steps, model, loss_fn)
mean_reward = sum(map(sum, all_rewards)) / n_episodes_per_update
print("\rIteration: {}/{}, mean reward: {:.1f} ".format(
iteration + 1, n_iterations, mean_reward), end="")
mean_rewards.append(mean_reward)
all_final_rewards = discount_and_normalize_rewards(all_rewards,
discount_rate)
all_mean_grads = []
for var_index in range(len(model.trainable_variables)):
mean_grads = tf.reduce_mean(
[final_reward * all_grads[episode_index][step][var_index]
for episode_index, final_rewards in enumerate(all_final_rewards)
for step, final_reward in enumerate(final_rewards)], axis=0)
all_mean_grads.append(mean_grads)
optimizer.apply_gradients(zip(all_mean_grads, model.trainable_variables))
###Output
Iteration: 200/200, mean reward: 134.2
###Markdown
Let's look at the learning curve:
###Code
import matplotlib.pyplot as plt
plt.plot(mean_rewards)
plt.xlabel("Episode")
plt.ylabel("Mean reward")
plt.grid()
plt.show()
###Output
_____no_output_____
###Markdown
Now let's look at the result!
###Code
def lander_render_policy_net(model, n_max_steps=500, seed=42):
frames = []
env = gym.make("LunarLander-v2")
env.seed(seed)
tf.random.set_seed(seed)
np.random.seed(seed)
obs = env.reset()
for step in range(n_max_steps):
frames.append(env.render(mode="rgb_array"))
probas = model(obs[np.newaxis])
logits = tf.math.log(probas + keras.backend.epsilon())
action = tf.random.categorical(logits, num_samples=1)
obs, reward, done, info = env.step(action[0, 0].numpy())
if done:
break
env.close()
return frames
frames = lander_render_policy_net(model, seed=42)
plot_animation(frames)
###Output
_____no_output_____
###Markdown
That's pretty good. You can try training it for longer and/or tweaking the hyperparameters to see if you can get it to go over 200. 9._Exercise: Use TF-Agents to train an agent that can achieve a superhuman level at SpaceInvaders-v4 using any of the available algorithms._ Please follow the steps in the [Using TF-Agents to Beat Breakout](http://localhost:8888/notebooks/18_reinforcement_learning.ipynb#Using-TF-Agents-to-Beat-Breakout) section above, replacing `"Breakout-v4"` with `"SpaceInvaders-v4"`. There will be a few things to tweak, however. For example, the Space Invaders game does not require the user to press FIRE to begin the game. Instead, the player's laser cannon blinks for a few seconds, then the game starts automatically. For better performance, you may want to skip this blinking phase (which lasts about 40 steps) at the beginning of each episode and after each life lost. Indeed, it's impossible to do anything at all during this phase, and nothing moves. One way to do this is to use the following custom environment wrapper, instead of the `AtariPreprocessingWithAutoFire` wrapper:
###Code
class AtariPreprocessingWithSkipStart(AtariPreprocessing):
def skip_frames(self, num_skip):
for _ in range(num_skip):
super().step(0) # NOOP for num_skip steps
def reset(self, **kwargs):
obs = super().reset(**kwargs)
self.skip_frames(40)
return obs
def step(self, action):
lives_before_action = self.ale.lives()
obs, rewards, done, info = super().step(action)
if self.ale.lives() < lives_before_action and not done:
self.skip_frames(40)
return obs, rewards, done, info
###Output
_____no_output_____ |
notebooks/01.01_check_axels.ipynb | ###Markdown
Check Axel's calculations. Purpose* Axel has made a small script to compare AIS and GPS data.* This script will be examined and understood in this notebook. Results
* The total sailed distance differs by about 6% between the SSPA AIS and the GPS data.
* This is, however, disregarding missing data and the fact that the SSPA AIS has some data reduction.
* If the 0-1 kts speed bin is disregarded in the comparison, the difference is 2%. Setup
###Code
# %load imports.py
from typing import no_type_check
%matplotlib inline
%load_ext autoreload
%autoreload 2
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pyaisdb.database import DB
db = DB()
df_speed_distances = pd.DataFrame(dtype='float64')
s_gps = pd.Series(dtype='float64')
s_ais = pd.Series(dtype='float64')
###Output
_____no_output_____
###Markdown
GPS data
###Code
for i in range(30):
sql = f"""with blue_data as (SELECT time_info, lag(time_info, 1) OVER (ORDER by time_info ASC) as next_time, sog,
ST_Distance(pos::geography, lag(pos::geography, 1) OVER (ORDER by time_info ASC)) as dist
FROM projects._49145341_d2e2f_blue_data_varmdo
where time_info < '2020-07-19 23:59:59+02'
and time_info > '2020-07-10 00:00:00+02'
)
select sum(dist)/1852
from blue_data
where sog >= {i} and sog < {i + 1}"""
# print(sql)
distance = db.execute_and_return(sql)[0][0]
if distance:
#print(f'{i + 0.5} {round(result, 2)}')
speed = i + 0.5
s_gps[speed] = distance
###Output
_____no_output_____
###Markdown
AIS data
###Code
for i in range(30):
sql = f"""select sum(st_length(segment::geography)) / 1852
from segments_sjfv_2020
where sog>={i} and sog < {i+1}
and mmsi=265520390
and date2 < '2020-07-19 23:59:59+02'
and date1 > '2020-07-10 00:00:00+02' """
distance = db.execute_and_return(sql)[0][0]
if distance:
#print(f'{i + 0.5} {round(result, 2)}')
speed = i + 0.5
s_ais[speed] = distance
df_speed_distances['GPS'] = s_gps
df_speed_distances['AIS'] = s_ais
df_speed_distances.index.name='speed'
df_speed_distances.head()
df_speed_distances.describe()
df_speed_distances.sum()
df_speed_distances.sum().pct_change()
fig,ax=plt.subplots()
fig.set_size_inches(17,7)
df_speed_distances.plot(style='.-',ax=ax, kind='bar');
ax.grid(True)
ax.set_ylabel('Distance [NM]')
ax.set_xlabel('Ship speed [kts]')
df_speed_time = df_speed_distances.div(df_speed_distances.index, axis=0)
fig,ax=plt.subplots()
fig.set_size_inches(17,7)
df_speed_time.plot(style='.-',ax=ax, kind='bar');
ax.grid(True)
ax.set_ylabel('Time [h]')  # distance [NM] divided by speed [kts] gives hours
ax.set_xlabel('Ship speed [kts]')
df_speed_distances.iloc[1:].sum().pct_change()
###Output
_____no_output_____ |
RUL Prediction using LSTM Lookback=1.ipynb | ###Markdown
Run to Failure degradation simulation of NASA Turbo Jet Engine Fans Index + 1. Data Analysis + 1.1 Info about data:+ 2. Noise removal and Normalization+ 3. Training LSTM Model to predict RUL+ 4. Testing VAR+ 5 Health Score Assignment+ 6. Analysing Prediction
###Code
import numpy as np
import pandas as pd
import seaborn as sns
import math
import matplotlib.pyplot as plt
from matplotlib import cm
import statsmodels.api as sm
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.vector_ar.var_model import VAR
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import QuantileTransformer , PowerTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from keras import optimizers
from keras.models import Sequential
from keras.layers import TimeDistributed, Flatten
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from sklearn.metrics import mean_squared_error
import keras
from keras.models import Sequential,Input,Model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
cmap = cm.get_cmap('Spectral') # Colour map (there are many others)
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
from keras.models import load_model
###Output
_____no_output_____
###Markdown
1. Data analysisGo back to Index
###Code
train_file = "train_FD001.txt"
test_file = "test_FD001.txt"
RUL_file = "RUL_FD001.txt"
df = pd.read_csv(train_file,sep=" ",header=None)
df.head()
#columns = ['unit_number','time_in_cycles','setting_1','setting_2','TRA','T2','T24','T30','T50','P2','P15','P30','Nf',
# 'Nc','epr','Ps3 0','phi','NRf','NRc','BPR','farB','htBleed','Nf_dmd','PCNfR_dmd','W31','W32' ]
#delete NaN values
df.drop(columns=[26,27],inplace=True)
columns = ["Section-{}".format(i) for i in range(26)]
df.columns = columns
df.head()
###Output
_____no_output_____
###Markdown
Dataset statistics for each parameter
###Code
df.describe()
###Output
_____no_output_____
###Markdown
1.1 Info about data:- Section-0 is MachineID- Section-1 is time in cycles- Section-2...4 are Operational Settings- Section-5...25 are sensor data - Data Set: FD001- Train trajectories: 100- Test trajectories: 100- Conditions: ONE (Sea Level)- Fault Modes: ONE (HPC Degradation)
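To keep track of this layout while reading the code below, a small lookup can help (my own helper, mirroring the commented-out column list in the loading cell; the mapping is an assumption for readability only):

```python
# Hypothetical convenience mapping from the generic column names to their meaning.
section_meaning = {"Section-0": "unit number (MachineID)", "Section-1": "time in cycles"}
section_meaning.update({f"Section-{i}": f"operational setting {i - 1}" for i in range(2, 5)})
section_meaning.update({f"Section-{i}": f"sensor {i - 4}" for i in range(5, 26)})
print(section_meaning["Section-7"])   # sensor 3
```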
###Code
# Names
MachineID_name = ["Section-0"]
RUL_name = ["Section-1"]
OS_name = ["Section-{}".format(i) for i in range(2,5)]
Sensor_name = ["Section-{}".format(i) for i in range(5,26)]
# Data in pandas DataFrame
MachineID_data = df[MachineID_name]
RUL_data = df[RUL_name]
OS_data = df[OS_name]
Sensor_data = df[Sensor_name]
# Data in pandas Series
MachineID_series = df["Section-0"]
RUL_series = df["Section-1"]
grp = RUL_data.groupby(MachineID_series)
max_cycles = np.array([max(grp.get_group(i)["Section-1"]) for i in MachineID_series.unique()])
print("Max Life >> ",max(max_cycles))
print("Mean Life >> ",np.mean(max_cycles))
print("Min Life >> ",min(max_cycles))
for i in range(26):
print(str(i))
print(df['Section-'+str(i)])
###Output
0
0 1
1 1
2 1
3 1
4 1
...
20626 100
20627 100
20628 100
20629 100
20630 100
Name: Section-0, Length: 20631, dtype: int64
1
0 1
1 2
2 3
3 4
4 5
...
20626 196
20627 197
20628 198
20629 199
20630 200
Name: Section-1, Length: 20631, dtype: int64
2
0 -0.0007
1 0.0019
2 -0.0043
3 0.0007
4 -0.0019
...
20626 -0.0004
20627 -0.0016
20628 0.0004
20629 -0.0011
20630 -0.0032
Name: Section-2, Length: 20631, dtype: float64
3
0 -0.0004
1 -0.0003
2 0.0003
3 0.0000
4 -0.0002
...
20626 -0.0003
20627 -0.0005
20628 0.0000
20629 0.0003
20630 -0.0005
Name: Section-3, Length: 20631, dtype: float64
4
0 100.0
1 100.0
2 100.0
3 100.0
4 100.0
...
20626 100.0
20627 100.0
20628 100.0
20629 100.0
20630 100.0
Name: Section-4, Length: 20631, dtype: float64
5
0 518.67
1 518.67
2 518.67
3 518.67
4 518.67
...
20626 518.67
20627 518.67
20628 518.67
20629 518.67
20630 518.67
Name: Section-5, Length: 20631, dtype: float64
6
0 641.82
1 642.15
2 642.35
3 642.35
4 642.37
...
20626 643.49
20627 643.54
20628 643.42
20629 643.23
20630 643.85
Name: Section-6, Length: 20631, dtype: float64
7
0 1589.70
1 1591.82
2 1587.99
3 1582.79
4 1582.85
...
20626 1597.98
20627 1604.50
20628 1602.46
20629 1605.26
20630 1600.38
Name: Section-7, Length: 20631, dtype: float64
8
0 1400.60
1 1403.14
2 1404.20
3 1401.87
4 1406.22
...
20626 1428.63
20627 1433.58
20628 1428.18
20629 1426.53
20630 1432.14
Name: Section-8, Length: 20631, dtype: float64
9
0 14.62
1 14.62
2 14.62
3 14.62
4 14.62
...
20626 14.62
20627 14.62
20628 14.62
20629 14.62
20630 14.62
Name: Section-9, Length: 20631, dtype: float64
10
0 21.61
1 21.61
2 21.61
3 21.61
4 21.61
...
20626 21.61
20627 21.61
20628 21.61
20629 21.61
20630 21.61
Name: Section-10, Length: 20631, dtype: float64
11
0 554.36
1 553.75
2 554.26
3 554.45
4 554.00
...
20626 551.43
20627 550.86
20628 550.94
20629 550.68
20630 550.79
Name: Section-11, Length: 20631, dtype: float64
12
0 2388.06
1 2388.04
2 2388.08
3 2388.11
4 2388.06
...
20626 2388.19
20627 2388.23
20628 2388.24
20629 2388.25
20630 2388.26
Name: Section-12, Length: 20631, dtype: float64
13
0 9046.19
1 9044.07
2 9052.94
3 9049.48
4 9055.15
...
20626 9065.52
20627 9065.11
20628 9065.90
20629 9073.72
20630 9061.48
Name: Section-13, Length: 20631, dtype: float64
14
0 1.3
1 1.3
2 1.3
3 1.3
4 1.3
...
20626 1.3
20627 1.3
20628 1.3
20629 1.3
20630 1.3
Name: Section-14, Length: 20631, dtype: float64
15
0 47.47
1 47.49
2 47.27
3 47.13
4 47.28
...
20626 48.07
20627 48.04
20628 48.09
20629 48.39
20630 48.20
Name: Section-15, Length: 20631, dtype: float64
16
0 521.66
1 522.28
2 522.42
3 522.86
4 522.19
...
20626 519.49
20627 519.68
20628 520.01
20629 519.67
20630 519.30
Name: Section-16, Length: 20631, dtype: float64
17
0 2388.02
1 2388.07
2 2388.03
3 2388.08
4 2388.04
...
20626 2388.26
20627 2388.22
20628 2388.24
20629 2388.23
20630 2388.26
Name: Section-17, Length: 20631, dtype: float64
18
0 8138.62
1 8131.49
2 8133.23
3 8133.83
4 8133.80
...
20626 8137.60
20627 8136.50
20628 8141.05
20629 8139.29
20630 8137.33
Name: Section-18, Length: 20631, dtype: float64
19
0 8.4195
1 8.4318
2 8.4178
3 8.3682
4 8.4294
...
20626 8.4956
20627 8.5139
20628 8.5646
20629 8.5389
20630 8.5036
Name: Section-19, Length: 20631, dtype: float64
20
0 0.03
1 0.03
2 0.03
3 0.03
4 0.03
...
20626 0.03
20627 0.03
20628 0.03
20629 0.03
20630 0.03
Name: Section-20, Length: 20631, dtype: float64
21
0 392
1 392
2 390
3 392
4 393
...
20626 397
20627 395
20628 398
20629 395
20630 396
Name: Section-21, Length: 20631, dtype: int64
22
0 2388
1 2388
2 2388
3 2388
4 2388
...
20626 2388
20627 2388
20628 2388
20629 2388
20630 2388
Name: Section-22, Length: 20631, dtype: int64
23
0 100.0
1 100.0
2 100.0
3 100.0
4 100.0
...
20626 100.0
20627 100.0
20628 100.0
20629 100.0
20630 100.0
Name: Section-23, Length: 20631, dtype: float64
24
0 39.06
1 39.00
2 38.95
3 38.88
4 38.90
...
20626 38.49
20627 38.30
20628 38.44
20629 38.29
20630 38.37
Name: Section-24, Length: 20631, dtype: float64
25
0 23.4190
1 23.4236
2 23.3442
3 23.3739
4 23.4044
...
20626 22.9735
20627 23.1594
20628 22.9333
20629 23.0640
20630 23.0522
Name: Section-25, Length: 20631, dtype: float64
###Markdown
From the above visualization it is clear that - Section-4 (Operational Setting-3)- Section-5 (Sensor-1)- Section-9 (Sensor-5)- Section-10 (Sensor-6)- Section-14 (Sensor-10)- Section-20 (Sensor-16)- Section-22 (Sensor-18)- Section-23 (Sensor-19) do not play a vital role in the variation of the data, and their standard deviation is almost 0, so these sensor readings are useless for us; hence, we can drop these columns.
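A quick programmatic check of that claim (my own addition): sorting the per-column standard deviations shows which sections barely vary at all.

```python
# Columns with (near-)zero standard deviation carry no useful signal for the model.
print(df.std().sort_values().head(10))
```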
###Code
#delete columns with constant values that do not carry information about the state of the unit
#data = pd.concat([RUL_data,OS_data,Sensor_data], axis=1)
df.drop(columns=["Section-0",
"Section-4", # Operatinal Setting
"Section-5", # Sensor data
"Section-9", # Sensor data
"Section-10", # Sensor data
"Section-14",# Sensor data
"Section-20",# Sensor data
"Section-22",# Sensor data
"Section-23"] , inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
2. Noise removal and NormalizationGo back to Index
###Code
print(type(df))
gen = MinMaxScaler(feature_range=(0, 1))
df = gen.fit_transform(df)
df = pd.DataFrame(df)
#df = df.rolling(20).mean()
pt = PowerTransformer()
df = pt.fit_transform(df)
# grouping w.r.t MID (Machine ID)
# col_names = df.columns
# def grouping(datafile, mid_series):
# data = [x for x in datafile.groupby(mid_series)]
# return data
df
###Output
_____no_output_____
###Markdown
3. Training LSTM Model to predict RULGo back to Index
###Code
def RUL_df():
    # For each engine, reverse its cycle column (Section-1) so the first row gets the
    # engine's total cycle count and the last row gets 1, i.e. the remaining useful
    # life counting down to failure.
rul_lst = [j for i in MachineID_series.unique() for j in np.array(grp.get_group(i)[::-1]["Section-1"])]
rul_col = pd.DataFrame({"rul":rul_lst})
return rul_col
RUL_df().head()
X_train = np.array(df)
y_train = np.array(RUL_df()).reshape(-1,1)
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)
print(X_train.shape,y_train.shape)
print(max_cycles)
print(sum(max_cycles))
count = 0
for cycle_len in max_cycles:
for i in range(1):
y_train[count+i] = 0
count = count + cycle_len
print(count)
print(y_train)
def create_dataset(X, look_back=1):
    # Slice the feature matrix into overlapping windows of `look_back` consecutive rows,
    # producing LSTM input of shape (samples, look_back, n_features).
data = []
for i in range(len(X)-look_back-1):
data.append(X[i:(i+look_back)])
return np.array(data)
X_train = create_dataset(X_train)
y_train = y_train[2:]
print(X_train.shape,y_train.shape)
print(y_train)
def build_model(layers):
#d = 0.2
model = Sequential()
model.add(LSTM(128, input_shape=(layers[1], layers[0]), return_sequences=True))
#model.add(Dropout(d))
model.add(LSTM(64, input_shape=(layers[1], layers[0]), return_sequences=False))
#model.add(Dropout(d))
model.add(Dense(16,kernel_initializer='uniform',activation='relu'))
model.add(Dense(1,kernel_initializer='uniform',activation='relu'))
model.compile(loss='mean_squared_error',optimizer='adam')
return model
model = build_model([17,1])
print(model.summary())
history = model.fit(
X_train,
y_train,
batch_size=512,
epochs=75,
validation_split=0.15,
verbose=1)
print(history.history.keys())
# model.save('LSTM_with_lookback_1.h5')
# Loss Graph
plt.plot(history.epoch, history.history['loss'] , label = "loss")
plt.plot(history.epoch, history.history['val_loss'] , label = "val_loss")
plt.legend()
plt.show()
y_train_pred = model.predict(X_train)
print("mean_squared_error >> ", mean_squared_error(y_train,y_train_pred))
print("root_mean_squared_error >> ", math.sqrt(mean_squared_error(y_train,y_train_pred)))
print("mean_absolute_error >>",mean_absolute_error(y_train,y_train_pred))
###Output
mean_squared_error >> 1415.4031449527815
root_mean_squared_error >> 37.62184398660945
mean_absolute_error >> 25.70032441054944
###Markdown
LSTM (Lookback = 1) without VAR
###Code
import numpy as np
import pandas as pd
import seaborn as sns
import math
import matplotlib.pyplot as plt
from matplotlib import cm
import statsmodels.api as sm
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.vector_ar.var_model import VAR
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import QuantileTransformer , PowerTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from keras import optimizers
from keras.models import Sequential
from keras.layers import TimeDistributed, Flatten
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
cmap = cm.get_cmap('Spectral') # Colour map (there are many others)
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import r2_score
from keras.models import load_model
import pickle
model = load_model('LSTM_with_lookback_1.h5')
train_file = "train_FD001.txt"
test_file = "test_FD001.txt"
RUL_file = "RUL_FD001.txt"
columns = ["Section-{}".format(i) for i in range(26)]
df_test = pd.read_csv(test_file, sep=" ",header=None)
df_test.drop(columns=[26,27],inplace=True)
df_test.columns = columns
df_test.head()
df_rul = pd.read_csv(RUL_file, names=['rul'])
df_rul.head()
RUL_name = ["Section-1"]
RUL_data = df_test[RUL_name]
MachineID_series = df_test["Section-0"]
grp = RUL_data.groupby(MachineID_series)
max_cycles = np.array([max(grp.get_group(i)["Section-1"]) for i in MachineID_series.unique()])
max_cycles[0] = max_cycles[0] - 2
df_test.drop(df_test[["Section-0",
"Section-4", # Operatinal Setting
"Section-5", # Sensor data
"Section-9", # Sensor data
"Section-10", # Sensor data
"Section-14",# Sensor data
"Section-20",# Sensor data
"Section-22",# Sensor data
"Section-23"]], axis=1 , inplace=True)
#df_test = df_test.groupby(["Section-0"])
#print(df_test)
gen = MinMaxScaler(feature_range=(0, 1))
df_test = gen.fit_transform(df_test)
df_test = pd.DataFrame(df_test)
#df_test = df_test.rolling(20).mean()
pt = PowerTransformer()
df_test = pt.fit_transform(df_test)
df_test=np.nan_to_num(df_test)
X_test = np.array(df_test)
y_test = np.array(df_rul)
def create_dataset(X, look_back=1):
data = []
for i in range(len(X)-look_back-1):
data.append(X[i:(i+look_back)])
return np.array(data)
X_test = create_dataset(X_test)
#y_test = y_test[6:]
print(X_test.shape,y_test.shape)
pred = model.predict(X_test)
pred.shape
def scoring_function(actual,predicted):
    # Asymmetric PHM08/C-MAPSS-style score: late predictions (predicted > actual RUL)
    # are penalised more heavily (exp(d/10) - 1) than early ones (exp(-d/13) - 1).
d = []
for i in range(len(predicted)):
d.append((predicted[i] - actual[i]))
scores = []
for i in range(len(d)):
if d[i] >= 0:
scores.append(math.exp(d[i]/10) - 1)
else :
scores.append(math.exp((-1*d[i])/13) - 1)
return sum(scores)
final_pred = []
count = 0
for i in range(100):
j = max_cycles[i]
temp = pred[count+j-1]
count=count+j
final_pred.append(int(temp))
print(final_pred)
fig = plt.figure(figsize=(18,10))
plt.plot(final_pred,color='red', label='prediction')
plt.plot(y_test,color='blue', label='y_test')
fig.suptitle('RUL Prediction using LSTM with lookack=1', fontsize=35)
plt.xlabel("Engine Number", fontsize=35)
plt.ylabel("Remaining Useful Life", fontsize=35)
plt.legend(loc='upper left')
plt.grid()
plt.show()
print("mean_squared_error >> ", mean_squared_error(y_test,final_pred))
print("root_mean_squared_error >> ", math.sqrt(mean_squared_error(y_test,final_pred)))
print("mean_absolute_error >>",mean_absolute_error(y_test,final_pred))
print("scoring function >>",scoring_function(y_test,final_pred))
df=pd.DataFrame(np.arange(1,101))
df['Actual']=y_test
df['Predicted']=final_pred
df=df.drop([0],axis=1)
sns.set_theme(style="whitegrid")
a4_dims = (18,10)
fig, ax = plt.subplots(figsize=a4_dims)
sns.lineplot(data = df,markers=True, dashes=False)
fig.suptitle('RUL Prediction using LSTM with lookack=1', fontsize=35)
plt.xlabel("Engine Number", fontsize=35)
plt.ylabel("Remaining Useful Life", fontsize=35)
# For training results
train_file = "train_FD001.txt"
test_file = "test_FD001.txt"
RUL_file = "RUL_FD001.txt"
df = pd.read_csv(train_file,sep=" ",header=None)
#columns = ['unit_number','time_in_cycles','setting_1','setting_2','TRA','T2','T24','T30','T50','P2','P15','P30','Nf',
# 'Nc','epr','Ps3 0','phi','NRf','NRc','BPR','farB','htBleed','Nf_dmd','PCNfR_dmd','W31','W32' ]
#delete NaN values
df.drop(columns=[26,27],inplace=True)
columns = ["Section-{}".format(i) for i in range(26)]
df.columns = columns
# Names
MachineID_name = ["Section-0"]
RUL_name = ["Section-1"]
OS_name = ["Section-{}".format(i) for i in range(2,5)]
Sensor_name = ["Section-{}".format(i) for i in range(5,26)]
# Data in pandas DataFrame
MachineID_data = df[MachineID_name]
RUL_data = df[RUL_name]
OS_data = df[OS_name]
Sensor_data = df[Sensor_name]
# Data in pandas Series
MachineID_series = df["Section-0"]
RUL_series = df["Section-1"]
grp = RUL_data.groupby(MachineID_series)
max_cycles = np.array([max(grp.get_group(i)["Section-1"]) for i in MachineID_series.unique()])
print("Max Life >> ",max(max_cycles))
print("Mean Life >> ",np.mean(max_cycles))
print("Min Life >> ",min(max_cycles))
#delete columns with constant values that do not carry information about the state of the unit
#data = pd.concat([RUL_data,OS_data,Sensor_data], axis=1)
df.drop(columns=["Section-0",
"Section-4", # Operatinal Setting
"Section-5", # Sensor data
"Section-9", # Sensor data
"Section-10", # Sensor data
"Section-14",# Sensor data
"Section-20",# Sensor data
"Section-22",# Sensor data
"Section-23"] , inplace=True)
gen = MinMaxScaler(feature_range=(0, 1))
df = gen.fit_transform(df)
df = pd.DataFrame(df)
#df = df.rolling(20).mean()
pt = PowerTransformer()
df = pt.fit_transform(df)
df=np.nan_to_num(df)
def RUL_df():
rul_lst = [j for i in MachineID_series.unique() for j in np.array(grp.get_group(i)[::-1]["Section-1"])]
rul_col = pd.DataFrame({"rul":rul_lst})
return rul_col
RUL_df().head()
X_train = np.array(df)
y_train = np.array(RUL_df()).reshape(-1,1)
count = 0
for cycle_len in max_cycles:
for i in range(6):
y_train[count+i] = 0
count = count + cycle_len
X_train = create_dataset(X_train)
y_train = y_train[2:]
y_train_pred = model.predict(X_train)
print("mean_squared_error >> ", mean_squared_error(y_train,y_train_pred))
print("root_mean_squared_error >> ", math.sqrt(mean_squared_error(y_train,y_train_pred)))
print("mean_absolute_error >>",mean_absolute_error(y_train,y_train_pred))
###Output
Max Life >> 362
Mean Life >> 206.31
Min Life >> 128
mean_squared_error >> 1960.8811887310094
root_mean_squared_error >> 44.28183813631735
mean_absolute_error >> 28.22695503478032
|
C3/W4/ungraded_labs/C3_W4_Lab_2_irish_lyrics.ipynb | ###Markdown
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
**Note:** This notebook can run using TensorFlow 2.5.0
###Code
#!pip install tensorflow==2.5.0
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import numpy as np
# irish-lyrics-eof.txt
!gdown --id 15UqmiIm0xwh9mt0IYq2z3jHaauxQSTQT
tokenizer = Tokenizer()
data = open('./irish-lyrics-eof.txt').read()
corpus = data.lower().split("\n")
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
print(tokenizer.word_index)
print(total_words)
input_sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i+1]
input_sequences.append(n_gram_sequence)
# pad sequences
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
# create predictors and label
xs, labels = input_sequences[:,:-1],input_sequences[:,-1]
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
print(tokenizer.word_index['in'])
print(tokenizer.word_index['the'])
print(tokenizer.word_index['town'])
print(tokenizer.word_index['of'])
print(tokenizer.word_index['athy'])
print(tokenizer.word_index['one'])
print(tokenizer.word_index['jeremy'])
print(tokenizer.word_index['lanigan'])
print(xs[6])
print(ys[6])
print(xs[5])
print(ys[5])
print(tokenizer.word_index)
model = Sequential()
model.add(Embedding(total_words, 100, input_length=max_sequence_len-1))
model.add(Bidirectional(LSTM(150)))
model.add(Dense(total_words, activation='softmax'))
adam = Adam(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
#earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')
history = model.fit(xs, ys, epochs=100, verbose=1)
#print model.summary()
print(model)
import matplotlib.pyplot as plt
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.show()
plot_graphs(history, 'accuracy')
seed_text = "I've got a bad feeling about this"
next_words = 100
for _ in range(next_words):
token_list = tokenizer.texts_to_sequences([seed_text])[0]
token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
predicted = model.predict_classes(token_list, verbose=0)
output_word = ""
for word, index in tokenizer.word_index.items():
if index == predicted:
output_word = word
break
seed_text += " " + output_word
print(seed_text)
###Output
_____no_output_____
###Markdown
Ungraded Lab: Generating Text from Irish LyricsIn the previous lab, you trained a model on just a single song. You might have found that the output text can quickly become gibberish or repetitive. Even if you tweak the hyperparameters, the model will still be limited by its vocabulary of only 263 words. The model will be more flexible if you train it on a much larger corpus, and that's what you'll be doing in this lab. You will use lyrics from more Irish songs and then see what the generated text looks like. You will also see how this impacts the process from data preparation to model training. Let's get started! Imports
###Code
import tensorflow as tf
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
###Output
_____no_output_____
###Markdown
Building the Word VocabularyYou will first download the lyrics dataset. These will be from a compilation of traditional Irish songs and you can see them [here](https://github.com/https-deeplearning-ai/tensorflow-1-public/blob/main/C3/W4/misc/Laurences_generated_poetry.txt).
###Code
# Download the dataset
!wget https://storage.googleapis.com/tensorflow-1-public/course3/irish-lyrics-eof.txt
###Output
--2022-04-13 12:06:37-- https://storage.googleapis.com/tensorflow-1-public/course3/irish-lyrics-eof.txt
Resolving storage.googleapis.com (storage.googleapis.com)... 74.125.132.128, 74.125.69.128, 64.233.181.128, ...
Connecting to storage.googleapis.com (storage.googleapis.com)|74.125.132.128|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 68970 (67K) [text/plain]
Saving to: ‘irish-lyrics-eof.txt’
irish-lyrics-eof.tx 0%[ ] 0 --.-KB/s
irish-lyrics-eof.tx 100%[===================>] 67.35K --.-KB/s in 0.001s
2022-04-13 12:06:37 (54.3 MB/s) - ‘irish-lyrics-eof.txt’ saved [68970/68970]
###Markdown
Next, you will lowercase and split the plain text into a list of sentences:
###Code
# Load the dataset
data = open('./irish-lyrics-eof.txt').read()
# Lowercase and split the text
corpus = data.lower().split("\n")
# Preview the result
print(corpus)
###Output
[Printed corpus output: a long list of traditional Irish folk-song lyric lines (e.g. 'come all ye maidens young and fair', ...), one string per lyric line.]
and lower the reel rings', 'ere the reel and the wheel stop their ringing and moving', 'through the grove the young lovers by moonlight are roving.', 'as i roved out one morning', 'near the verdant braes of skreen', 'i put my back to the mossy tree', 'to view the dew on the west countrie', 'the dew on the foreign strand.', 'o sit ye down on the grass, he said', 'on the dewy grass so green', 'for the wee birds all have come and gone', 'since i my true love seen, he said', 'since i my true love seen.', 'o ill not sit on the grass, she said', 'no lover ill be of thine', 'for i hear you love a connaught maid', 'and your hearts no longer mine, she said', 'and your hearts no longer mine.', 'o i will climb a high high tree', 'and ill rob a wild birds nest', 'and back ill bring what i find there', 'to the arms that i love best, he said', 'to the arms that i love best.', 'the water is wide, i cannot get oer', 'neither have i wings to fly', 'give me a boat that can carry two', 'and both shall row, my love and i', 'a ship there is and she sails the sea', 'shes loaded deep as deep can be', 'but not so deep as the love im in', 'i know not if i sink or swim', 'i leaned my back against an oak', 'thinking it was a trusty tree', 'but first it bent and then it broke', 'so did my love prove false to me', 'i reached my finger into some soft bush', 'thinking the fairest flower to find', 'i pricked my finger to the bone', 'and left the fairest flower behind', 'oh love be handsome and love be kind', 'gay as a jewel when first it is new', 'but love grows old and waxes cold', 'and fades away like the morning dew', 'must i go bound while you go free', 'must i love a man who doesnt love me', 'must i be born with so little art', 'as to love a man wholl break my heart', 'when cockle shells turn silver bells', 'then will my love come back to me', 'when roses bloom in winters gloom', 'then will my love return to me', 'o paddy dear, and did ye hear the news thats goin round?', 'the shamrock is by law forbid to grow on irish ground!', 'no more saint patricks day well keep, his color cant be seen', 'for theres a cruel law agin the wearin o the green.', 'i met with napper tandy, and he took me by the hand', 'and he said, hows poor old ireland, and how does she stand?', 'shes the most distressful country that ever yet was seen', 'for theyre hanging men and women there for the wearin o the green.', 'so if the color we must wear be englands cruel red', 'let it remind us of the blood that irishmen have shed', 'and pull the shamrock from your hat, and throw it on the sod', 'but never fear, twill take root there, though underfoot tis trod.', 'when laws can stop the blades of grass from growin as they grow', 'and when the leaves in summer-time their color dare not show', 'then i will change the color too i wear in my caubeen', 'but till that day, please god, ill stick to the wearin o the green.', 'ive been a wild rover for many a year', 'and i spent all my money on whiskey and beer,', 'and now im returning with gold in great store', 'and i never will play the wild rover no more.', 'and its no, nay, never,', 'no nay never no more,', 'will i play the wild rover', 'no never no more.', 'i went to an ale-house i used to frequent', 'and i told the landlady my money was spent.', 'i asked her for credit, she answered me nay', 'such a custom as yours i could have any day.', 'i took from my pocket ten sovereigns bright', 'and the landladys eyes opened wide with delight.', 'she said i have whiskey and wines of the best', 'and the words that 
i spoke sure were only in jest.', 'ill go home to my parents, confess what ive done', 'and ill ask them to pardon their prodigal son.', 'and if they caress (forgive) me as ofttimes before', 'sure i never will play the wild rover no more.', 'theres a tear in your eye,', 'and im wondering why,', 'for it never should be there at all.', 'with such powr in your smile,', 'sure a stone youd beguile,', 'so theres never a teardrop should fall.', 'when your sweet lilting laughters', 'like some fairy song,', 'and your eyes twinkle bright as can be;', 'you should laugh all the while', 'and all other times smile,', 'and now, smile a smile for me.', 'when irish eyes are smiling,', 'sure, tis like the morn in spring.', 'in the lilt of irish laughter', 'you can hear the angels sing.', 'when irish hearts are happy,', 'all the world seems bright and gay.', 'and when irish eyes are smiling,', 'sure, they steal your heart away.', 'for your smile is a part', 'of the love in your heart,', 'and it makes even sunshine more bright.', 'like the linnets sweet song,', 'crooning all the day long,', 'comes your laughter and light.', 'for the springtime of life', 'is the sweetest of all', 'there is neer a real care or regret;', 'and while springtime is ours', 'throughout all of youths hours,', 'let us smile each chance we get.', 'as i was a-goin over gilgarra mountain', 'i spied colonel farrell, and his money he was countin.', 'first i drew my pistols and then i drew my rapier,', 'sayin stand and deliver, for i am your bold receiver.', 'musha ringum duram da,', 'whack fol the daddy-o,', 'he counted out his money and it made a pretty penny;', 'i put it in my pocket to take home to darlin jenny.', 'she sighed and swore she loved me and never would deceive me,', 'bu the devil take the women, for they always lie so easy!', 'musha rungum duram da', 'i went into me chamber all for to take a slumber,', 'to dream of gold and girls, and of course it was no wonder:', 'me jenny took me charges and she filled them up with water,', 'called on colonel farrell to get ready for the slaughter.', 'next mornin early, before i rose for travel,', 'a-came a band of footmen and likewise colonel farrell.', 'i goes to draw my pistol, for shed stole away my rapier,', 'but a prisoner i was taken, i couldnt shoot the water.', 'they put me into jail with a judge all a-writin:', 'for robbin colonel farrell on gilgarra mountain.', 'but they didnt take me fists and i knocked the jailer down', 'and bid a farewell to this tight-fisted town.', 'musha ringum duram da', 'id like to find me brother, the one whos in the army;', 'i dont know where hes stationed, be it cork or in killarney.', 'together wed go roamin oer the mountains of kilkenny,', 'and i swear hed treat me fairer than my darlin sportin jenny!', 'theres some takes delight in the carriages and rollin,', 'some takes delight in the hurley or the bollin,', 'but i takes delight in the juice of the barley,', 'courtin pretty maids in the mornin, o so early!', 'oh the summertime is coming', 'and the trees are sweetly blooming', 'and the wild mountain thyme', 'grows around the blooming heather', 'will ye go, lassie go?', 'and well all go together', 'to pluck wild mountain thyme', 'all around the blooming heather', 'i will build my love a tower', 'near yon pure crystal fountain', 'and on it i will build', 'all the flowers of the mountain', 'if my true love she were gone', 'i would surely find another', 'where wild mountain thyme', '']
###Markdown
From here, you can initialize the `Tokenizer` class and generate the word index dictionary:
###Code
# Initialize the Tokenizer class
tokenizer = Tokenizer()
# Generate the word index dictionary
tokenizer.fit_on_texts(corpus)
# Define the total words. You add 1 for the index `0` which is just the padding token.
total_words = len(tokenizer.word_index) + 1
print(f'word index dictionary: {tokenizer.word_index}')
print(f'total words: {total_words}')
###Output
word index dictionary: {'the': 1, 'and': 2, 'i': 3, 'to': 4, 'a': 5, 'of': 6, 'my': 7, 'in': 8, 'me': 9, 'for': 10, 'you': 11, 'all': 12, 'was': 13, 'she': 14, 'that': 15, 'on': 16, 'with': 17, 'her': 18, 'but': 19, 'as': 20, 'when': 21, 'love': 22, 'is': 23, 'your': 24, 'it': 25, 'will': 26, 'from': 27, 'by': 28, 'they': 29, 'be': 30, 'are': 31, 'so': 32, 'he': 33, 'old': 34, 'no': 35, 'oh': 36, 'ill': 37, 'at': 38, 'one': 39, 'his': 40, 'there': 41, 'were': 42, 'heart': 43, 'down': 44, 'now': 45, 'we': 46, 'where': 47, 'young': 48, 'never': 49, 'go': 50, 'come': 51, 'then': 52, 'did': 53, 'not': 54, 'said': 55, 'away': 56, 'their': 57, 'sweet': 58, 'them': 59, 'green': 60, 'if': 61, 'take': 62, 'our': 63, 'like': 64, 'night': 65, 'day': 66, 'o': 67, 'out': 68, 'fair': 69, 'this': 70, 'town': 71, 'have': 72, 'can': 73, 'true': 74, 'its': 75, 'thou': 76, 'see': 77, 'dear': 78, 'more': 79, 'theres': 80, 'or': 81, 'had': 82, 'would': 83, 'over': 84, 'hear': 85, 'up': 86, 'ive': 87, 'through': 88, 'home': 89, 'again': 90, 'well': 91, 'oer': 92, 'land': 93, 'good': 94, 'im': 95, 'ye': 96, 'sea': 97, 'left': 98, 'still': 99, 'father': 100, 'long': 101, 'rose': 102, 'could': 103, 'morning': 104, 'wild': 105, 'who': 106, 'eyes': 107, 'came': 108, 'while': 109, 'too': 110, 'back': 111, 'little': 112, 'an': 113, 'took': 114, 'him': 115, 'bow': 116, 'first': 117, 'let': 118, 'man': 119, 'shall': 120, 'know': 121, 'get': 122, 'high': 123, 'gone': 124, 'say': 125, 'ever': 126, 'some': 127, 'mary': 128, 'hand': 129, 'till': 130, 'put': 131, 'own': 132, 'time': 133, 'heard': 134, 'dead': 135, 'may': 136, 'bright': 137, 'mountain': 138, 'early': 139, 'rosin': 140, 'gave': 141, 'thee': 142, 'only': 143, 'far': 144, 'maid': 145, 'must': 146, 'find': 147, 'girl': 148, 'sure': 149, 'round': 150, 'dublin': 151, 'once': 152, 'world': 153, 'delight': 154, 'last': 155, 'johnny': 156, 'seen': 157, 'has': 158, 'fine': 159, 'road': 160, 'mother': 161, 'tis': 162, 'what': 163, 'way': 164, 'moon': 165, 'soul': 166, 'neer': 167, 'id': 168, 'just': 169, 'thats': 170, 'days': 171, 'darling': 172, 'went': 173, 'white': 174, 'die': 175, 'than': 176, 'hair': 177, 'goes': 178, 'meet': 179, 'today': 180, 'do': 181, 'girls': 182, 'shes': 183, 'thyme': 184, 'thy': 185, 'sing': 186, 'pretty': 187, 'new': 188, 'poor': 189, 'into': 190, 'life': 191, 'irish': 192, 'give': 193, 'boy': 194, 'youre': 195, 'make': 196, 'passed': 197, 'lovely': 198, 'black': 199, 'youll': 200, 'died': 201, 'red': 202, 'smile': 203, 'keep': 204, 'loves': 205, 'free': 206, 'leave': 207, 'friends': 208, 'each': 209, 'saw': 210, 'behind': 211, 'song': 212, 'ra': 213, 'dont': 214, 'arms': 215, 'am': 216, 'sun': 217, 'saying': 218, 'made': 219, 'wish': 220, 'cold': 221, 'met': 222, 'before': 223, 'should': 224, 'rocky': 225, 'light': 226, 'wid': 227, 'boys': 228, 'best': 229, 'fields': 230, 'since': 231, 'ball': 232, 'water': 233, 'casey': 234, 'mind': 235, 'along': 236, 'loved': 237, 'place': 238, 'ireland': 239, 'next': 240, 'three': 241, 'many': 242, 'years': 243, 'door': 244, 'us': 245, 'drink': 246, 'got': 247, 'might': 248, 'live': 249, 'roses': 250, 'play': 251, 'soon': 252, 'ground': 253, 'times': 254, 'spent': 255, 'going': 256, 'tree': 257, 'barley': 258, 'grass': 259, 'kind': 260, 'twas': 261, 'bridge': 262, 'around': 263, 'blue': 264, 'tell': 265, 'row': 266, 'how': 267, 'money': 268, 'merry': 269, 'stepped': 270, 'corporal': 271, 'always': 272, 'though': 273, 'near': 274, 'taken': 275, 'ones': 276, 'daughter': 277, 'forever': 278, 'loo': 279, 
'shining': 280, 'plenty': 281, 'hes': 282, 'ship': 283, 'banks': 284, 'think': 285, 'very': 286, 'stand': 287, 'heres': 288, 'snow': 289, 'mountains': 290, 'molly': 291, 'wheel': 292, 'street': 293, 'erin': 294, 'side': 295, 'feet': 296, 'star': 297, 'look': 298, 'brave': 299, 'woman': 300, 'sons': 301, 'two': 302, 'says': 303, 'asked': 304, 'lanigans': 305, 'singing': 306, 'men': 307, 'toome': 308, 'stole': 309, 'god': 310, 'hill': 311, 'lonely': 312, 'lover': 313, 'tears': 314, 'fathers': 315, 'low': 316, 'voice': 317, 'quite': 318, 'able': 319, 'nice': 320, 'laid': 321, 'comrades': 322, 'wind': 323, 'another': 324, 'sit': 325, 'face': 326, 'band': 327, 'call': 328, 'colleen': 329, 'until': 330, 'hills': 331, 'mine': 332, 'above': 333, 'upon': 334, 'eer': 335, 'youve': 336, 'fly': 337, 'been': 338, 'late': 339, 'alive': 340, 'ballyjamesduff': 341, 'looked': 342, 'great': 343, 'why': 344, 'every': 345, 'proud': 346, 'found': 347, 'bragh': 348, 'such': 349, 'birds': 350, 'wedding': 351, 'welcome': 352, 'dancing': 353, 'da': 354, 'fell': 355, 'thinking': 356, 'roddy': 357, 'mccorley': 358, 'smiling': 359, 'mallow': 360, 'blooming': 361, 'thought': 362, 'peace': 363, 'soft': 364, 'pure': 365, 'harp': 366, 'dream': 367, 'alas': 368, 'yet': 369, 'clear': 370, 'art': 371, 'off': 372, 'hope': 373, 'fought': 374, 'mothers': 375, 'shore': 376, 'ago': 377, 'fol': 378, 'de': 379, 'house': 380, 'married': 381, 'bound': 382, 'danced': 383, 'devil': 384, 'dawning': 385, 'makes': 386, 'same': 387, 'sat': 388, 'any': 389, 'glass': 390, 'gay': 391, 'relations': 392, 'evening': 393, 'watched': 394, 'right': 395, 'fellows': 396, 'whiskey': 397, 'bonnie': 398, 'grows': 399, 'women': 400, 'flowers': 401, 'beauty': 402, 'cannot': 403, 'handsome': 404, 'happy': 405, 'gold': 406, 'rover': 407, 'none': 408, 'doneen': 409, 'summers': 410, 'people': 411, 'set': 412, 'paddy': 413, 'morn': 414, 'most': 415, 'easy': 416, 'struck': 417, 'beautiful': 418, 'those': 419, 'golden': 420, 'run': 421, 'pipes': 422, 'glen': 423, 'dying': 424, 'here': 425, 'wall': 426, 'across': 427, 'fire': 428, 'eileen': 429, 'longer': 430, 'cheeks': 431, 'valley': 432, 'both': 433, 'dew': 434, 'care': 435, 'bride': 436, 'nothing': 437, 'wont': 438, 'theyre': 439, 'colonel': 440, 'maiden': 441, 'shed': 442, 'til': 443, 'brown': 444, 'breast': 445, 'corn': 446, 'sinking': 447, 'began': 448, 'name': 449, 'cruel': 450, 'sound': 451, 'spancil': 452, 'county': 453, 'lies': 454, 'color': 455, 'thing': 456, 'decay': 457, 'sleep': 458, 'hours': 459, 'loving': 460, 'weary': 461, 'ringing': 462, 'please': 463, 'forget': 464, 'lie': 465, 'ran': 466, 'tore': 467, 'country': 468, 'fear': 469, 'fortune': 470, 'kissed': 471, 'alone': 472, 'ould': 473, 'cry': 474, 'dreams': 475, 'used': 476, 'horse': 477, 'break': 478, 'bells': 479, 'didnt': 480, 'weeks': 481, 'without': 482, 'raw': 483, 'nor': 484, 'twenty': 485, 'tune': 486, 'hed': 487, 'roving': 488, 'leaves': 489, 'cant': 490, 'death': 491, 'ten': 492, 'prison': 493, 'judge': 494, 'against': 495, 'lads': 496, 'shell': 497, 'fill': 498, 'valleys': 499, 'other': 500, 'pale': 501, 'joy': 502, 'wide': 503, 'bring': 504, 'ah': 505, 'cliffs': 506, 'city': 507, 'end': 508, 'turn': 509, 'sky': 510, 'born': 511, 'knew': 512, 'smiled': 513, 'rosie': 514, 'comes': 515, 'sayin': 516, 'lord': 517, 'dungannon': 518, 'blood': 519, 'air': 520, 'danny': 521, 'calling': 522, 'sunshine': 523, 'spring': 524, 'bid': 525, 'grow': 526, 'truth': 527, 'tear': 528, 'rings': 529, 'guns': 530, 'bay': 531, 'oflynn': 532, 'och': 533, 
'stick': 534, 'rest': 535, 'four': 536, 'jewel': 537, 'tried': 538, 'grief': 539, 'answer': 540, 'kathleen': 541, 'fond': 542, 'eye': 543, 'goin': 544, 'pistols': 545, 'musha': 546, 'whack': 547, 'creole': 548, 'together': 549, 'room': 550, 'fall': 551, 'swore': 552, 'being': 553, 'step': 554, 'lark': 555, 'cailín': 556, 'deas': 557, 'crúite': 558, 'na': 559, 'mbó': 560, 'sir': 561, 'isle': 562, 'waiting': 563, 'magic': 564, 'skibbereen': 565, 'loud': 566, 'raise': 567, 'bent': 568, 'aged': 569, 'summer': 570, 'jenny': 571, 'excise': 572, 'rigadoo': 573, 'auld': 574, 'hearts': 575, 'nay': 576, 'stool': 577, 'farrell': 578, 'garden': 579, 'precious': 580, 'child': 581, 'slumber': 582, 'sleeping': 583, 'watch': 584, 'gently': 585, 'minstrel': 586, 'praise': 587, 'bell': 588, 'shaken': 589, 'immortal': 590, 'pray': 591, 'stay': 592, 'spoke': 593, 'cross': 594, 'brothers': 595, 'much': 596, 'past': 597, 'killarney': 598, 'sang': 599, 'tones': 600, 'ral': 601, 'wander': 602, 'cot': 603, 'feel': 604, 'yore': 605, 'answered': 606, 'divil': 607, 'middle': 608, 'bit': 609, 'led': 610, 'soldiers': 611, 'lily': 612, 'bed': 613, 'lassie': 614, 'clothes': 615, 'return': 616, 'broken': 617, 'derry': 618, 'sighed': 619, 'english': 620, 'tomorrow': 621, 'souls': 622, 'van': 623, 'diemans': 624, 'law': 625, 'neither': 626, 'winds': 627, 'rather': 628, 'doesnt': 629, 'rosy': 630, 'neatest': 631, 'hands': 632, 'whereon': 633, 'stands': 634, 'write': 635, 'thousand': 636, 'fare': 637, 'youd': 638, 'velvet': 639, 'neat': 640, 'landed': 641, 'health': 642, 'kellswater': 643, 'quiet': 644, 'stars': 645, 'beside': 646, 'warm': 647, 'sunday': 648, 'grey': 649, 'ocean': 650, 'sad': 651, 'spend': 652, 'kilkenny': 653, 'silver': 654, 'view': 655, 'west': 656, 'plain': 657, 'barrow': 658, 'broad': 659, 'narrow': 660, 'crying': 661, 'wonder': 662, 'save': 663, 'stop': 664, 'tender': 665, 'told': 666, 'lip': 667, 'dance': 668, 'foot': 669, 'kilrain': 670, 'saint': 671, 'visit': 672, 'mossy': 673, 'wexford': 674, 'irishmen': 675, 'shadow': 676, 'tho': 677, 'salley': 678, 'gardens': 679, 'foolish': 680, 'youth': 681, 'fade': 682, 'war': 683, 'believe': 684, 'which': 685, 'change': 686, 'entwine': 687, 'turns': 688, 'turned': 689, 'crown': 690, 'played': 691, 'captain': 692, 'blow': 693, 'children': 694, 'slainte': 695, 'gentle': 696, 'heavens': 697, 'bloom': 698, 'grand': 699, 'bush': 700, 'nest': 701, 'rich': 702, 'parting': 703, 'better': 704, 'window': 705, 'haste': 706, 'fresh': 707, 'stream': 708, 'rays': 709, 'ma': 710, 'ring': 711, 'lad': 712, 'athy': 713, 'drop': 714, 'hardly': 715, 'done': 716, 'arm': 717, 'leg': 718, 'beg': 719, 'drew': 720, 'bold': 721, 'drawn': 722, 'jail': 723, 'writin': 724, 'farewell': 725, 'tired': 726, 'lake': 727, 'want': 728, 'ringlets': 729, 'myself': 730, 'songs': 731, 'reel': 732, 'steps': 733, 'hearty': 734, 'fainted': 735, 'called': 736, 'under': 737, 'toe': 738, 'mairi': 739, 'fairest': 740, 'darlin': 741, 'bird': 742, 'memory': 743, 'lips': 744, 'sweetly': 745, 'morrow': 746, 'consent': 747, 'else': 748, 'sold': 749, 'stout': 750, 'pair': 751, 'drinking': 752, 'meself': 753, 'fray': 754, 'pike': 755, 'coat': 756, 'beneath': 757, 'rent': 758, 'part': 759, 'half': 760, 'head': 761, 'friend': 762, 'standing': 763, 'floor': 764, 'bare': 765, 'wed': 766, 'son': 767, 'pride': 768, 'vision': 769, 'sword': 770, 'after': 771, 'won': 772, 'farmers': 773, 'flower': 774, 'nut': 775, 'surely': 776, 'stood': 777, 'wandered': 778, 'athenry': 779, 'rising': 780, 'beating': 781, 'form': 782, 
'dhu': 783, 'buy': 784, 'laughter': 785, 'wear': 786, 'raking': 787, 'rakes': 788, 'claret': 789, 'shure': 790, 'tralee': 791, 'slower': 792, 'lower': 793, 'deep': 794, 'wearin': 795, 'duram': 796, 'takes': 797, 'beware': 798, 'steal': 799, 'brings': 800, 'things': 801, 'joys': 802, 'bunch': 803, 'sailor': 804, 'chanced': 805, 'pass': 806, 'angels': 807, 'send': 808, 'drowsy': 809, 'keeping': 810, 'spirit': 811, 'stealing': 812, 'feeling': 813, 'roam': 814, 'presence': 815, 'heavenward': 816, 'dust': 817, 'dim': 818, 'journey': 819, 'waves': 820, 'frightened': 821, 'leaving': 822, 'struggle': 823, 'parents': 824, 'courage': 825, 'weeping': 826, 'pain': 827, 'mist': 828, 'felt': 829, 'roared': 830, 'making': 831, 'fever': 832, 'moment': 833, 'distance': 834, 'wailing': 835, 'oft': 836, 'held': 837, 'fast': 838, 'cabin': 839, 'honey': 840, 'diddle': 841, 'clearly': 842, 'open': 843, 'opened': 844, 'table': 845, 'wine': 846, 'lay': 847, 'shells': 848, 'sailed': 849, 'drown': 850, 'fetters': 851, 'chains': 852, 'wives': 853, 'sorrow': 854, 'thoughts': 855, 'cursed': 856, 'hell': 857, 'five': 858, 'buried': 859, 'lost': 860, 'endless': 861, 'slavery': 862, 'gun': 863, 'rain': 864, 'cares': 865, 'ghosts': 866, 'runaway': 867, 'twill': 868, 'month': 869, 'meadows': 870, 'prettiest': 871, 'winters': 872, 'satisfied': 873, 'few': 874, 'short': 875, 'lines': 876, 'shone': 877, 'shoulder': 878, 'belfast': 879, 'trade': 880, 'bad': 881, 'caused': 882, 'stray': 883, 'meaning': 884, 'damsel': 885, 'appear': 886, 'seven': 887, 'sentence': 888, 'jolly': 889, 'whenever': 890, 'wee': 891, 'wife': 892, 'lives': 893, 'martha': 894, 'courted': 895, 'bridgit': 896, 'omalley': 897, 'desolation': 898, 'thorn': 899, 'gaze': 900, 'stone': 901, 'approaching': 902, 'sets': 903, 'carrigfergus': 904, 'nights': 905, 'swim': 906, 'wings': 907, 'sober': 908, 'travel': 909, 'native': 910, 'places': 911, 'slopes': 912, 'hares': 913, 'lofty': 914, 'malone': 915, 'wheeled': 916, 'streets': 917, 'enough': 918, 'reilly': 919, 'tough': 920, 'whispers': 921, 'phil': 922, 'threw': 923, 'straight': 924, 'belles': 925, 'moor': 926, 'brand': 927, 'shapes': 928, 'work': 929, 'vow': 930, 'blarney': 931, 'paid': 932, 'bower': 933, 'remain': 934, 'charming': 935, 'storied': 936, 'chieftains': 937, 'slaughter': 938, 'bann': 939, 'boyne': 940, 'liffey': 941, 'gallant': 942, 'awake': 943, 'greet': 944, 'meadow': 945, 'sweeter': 946, 'dirty': 947, 'cats': 948, 'crossed': 949, 'field': 950, 'river': 951, 'full': 952, 'aroon': 953, 'sends': 954, 'woe': 955, 'chain': 956, 'main': 957, 'charms': 958, 'fondly': 959, 'fleet': 960, 'fairy': 961, 'thine': 962, 'known': 963, 'truly': 964, 'close': 965, 'story': 966, 'flag': 967, 'sweetest': 968, 'honor': 969, 'playing': 970, 'mauser': 971, 'music': 972, 'tom': 973, 'hurrah': 974, 'big': 975, 'lead': 976, 'south': 977, 'generation': 978, 'freedom': 979, 'agin': 980, 'creature': 981, 'dad': 982, 'venture': 983, 'word': 984, 'wonderful': 985, 'crazy': 986, 'lazy': 987, 'grave': 988, 'jest': 989, 'remark': 990, 'strangers': 991, 'strong': 992, 'shook': 993, 'walk': 994, 'north': 995, 'ours': 996, 'cease': 997, 'strife': 998, 'whats': 999, 'lilacs': 1000, 'prove': 1001, 'sweetheart': 1002, 'letters': 1003, 'sent': 1004, 'speak': 1005, 'brow': 1006, 'albert': 1007, 'mooney': 1008, 'fighting': 1009, 'fingers': 1010, 'toes': 1011, 'john': 1012, 'hurroo': 1013, 'drums': 1014, 'beguiled': 1015, 'carry': 1016, 'bone': 1017, 'havent': 1018, 'walkin': 1019, 'kilgary': 1020, 'pepper': 1021, 'countin': 1022, 
'forth': 1023, 'deliver': 1024, 'daddy': 1025, 'em': 1026, 'deceive': 1027, 'between': 1028, 'even': 1029, 'prisoner': 1030, 'fists': 1031, 'knocked': 1032, 'carriages': 1033, 'rollin': 1034, 'juice': 1035, 'courtin': 1036, 'ponchartrain': 1037, 'does': 1038, 'stranger': 1039, 'marry': 1040, 'adieu': 1041, 'ask': 1042, 'tipped': 1043, 'arrived': 1044, 'ladies': 1045, 'potatoes': 1046, 'courting': 1047, 'miss': 1048, 'small': 1049, 'ned': 1050, 'ribbons': 1051, 'heel': 1052, 'bonny': 1053, 'pipe': 1054, 'thrush': 1055, 'sweethearts': 1056, 'unto': 1057, 'rise': 1058, 'softly': 1059, 'milking': 1060, 'rare': 1061, 'pity': 1062, 'treasure': 1063, 'noon': 1064, 'sailing': 1065, 'banish': 1066, 'riches': 1067, 'comfort': 1068, 'yonder': 1069, 'flows': 1070, 'fairer': 1071, 'lass': 1072, 'woods': 1073, 'strayed': 1074, 'locks': 1075, 'breaking': 1076, 'june': 1077, 'started': 1078, 'hearted': 1079, 'beer': 1080, 'daylight': 1081, 'among': 1082, 'bundle': 1083, 'connaught': 1084, 'quay': 1085, 'erins': 1086, 'galway': 1087, 'fearless': 1088, 'bravely': 1089, 'marches': 1090, 'fate': 1091, 'neck': 1092, 'trod': 1093, 'marched': 1094, 'antrim': 1095, 'sash': 1096, 'flashed': 1097, 'hath': 1098, 'foemans': 1099, 'fight': 1100, 'heavy': 1101, 'bore': 1102, 'mans': 1103, 'counter': 1104, 'dozen': 1105, 'gallon': 1106, 'bottles': 1107, 'diamond': 1108, 'resemble': 1109, 'tiny': 1110, 'friendly': 1111, 'weather': 1112, 'inside': 1113, 'remember': 1114, 'someone': 1115, 'hat': 1116, 'body': 1117, 'dancers': 1118, 'hanging': 1119, 'empty': 1120, 'shoes': 1121, 'broke': 1122, 'december': 1123, 'move': 1124, 'reason': 1125, 'roof': 1126, 'naught': 1127, 'tower': 1128, 'power': 1129, 'king': 1130, 'dreaming': 1131, 'crew': 1132, 'whos': 1133, 'mccann': 1134, 'smoke': 1135, 'notes': 1136, 'yeoman': 1137, 'cavalry': 1138, 'guard': 1139, 'forced': 1140, 'brother': 1141, 'cousin': 1142, 'blame': 1143, 'croppy': 1144, 'dressed': 1145, 'trees': 1146, 'wore': 1147, 'words': 1148, 'swiftly': 1149, 'dawn': 1150, 'lovd': 1151, 'voices': 1152, 'moaning': 1153, 'dark': 1154, 'gather': 1155, 'tay': 1156, 'swinging': 1157, 'drinkin': 1158, 'sitting': 1159, 'stile': 1160, 'springing': 1161, 'yours': 1162, 'kept': 1163, 'aisey': 1164, 'rub': 1165, 'dub': 1166, 'dow': 1167, 'shelah': 1168, 'fairly': 1169, 'beggarman': 1170, 'begging': 1171, 'slept': 1172, 'holes': 1173, 'coming': 1174, 'thru': 1175, 'boo': 1176, 'lady': 1177, 'kerry': 1178, 'pipers': 1179, 'laugh': 1180, 'beaming': 1181, 'guineas': 1182, 'least': 1183, 'diggin': 1184, 'mourne': 1185, 'spending': 1186, 'mellow': 1187, 'plying': 1188, 'slowly': 1189, 'mooncoin': 1190, 'flow': 1191, 'sounds': 1192, 'shine': 1193, 'cool': 1194, 'crystal': 1195, 'fountain': 1196, 'moonlight': 1197, 'grandmother': 1198, 'crooning': 1199, 'merrily': 1200, 'spins': 1201, 'lightly': 1202, 'moving': 1203, 'lattice': 1204, 'grove': 1205, 'swings': 1206, 'finger': 1207, 'shamrock': 1208, 'pocket': 1209, 'springtime': 1210, 'gilgarra': 1211, 'rapier': 1212, 'ringum': 1213, 'mornin': 1214, 'heather': 1215, 'build': 1216, 'maidens': 1217, 'prime': 1218, 'nlyme': 1219, 'flavours': 1220, 'lusty': 1221, 'reminded': 1222, 'attend': 1223, 'guardian': 1224, 'creeping': 1225, 'dale': 1226, 'vigil': 1227, 'visions': 1228, 'revealing': 1229, 'breathes': 1230, 'holy': 1231, 'strains': 1232, 'hover': 1233, 'hark': 1234, 'solemn': 1235, 'winging': 1236, 'earthly': 1237, 'shalt': 1238, 'awaken': 1239, 'destiny': 1240, 'emigrants': 1241, 'amid': 1242, 'longing': 1243, 'parted': 1244, 'townland': 1245, 
'vessel': 1246, 'crowded': 1247, 'disquieted': 1248, 'folk': 1249, 'escape': 1250, 'hardship': 1251, 'sustaining': 1252, 'glimpse': 1253, 'faded': 1254, 'strangely': 1255, 'seas': 1256, 'anger': 1257, 'desperate': 1258, 'plight': 1259, 'worsened': 1260, 'delirium': 1261, 'possessed': 1262, 'clouded': 1263, 'prayers': 1264, 'begged': 1265, 'forgiveness': 1266, 'seeking': 1267, 'distant': 1268, 'mither': 1269, 'simple': 1270, 'ditty': 1271, 'ld': 1272, 'li': 1273, 'hush': 1274, 'lullaby': 1275, 'huggin': 1276, 'hummin': 1277, 'rock': 1278, 'asleep': 1279, 'outside': 1280, 'modestly': 1281, 'ry': 1282, 'ay': 1283, 'di': 1284, 're': 1285, 'dai': 1286, 'rie': 1287, 'shc': 1288, 'bridle': 1289, 'stable': 1290, 'oats': 1291, 'eat': 1292, 'soldier': 1293, 'aisy': 1294, 'arose': 1295, 'christmas': 1296, '1803': 1297, 'australia': 1298, 'marks': 1299, 'carried': 1300, 'rusty': 1301, 'iron': 1302, 'wains': 1303, 'mainsails': 1304, 'unfurled': 1305, 'curses': 1306, 'hurled': 1307, 'swell': 1308, 'moth': 1309, 'firelights': 1310, 'horses': 1311, 'rode': 1312, 'taking': 1313, 'hades': 1314, 'twilight': 1315, 'forty': 1316, 'slime': 1317, 'climate': 1318, 'bravery': 1319, 'ended': 1320, 'bond': 1321, 'rebel': 1322, 'iii': 1323, 'violin': 1324, 'clay': 1325, 'sooner': 1326, 'sport': 1327, 'colour': 1328, 'knows': 1329, 'earth': 1330, 'serve': 1331, 'clyde': 1332, 'mourn': 1333, 'weep': 1334, 'suffer': 1335, 'diamonds': 1336, 'queen': 1337, 'hung': 1338, 'tied': 1339, 'apprenticed': 1340, 'happiness': 1341, 'misfortune': 1342, 'follow': 1343, 'strolling': 1344, 'selling': 1345, 'bar': 1346, 'customer': 1347, 'slipped': 1348, 'luck': 1349, 'jury': 1350, 'trial': 1351, 'case': 1352, 'warning': 1353, 'liquor': 1354, 'porter': 1355, 'pleasures': 1356, 'fishing': 1357, 'farming': 1358, 'glens': 1359, 'softest': 1360, 'dripping': 1361, 'snare': 1362, 'lose': 1363, 'court': 1364, 'primrose': 1365, 'bee': 1366, 'hopeless': 1367, 'wonders': 1368, 'admiration': 1369, 'haunt': 1370, 'wherever': 1371, 'sands': 1372, 'purer': 1373, 'within': 1374, 'grieve': 1375, 'drumslieve': 1376, 'ballygrant': 1377, 'deepest': 1378, 'boatsman': 1379, 'ferry': 1380, 'childhood': 1381, 'reflections': 1382, 'boyhood': 1383, 'melting': 1384, 'roaming': 1385, 'reported': 1386, 'marble': 1387, 'stones': 1388, 'ink': 1389, 'support': 1390, 'drunk': 1391, 'seldom': 1392, 'sick': 1393, 'numbered': 1394, 'foam': 1395, 'compare': 1396, 'sights': 1397, 'coast': 1398, 'clare': 1399, 'kilkee': 1400, 'kilrush': 1401, 'watching': 1402, 'pheasants': 1403, 'homes': 1404, 'streams': 1405, 'dublins': 1406, 'cockles': 1407, 'mussels': 1408, 'fish': 1409, 'monger': 1410, 'ghost': 1411, 'wheels': 1412, 'eden': 1413, 'vanished': 1414, 'finea': 1415, 'halfway': 1416, 'cootehill': 1417, 'gruff': 1418, 'whispering': 1419, 'crow': 1420, 'newborn': 1421, 'babies': 1422, 'huff': 1423, 'start': 1424, 'sorrowful': 1425, 'squall': 1426, 'babys': 1427, 'toil': 1428, 'worn': 1429, 'fore': 1430, 'flute': 1431, 'yer': 1432, 'boot': 1433, 'magee': 1434, 'scruff': 1435, 'slanderin': 1436, 'marchin': 1437, 'assisted': 1438, 'drain': 1439, 'dudeen': 1440, 'puff': 1441, 'whisperings': 1442, 'barrin': 1443, 'chocolate': 1444, 'feegee': 1445, 'sort': 1446, 'moonshiny': 1447, 'stuff': 1448, 'addle': 1449, 'brain': 1450, 'ringin': 1451, 'glamour': 1452, 'gas': 1453, 'guff': 1454, 'whisper': 1455, 'oil': 1456, 'remarkable': 1457, 'policeman': 1458, 'bluff': 1459, 'maintain': 1460, 'guril': 1461, 'sic': 1462, 'passage': 1463, 'rough': 1464, 'borne': 1465, 'breeze': 1466, 
'boundless': 1467, 'stupendous': 1468, 'roll': 1469, 'thundering': 1470, 'motion': 1471, 'mermaids': 1472, 'fierce': 1473, 'tempest': 1474, 'gathers': 1475, 'oneill': 1476, 'odonnell': 1477, 'lucan': 1478, 'oconnell': 1479, 'brian': 1480, 'drove': 1481, 'danes': 1482, 'patrick': 1483, 'vermin': 1484, 'whose': 1485, 'benburb': 1486, 'blackwater': 1487, 'owen': 1488, 'roe': 1489, 'munroe': 1490, 'lambs': 1491, 'skip': 1492, 'views': 1493, 'enchanting': 1494, 'rostrevor': 1495, 'groves': 1496, 'lakes': 1497, 'ride': 1498, 'tide': 1499, 'majestic': 1500, 'shannon': 1501, 'sail': 1502, 'loch': 1503, 'neagh': 1504, 'ross': 1505, 'gorey': 1506, 'saxon': 1507, 'tory': 1508, 'soil': 1509, 'sanctified': 1510, 'enemies': 1511, 'links': 1512, 'encumbered': 1513, 'resound': 1514, 'hosannahs': 1515, 'bide': 1516, 'hushed': 1517, 'lying': 1518, 'kneel': 1519, 'ave': 1520, 'tread': 1521, 'fail': 1522, 'simply': 1523, 'gasworks': 1524, 'croft': 1525, 'dreamed': 1526, 'canal': 1527, 'factory': 1528, 'clouds': 1529, 'drifting': 1530, 'prowling': 1531, 'beat': 1532, 'springs': 1533, 'siren': 1534, 'docks': 1535, 'train': 1536, 'smelled': 1537, 'smokey': 1538, 'sharp': 1539, 'axe': 1540, 'steel': 1541, 'tempered': 1542, 'chop': 1543, 't': 1544, 'agree': 1545, 'leaning': 1546, 'weirs': 1547, 'ray': 1548, 'glow': 1549, 'changeless': 1550, 'constant': 1551, 'bounding': 1552, 'castles': 1553, 'sacked': 1554, 'scattered': 1555, 'fixed': 1556, 'endearing': 1557, 'gifts': 1558, 'fading': 1559, 'wouldst': 1560, 'adored': 1561, 'loveliness': 1562, 'ruin': 1563, 'itself': 1564, 'verdantly': 1565, 'unprofaned': 1566, 'fervor': 1567, 'faith': 1568, 'forgets': 1569, 'sunflower': 1570, 'rag': 1571, 'games': 1572, 'hold': 1573, 'defend': 1574, 'veteran': 1575, 'volunteers': 1576, 'pat': 1577, 'pearse': 1578, 'clark': 1579, 'macdonagh': 1580, 'macdiarmada': 1581, 'mcbryde': 1582, 'james': 1583, 'connolly': 1584, 'placed': 1585, 'machine': 1586, 'ranting': 1587, 'hour': 1588, 'bullet': 1589, 'stuck': 1590, 'craw': 1591, 'poisoning': 1592, 'ceannt': 1593, 'lions': 1594, 'union': 1595, 'poured': 1596, 'dismay': 1597, 'horror': 1598, 'englishmen': 1599, 'khaki': 1600, 'renown': 1601, 'fame': 1602, 'forefathers': 1603, 'blaze': 1604, 'priests': 1605, 'offer': 1606, 'charmin': 1607, 'variety': 1608, 'renownd': 1609, 'learnin': 1610, 'piety': 1611, 'advance': 1612, 'widout': 1613, 'impropriety': 1614, 'flowr': 1615, 'cho': 1616, 'powrfulest': 1617, 'preacher': 1618, 'tenderest': 1619, 'teacher': 1620, 'kindliest': 1621, 'donegal': 1622, 'talk': 1623, 'provost': 1624, 'trinity': 1625, 'famous': 1626, 'greek': 1627, 'latinity': 1628, 'divils': 1629, 'divinity': 1630, 'd': 1631, 'likes': 1632, 'logic': 1633, 'mythology': 1634, 'thayology': 1635, 'conchology': 1636, 'sinners': 1637, 'wishful': 1638, 'childer': 1639, 'avick': 1640, 'gad': 1641, 'flock': 1642, 'grandest': 1643, 'control': 1644, 'checking': 1645, 'coaxin': 1646, 'onaisy': 1647, 'lifting': 1648, 'avoidin': 1649, 'frivolity': 1650, 'seasons': 1651, 'innocent': 1652, 'jollity': 1653, 'playboy': 1654, 'claim': 1655, 'equality': 1656, 'comicality': 1657, 'bishop': 1658, 'lave': 1659, 'gaiety': 1660, 'laity': 1661, 'clergy': 1662, 'jewels': 1663, 'plundering': 1664, 'pillage': 1665, 'starved': 1666, 'cries': 1667, 'thems': 1668, 'bondage': 1669, 'fourth': 1670, 'tabhair': 1671, 'dom': 1672, 'lámh': 1673, 'harmony': 1674, 'east': 1675, 'destroy': 1676, 'command': 1677, 'gesture': 1678, 'troubles': 1679, 'weak': 1680, 'peoples': 1681, 'creeds': 1682, 'lets': 1683, 'needs': 1684, 
'passion': 1685, 'fashion': 1686, 'guide': 1687, 'share': 1688, 'sparkling': 1689, 'meeting': 1690, 'iull': 1691, 'contented': 1692, 'ache': 1693, 'painful': 1694, 'wrote': 1695, 'twisted': 1696, 'twined': 1697, 'cheek': 1698, 'bedim': 1699, 'holds': 1700, 'smiles': 1701, 'scarcely': 1702, 'darkning': 1703, 'beyond': 1704, 'yearn': 1705, 'laughs': 1706, 'humble': 1707, 'brightest': 1708, 'gleam': 1709, 'forgot': 1710, 'pulled': 1711, 'comb': 1712, 'counting': 1713, 'knock': 1714, 'murray': 1715, 'fellow': 1716, 'hail': 1717, 'tumblin': 1718, 'apple': 1719, 'pie': 1720, 'gets': 1721, 'doleful': 1722, 'enemy': 1723, 'nearly': 1724, 'slew': 1725, 'queer': 1726, 'mild': 1727, 'legs': 1728, 'indeed': 1729, 'island': 1730, 'sulloon': 1731, 'flesh': 1732, 'yere': 1733, 'armless': 1734, 'boneless': 1735, 'chickenless': 1736, 'egg': 1737, 'yell': 1738, 'bowl': 1739, 'rolling': 1740, 'swearing': 1741, 'rattled': 1742, 'saber': 1743, 'deceiver': 1744, 'rig': 1745, 'um': 1746, 'du': 1747, 'rum': 1748, 'jar': 1749, 'shinin': 1750, 'coins': 1751, 'promised': 1752, 'vowed': 1753, 'devils': 1754, 'awakened': 1755, 'six': 1756, 'guards': 1757, 'numbers': 1758, 'odd': 1759, 'flew': 1760, 'mistaken': 1761, 'mollys': 1762, 'robbing': 1763, 'sentry': 1764, 'sligo': 1765, 'fishin': 1766, 'bowlin': 1767, 'others': 1768, 'railroad': 1769, 'ties': 1770, 'crossings': 1771, 'swamps': 1772, 'elevations': 1773, 'resolved': 1774, 'sunset': 1775, 'higher': 1776, 'win': 1777, 'allegators': 1778, 'wood': 1779, 'treated': 1780, 'shoulders': 1781, 'paint': 1782, 'picture': 1783, 'vain': 1784, 'returned': 1785, 'cottage': 1786, 'sociable': 1787, 'foaming': 1788, 'n': 1789, 'jeremy': 1790, 'lanigan': 1791, 'battered': 1792, 'hadnt': 1793, 'pound': 1794, 'farm': 1795, 'acres': 1796, 'party': 1797, 'listen': 1798, 'glisten': 1799, 'rows': 1800, 'ructions': 1801, 'invitation': 1802, 'minute': 1803, 'bees': 1804, 'cask': 1805, 'judy': 1806, 'odaly': 1807, 'milliner': 1808, 'wink': 1809, 'peggy': 1810, 'mcgilligan': 1811, 'lashings': 1812, 'punch': 1813, 'cakes': 1814, 'bacon': 1815, 'tea': 1816, 'nolans': 1817, 'dolans': 1818, 'ogradys': 1819, 'sounded': 1820, 'taras': 1821, 'hall': 1822, 'nelly': 1823, 'gray': 1824, 'rat': 1825, 'catchers': 1826, 'doing': 1827, 'kinds': 1828, 'nonsensical': 1829, 'polkas': 1830, 'whirligig': 1831, 'julia': 1832, 'banished': 1833, 'nonsense': 1834, 'twist': 1835, 'jig': 1836, 'mavrone': 1837, 'mad': 1838, 'ceiling': 1839, 'brooks': 1840, 'academy': 1841, 'learning': 1842, 'learn': 1843, 'couples': 1844, 'groups': 1845, 'accident': 1846, 'happened': 1847, 'terrance': 1848, 'mccarthy': 1849, 'finnertys': 1850, 'hoops': 1851, 'cried': 1852, 'meelia': 1853, 'murther': 1854, 'gathered': 1855, 'carmody': 1856, 'further': 1857, 'satisfaction': 1858, 'midst': 1859, 'kerrigan': 1860, 'declared': 1861, 'painted': 1862, 'suppose': 1863, 'morgan': 1864, 'powerful': 1865, 'stretched': 1866, 'smashed': 1867, 'chaneys': 1868, 'runctions': 1869, 'lick': 1870, 'phelim': 1871, 'mchugh': 1872, 'replied': 1873, 'introduction': 1874, 'kicked': 1875, 'terrible': 1876, 'hullabaloo': 1877, 'piper': 1878, 'strangled': 1879, 'squeezed': 1880, 'bellows': 1881, 'chanters': 1882, 'entangled': 1883, 'gaily': 1884, 'mairis': 1885, 'hillways': 1886, 'myrtle': 1887, 'bracken': 1888, 'sheilings': 1889, 'sake': 1890, 'rowans': 1891, 'herring': 1892, 'meal': 1893, 'peat': 1894, 'creel': 1895, 'bairns': 1896, 'weel': 1897, 'toast': 1898, 'soar': 1899, 'blackbird': 1900, 'note': 1901, 'linnet': 1902, 'lure': 1903, 'cozy': 1904, 
'catch': 1905, 'company': 1906, 'harm': 1907, 'wit': 1908, 'recall': 1909, 'leisure': 1910, 'awhile': 1911, 'sorely': 1912, 'ruby': 1913, 'enthralled': 1914, 'sorry': 1915, 'theyd': 1916, 'falls': 1917, 'lot': 1918, 'tuned': 1919, 'bough': 1920, 'cow': 1921, 'chanting': 1922, 'melodious': 1923, 'scarce': 1924, 'soothed': 1925, 'solace': 1926, 'courtesy': 1927, 'salute': 1928, 'amiable': 1929, 'captive': 1930, 'slave': 1931, 'future': 1932, 'banter': 1933, 'enamour': 1934, 'indies': 1935, 'afford': 1936, 'transparently': 1937, 'flame': 1938, 'add': 1939, 'fuel': 1940, 'grant': 1941, 'desire': 1942, 'expire': 1943, 'wealth': 1944, 'damer': 1945, 'african': 1946, 'devonshire': 1947, 'lamp': 1948, 'alladin': 1949, 'genie': 1950, 'also': 1951, 'withdraw': 1952, 'tease': 1953, 'single': 1954, 'airy': 1955, 'embarrass': 1956, 'besides': 1957, 'almanack': 1958, 'useless': 1959, 'date': 1960, 'ware': 1961, 'rate': 1962, 'fragrance': 1963, 'loses': 1964, 'consumed': 1965, 'october': 1966, 'knowing': 1967, 'steer': 1968, 'blast': 1969, 'danger': 1970, 'farthing': 1971, 'affection': 1972, 'enjoy': 1973, 'choose': 1974, 'killarneys': 1975, 'sister': 1976, 'pains': 1977, 'loss': 1978, 'tuam': 1979, 'saluted': 1980, 'drank': 1981, 'pint': 1982, 'smother': 1983, 'reap': 1984, 'cut': 1985, 'goblins': 1986, 'bought': 1987, 'brogues': 1988, 'rattling': 1989, 'bogs': 1990, 'frightning': 1991, 'dogs': 1992, 'hunt': 1993, 'hare': 1994, 'follol': 1995, 'rah': 1996, 'mullingar': 1997, 'rested': 1998, 'limbs': 1999, 'blithe': 2000, 'heartfrom': 2001, 'paddys': 2002, 'cure': 2003, 'lassies': 2004, 'laughing': 2005, 'curious': 2006, 'style': 2007, 'twould': 2008, 'bubblin': 2009, 'hired': 2010, 'wages': 2011, 'required': 2012, 'almost': 2013, 'deprived': 2014, 'stroll': 2015, 'quality': 2016, 'locality': 2017, 'something': 2018, 'wobblin': 2019, 'enquiring': 2020, 'rogue': 2021, 'brogue': 2022, 'wasnt': 2023, 'vogue': 2024, 'spirits': 2025, 'falling': 2026, 'jumped': 2027, 'aboard': 2028, 'pigs': 2029, 'rigs': 2030, 'jigs': 2031, 'bubbling': 2032, 'holyhead': 2033, 'wished': 2034, 'instead': 2035, 'bouys': 2036, 'liverpool': 2037, 'safely': 2038, 'fool': 2039, 'boil': 2040, 'temper': 2041, 'losing': 2042, 'abusing': 2043, 'shillelagh': 2044, 'nigh': 2045, 'hobble': 2046, 'load': 2047, 'hurray': 2048, 'joined': 2049, 'affray': 2050, 'quitely': 2051, 'cleared': 2052, 'host': 2053, 'march': 2054, 'faces': 2055, 'farmstead': 2056, 'fishers': 2057, 'ban': 2058, 'vengeance': 2059, 'hapless': 2060, 'about': 2061, 'hemp': 2062, 'rope': 2063, 'clung': 2064, 'grim': 2065, 'array': 2066, 'earnest': 2067, 'stalwart': 2068, 'stainless': 2069, 'banner': 2070, 'marching': 2071, 'torn': 2072, 'furious': 2073, 'odds': 2074, 'keen': 2075, 'toomebridge': 2076, 'treads': 2077, 'upwards': 2078, 'traveled': 2079, 'quarters': 2080, 'below': 2081, 'hogshead': 2082, 'stack': 2083, 'stagger': 2084, 'dig': 2085, 'hole': 2086, 'couple': 2087, 'scratch': 2088, 'consolation': 2089, 'tyrant': 2090, 'remorseless': 2091, 'foe': 2092, 'lift': 2093, 'stranded': 2094, 'prince': 2095, 'edward': 2096, 'coffee': 2097, 'trace': 2098, 'fiddlin': 2099, 'dime': 2100, 'shy': 2101, 'hello': 2102, 'wintry': 2103, 'yellow': 2104, 'somewhere': 2105, 'written': 2106, 'begin': 2107, 'tap': 2108, 'caught': 2109, 'leap': 2110, 'clumsy': 2111, 'graceful': 2112, 'fiddlers': 2113, 'everywhere': 2114, 'boots': 2115, 'laughtcr': 2116, 'suits': 2117, 'easter': 2118, 'gowns': 2119, 'sailors': 2120, 'pianos': 2121, 'setting': 2122, 'someones': 2123, 'hats': 2124, 'rack': 
2125, 'chair': 2126, 'wooden': 2127, 'feels': 2128, 'touch': 2129, 'awaitin': 2130, 'thc': 2131, 'fiddles': 2132, 'closet': 2133, 'strings': 2134, 'tbe': 2135, 'covers': 2136, 'buttoned': 2137, 'sometimes': 2138, 'melody': 2139, 'passes': 2140, 'slight': 2141, 'lack': 2142, 'moved': 2143, 'homeward': 2144, 'swan': 2145, 'moves': 2146, 'goods': 2147, 'gear': 2148, 'din': 2149, 'rude': 2150, 'wherein': 2151, 'dwell': 2152, 'abandon': 2153, 'energy': 2154, 'blight': 2155, 'praties': 2156, 'sheep': 2157, 'cattle': 2158, 'taxes': 2159, 'unpaid': 2160, 'redeem': 2161, 'bleak': 2162, 'landlord': 2163, 'sheriff': 2164, 'spleen': 2165, 'heaved': 2166, 'sigh': 2167, 'bade': 2168, 'goodbye': 2169, 'stony': 2170, 'anguish': 2171, 'seeing': 2172, 'feeble': 2173, 'frame': 2174, 'wrapped': 2175, 'c�ta': 2176, 'm�r': 2177, 'unseen': 2178, 'stern': 2179, 'rally': 2180, 'cheer': 2181, 'revenge': 2182, 'waking': 2183, 'wisdom': 2184, 'dwelling': 2185, 'battleshield': 2186, 'dignity': 2187, 'shelter': 2188, 'heed': 2189, 'inheritance': 2190, 'heavem': 2191, 'heaven': 2192, 'victory': 2193, 'reach': 2194, 'whatever': 2195, 'befall': 2196, 'ruler': 2197, 'pleasant': 2198, 'rambling': 2199, 'board': 2200, 'followed': 2201, 'shortly': 2202, 'anchor': 2203, '23rd': 2204, 'lrelands': 2205, 'daughters': 2206, 'crowds': 2207, 'assembled': 2208, 'fulfill': 2209, 'jovial': 2210, 'conversations': 2211, 'neighbors': 2212, 'turning': 2213, 'tailor': 2214, 'quigley': 2215, 'bould': 2216, 'britches': 2217, 'lived': 2218, 'flying': 2219, 'dove': 2220, 'hiii': 2221, 'dreamt': 2222, 'joking': 2223, 'manys': 2224, 'cock': 2225, 'shrill': 2226, 'awoke': 2227, 'california': 2228, 'miles': 2229, 'banbridge': 2230, 'july': 2231, 'boreen': 2232, 'sheen': 2233, 'coaxing': 2234, 'elf': 2235, 'shake': 2236, 'bantry': 2237, 'onward': 2238, 'sped': 2239, 'gazed': 2240, 'passerby': 2241, 'gem': 2242, 'irelands': 2243, 'travelled': 2244, 'hit': 2245, 'career': 2246, 'square': 2247, 'surrendered': 2248, 'tenant': 2249, 'shawl': 2250, 'gown': 2251, 'crossroads': 2252, 'dress': 2253, 'try': 2254, 'sheeps': 2255, 'deludhering': 2256, 'yoke': 2257, 'rust': 2258, 'plow': 2259, 'fireside': 2260, 'sits': 2261, 'whistle': 2262, 'changing': 2263, 'fright': 2264, 'downfall': 2265, 'cornwall': 2266, 'parlour': 2267, 'passing': 2268, 'william': 2269, 'betray': 2270, 'guinea': 2271, 'walking': 2272, 'mounted': 2273, 'platform': 2274, 'deny': 2275, 'walked': 2276, 'margin': 2277, 'lough': 2278, 'leane': 2279, 'bloomed': 2280, 'whom': 2281, 'cap': 2282, 'cloak': 2283, 'glossy': 2284, 'pail': 2285, 'palm': 2286, 'venus': 2287, 'bank': 2288, 'travelians': 2289, 'babes': 2290, 'freebirds': 2291, 'grew': 2292, 'matters': 2293, 'famine': 2294, 'rebelled': 2295, 'windswept': 2296, 'harbour': 2297, 'botany': 2298, 'whilst': 2299, 'wan': 2300, 'cloud': 2301, 'shannons': 2302, 'returnd': 2303, 'doubts': 2304, 'fears': 2305, 'aching': 2306, 'seemd': 2307, 'mingling': 2308, 'flood': 2309, 'path': 2310, 'wrath': 2311, 'lamenting': 2312, 'sudden': 2313, 'kissd': 2314, 'showrs': 2315, 'flowing': 2316, 'laughd': 2317, 'beam': 2318, 'soared': 2319, 'aloft': 2320, 'phantom': 2321, 'outspread': 2322, 'throbbing': 2323, 'hid': 2324, 'treasures': 2325, 'pots': 2326, 'tin': 2327, 'cans': 2328, 'mash': 2329, 'bran': 2330, 'barney': 2331, 'peeled': 2332, 'searching': 2333, 'connemara': 2334, 'butcher': 2335, 'quart': 2336, 'bottle': 2337, 'help': 2338, 'gate': 2339, 'glory': 2340, 'lane': 2341, 'village': 2342, 'church': 2343, 'spire': 2344, 'graveyard': 2345, 'baby': 2346, 
'blessing': 2347, 'hoping': 2348, 'trust': 2349, 'strength': 2350, 'thank': 2351, 'bidding': 2352, 'bread': 2353, 'shines': 2354, 'fifty': 2355, 'often': 2356, 'shut': 2357, 'frisky': 2358, 'pig': 2359, 'whisky': 2360, 'uncle': 2361, 'enlisted': 2362, 'trudged': 2363, 'bosom': 2364, 'daisy': 2365, 'drubbing': 2366, 'shirts': 2367, 'battle': 2368, 'blows': 2369, 'pate': 2370, 'bothered': 2371, 'rarely': 2372, 'dropped': 2373, 'honest': 2374, 'thinks': 2375, 'eight': 2376, 'score': 2377, 'basin': 2378, 'zoo': 2379, 'everybody': 2380, 'calls': 2381, 'trades': 2382, 'dinner': 2383, 'slip': 2384, 'corner': 2385, 'barn': 2386, 'currabawn': 2387, 'shocking': 2388, 'wet': 2389, 'raindrops': 2390, 'rats': 2391, 'peek': 2392, 'waken': 2393, 'spotted': 2394, 'apron': 2395, 'calico': 2396, 'blouse': 2397, 'frighten': 2398, 'afraid': 2399, 'flaxen': 2400, 'haired': 2401, 'rags': 2402, 'tags': 2403, 'leggins': 2404, 'collar': 2405, 'tie': 2406, 'goggles': 2407, 'fashioned': 2408, 'bag': 2409, 'bulging': 2410, 'sack': 2411, 'peeping': 2412, 'skin': 2413, 'rink': 2414, 'doodle': 2415, 'getting': 2416, 'raked': 2417, 'gladness': 2418, 'tuning': 2419, 'fills': 2420, 'eily': 2421, 'prouder': 2422, 'thady': 2423, 'boldly': 2424, 'lasses': 2425, 'fled': 2426, 'silent': 2427, 'glad': 2428, 'echo': 2429, 'companions': 2430, 'soars': 2431, 'enchanted': 2432, 'granted': 2433, 'adoration': 2434, 'gives': 2435, 'joyous': 2436, 'elation': 2437, 'covered': 2438, 'winter': 2439, 'riding': 2440, 'cherry': 2441, 'coal': 2442, 'falter': 2443, 'bowed': 2444, 'bonnet': 2445, 'courteous': 2446, 'looks': 2447, 'engaging': 2448, 'sell': 2449, 'purse': 2450, 'yearly': 2451, 'need': 2452, 'market': 2453, 'gain': 2454, 'dearly': 2455, 'tarry': 2456, 'although': 2457, 'parlay': 2458, 'ranks': 2459, 'girded': 2460, 'slung': 2461, 'warrior': 2462, 'bard': 2463, 'betrays': 2464, 'rights': 2465, 'faithful': 2466, 'chords': 2467, 'asunder': 2468, 'sully': 2469, 'bravry': 2470, 'londons': 2471, 'sight': 2472, 'workin': 2473, 'sow': 2474, 'wheat': 2475, 'gangs': 2476, 'sweep': 2477, 'expressed': 2478, 'london': 2479, 'top': 2480, 'dresses': 2481, 'bath': 2482, 'startin': 2483, 'fashions': 2484, 'mccree': 2485, 'nature': 2486, 'designed': 2487, 'complexions': 2488, 'cream': 2489, 'regard': 2490, 'sip': 2491, 'colors': 2492, 'wait': 2493, 'waitin': 2494, 'sweeps': 2495, 'beauing': 2496, 'belling': 2497, 'windows': 2498, 'cursing': 2499, 'faster': 2500, 'waiters': 2501, 'bailiffs': 2502, 'duns': 2503, 'bacchus': 2504, 'begotten': 2505, 'politicians': 2506, 'funds': 2507, 'dadda': 2508, 'living': 2509, 'drives': 2510, 'having': 2511, 'racking': 2512, 'tenants': 2513, 'stewards': 2514, 'teasing': 2515, 'raising': 2516, 'wishing': 2517, 'sunny': 2518, 'doves': 2519, 'coo': 2520, 'neath': 2521, 'sunbeam': 2522, 'robin': 2523, 'waters': 2524, 'larks': 2525, 'join': 2526, 'breaks': 2527, 'oftimes': 2528, 'lilies': 2529, 'declining': 2530, 'vale': 2531, 'shades': 2532, 'mantle': 2533, 'spreading': 2534, 'listening': 2535, 'shedding': 2536, 'beginning': 2537, 'spinning': 2538, 'blind': 2539, 'drowsily': 2540, 'knitting': 2541, 'cheerily': 2542, 'noiselessly': 2543, 'whirring': 2544, 'foots': 2545, 'stirring': 2546, 'sprightly': 2547, 'chara': 2548, 'tapping': 2549, 'ivy': 2550, 'flapping': 2551, 'somebody': 2552, 'sighing': 2553, 'autumn': 2554, 'noise': 2555, 'chirping': 2556, 'holly': 2557, 'shoving': 2558, 'wrong': 2559, 'coolin': 2560, 'casement': 2561, 'rove': 2562, 'moons': 2563, 'brightly': 2564, 'shakes': 2565, 'lays': 2566, 'longs': 2567, 
'lingers': 2568, 'glance': 2569, 'puts': 2570, 'lazily': 2571, 'easily': 2572, 'lowly': 2573, 'reels': 2574, 'noiseless': 2575, 'leaps': 2576, 'ere': 2577, 'lovers': 2578, 'roved': 2579, 'verdant': 2580, 'braes': 2581, 'skreen': 2582, 'countrie': 2583, 'foreign': 2584, 'strand': 2585, 'dewy': 2586, 'climb': 2587, 'rob': 2588, 'boat': 2589, 'sails': 2590, 'loaded': 2591, 'sink': 2592, 'leaned': 2593, 'oak': 2594, 'trusty': 2595, 'false': 2596, 'reached': 2597, 'pricked': 2598, 'waxes': 2599, 'fades': 2600, 'wholl': 2601, 'cockle': 2602, 'gloom': 2603, 'news': 2604, 'forbid': 2605, 'patricks': 2606, 'napper': 2607, 'tandy': 2608, 'hows': 2609, 'distressful': 2610, 'englands': 2611, 'remind': 2612, 'pull': 2613, 'throw': 2614, 'sod': 2615, 'root': 2616, 'underfoot': 2617, 'laws': 2618, 'blades': 2619, 'growin': 2620, 'dare': 2621, 'show': 2622, 'caubeen': 2623, 'year': 2624, 'returning': 2625, 'store': 2626, 'ale': 2627, 'frequent': 2628, 'landlady': 2629, 'credit': 2630, 'custom': 2631, 'sovereigns': 2632, 'landladys': 2633, 'wines': 2634, 'confess': 2635, 'pardon': 2636, 'prodigal': 2637, 'caress': 2638, 'forgive': 2639, 'ofttimes': 2640, 'wondering': 2641, 'powr': 2642, 'beguile': 2643, 'teardrop': 2644, 'lilting': 2645, 'laughters': 2646, 'twinkle': 2647, 'lilt': 2648, 'seems': 2649, 'linnets': 2650, 'real': 2651, 'regret': 2652, 'throughout': 2653, 'youths': 2654, 'chance': 2655, 'spied': 2656, 'receiver': 2657, 'counted': 2658, 'penny': 2659, 'bu': 2660, 'rungum': 2661, 'chamber': 2662, 'course': 2663, 'charges': 2664, 'filled': 2665, 'ready': 2666, 'footmen': 2667, 'likewise': 2668, 'draw': 2669, 'pistol': 2670, 'couldnt': 2671, 'shoot': 2672, 'robbin': 2673, 'jailer': 2674, 'tight': 2675, 'fisted': 2676, 'army': 2677, 'stationed': 2678, 'cork': 2679, 'roamin': 2680, 'swear': 2681, 'treat': 2682, 'sportin': 2683, 'hurley': 2684, 'bollin': 2685, 'maids': 2686, 'summertime': 2687, 'pluck': 2688, 'yon': 2689}
total words: 2690
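###Markdown
As an optional aside (not part of the original lab), you can query the mapping directly: `word_index` maps each word to an integer id, `index_word` is the reverse mapping, and `texts_to_sequences()` converts a whole line into its id sequence. A minimal sketch, reusing the `tokenizer` fitted above:
###Code
# Look up a single word's id (this corpus assigns 22 to 'love', as shown in the dictionary above)
print(tokenizer.word_index['love'])

# Reverse lookup: the most frequent word gets index 1
print(tokenizer.index_word[1])

# Convert a full line into its id sequence
print(tokenizer.texts_to_sequences(['come all ye maidens young']))
###Output
_____no_output_____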
###Markdown
Preprocessing the Dataset
Next, you will generate the inputs and labels for your model. The process will be identical to the previous lab. The `xs` or inputs to the model will be padded sequences, while the `ys` or labels are one-hot encoded arrays.
###Code
# Initialize the sequences list
input_sequences = []
# Loop over every line
for line in corpus:
# Tokenize the current line
token_list = tokenizer.texts_to_sequences([line])[0]
# Loop over the line several times to generate the subphrases
for i in range(1, len(token_list)):
# Generate the subphrase
n_gram_sequence = token_list[:i+1]
# Append the subphrase to the sequences list
input_sequences.append(n_gram_sequence)
# Get the length of the longest sequence
max_sequence_len = max([len(x) for x in input_sequences])
# Pad all sequences
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
# Create inputs and labels by splitting off the last token of each subphrase
xs, labels = input_sequences[:,:-1],input_sequences[:,-1]
# Convert the label into one-hot arrays
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
###Output
_____no_output_____
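###Markdown
To see what the loop above does, it helps to trace a single line through the same steps by hand. The sketch below is an optional aside (the example line is made up for illustration, though every word is in the vocabulary); it reuses the `tokenizer`, `pad_sequences`, and `max_sequence_len` defined above:
###Code
# Hypothetical example line (not necessarily in the corpus), used only for illustration
sample_line = 'and we danced all night to the morning'

# Tokenize the line into its id sequence
sample_tokens = tokenizer.texts_to_sequences([sample_line])[0]
print(f'tokens: {sample_tokens}')

# Build the n-gram subphrases exactly like the loop above
sample_subphrases = [sample_tokens[:i+1] for i in range(1, len(sample_tokens))]
for subphrase in sample_subphrases:
    print(subphrase)

# Pre-pad to the corpus-wide maximum length, then split each row into input and label
sample_padded = pad_sequences(sample_subphrases, maxlen=max_sequence_len, padding='pre')
print(f'first input: {sample_padded[0, :-1]}')
print(f'first label: {sample_padded[0, -1]}')
###Output
_____no_output_____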
###Markdown
You can then print some of the examples as a sanity check.
###Code
# Get sample sentence
sentence = corpus[0].split()
print(f'sample sentence: {sentence}')
# Initialize token list
token_list = []
# Look up the indices of each word and append to the list
for word in sentence:
token_list.append(tokenizer.word_index[word])
# Print the token list
print(token_list)
# Pick element
elem_number = 5
# Print token list and phrase
print(f'token list: {xs[elem_number]}')
print(f'decoded to text: {tokenizer.sequences_to_texts([xs[elem_number]])}')
# Print label
print(f'one-hot label: {ys[elem_number]}')
print(f'index of label: {np.argmax(ys[elem_number])}')
# Pick element
elem_number = 4
# Print token list and phrase
print(f'token list: {xs[elem_number]}')
print(f'decoded to text: {tokenizer.sequences_to_texts([xs[elem_number]])}')
# Print label
print(f'one-hot label: {ys[elem_number]}')
print(f'index of label: {np.argmax(ys[elem_number])}')
###Output
token list: [ 0 0 0 0 0 0 0 0 0 0 51 12 96 1217
48]
decoded to text: ['come all ye maidens young']
one-hot label: [0. 0. 1. ... 0. 0. 0.]
index of label: 2
###Markdown
Build and compile the Model
Next, you will build and compile the model. We placed some of the hyperparameters at the top of the code cell so you can easily tweak them later if you want.
###Code
# Hyperparameters
embedding_dim = 100
lstm_units = 150
learning_rate = 0.01
# Build the model
model = Sequential([
Embedding(total_words, embedding_dim, input_length=max_sequence_len-1),
Bidirectional(LSTM(lstm_units)),
Dense(total_words, activation='softmax')
])
# Use categorical crossentropy because this is a multi-class problem
model.compile(
loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
metrics=['accuracy']
)
# Print the model summary
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, 15, 100) 269000
bidirectional (Bidirectiona (None, 300) 301200
l)
dense (Dense) (None, 2690) 809690
=================================================================
Total params: 1,379,890
Trainable params: 1,379,890
Non-trainable params: 0
_________________________________________________________________
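###Markdown
As a quick check on the summary above, the parameter counts can be reproduced by hand from the layer definitions. This is a minimal sketch assuming the standard Keras parameterization of `Embedding`, `LSTM` (4 gates, each with input, recurrent, and bias weights), and `Dense`.
###Code
# Minimal sketch: reproduce the parameter counts reported by model.summary()
embedding_params = total_words * embedding_dim                   # 2690 * 100 = 269,000
lstm_params = 4 * lstm_units * (embedding_dim + lstm_units + 1)  # gates * (input + recurrent + bias)
bidirectional_params = 2 * lstm_params                           # forward + backward = 301,200
dense_params = (2 * lstm_units + 1) * total_words                # 301 * 2690 = 809,690
print(embedding_params, bidirectional_params, dense_params)
print('total:', embedding_params + bidirectional_params + dense_params)  # 1,379,890
###Output
_____no_output_____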
###Markdown
Train the model

From the model summary above, you'll notice that the number of trainable params is much larger than in the previous lab, which usually means a longer training time. It will take roughly 7 seconds per epoch with the GPU enabled in Colab, and you'll reach around 76% accuracy after 100 epochs.
###Code
epochs = 100
# Train the model
history = model.fit(xs, ys, epochs=epochs)
###Output
Epoch 1/100
377/377 [==============================] - 15s 20ms/step - loss: 6.6652 - accuracy: 0.0690
Epoch 2/100
377/377 [==============================] - 7s 18ms/step - loss: 5.7850 - accuracy: 0.1124
Epoch 3/100
377/377 [==============================] - 7s 18ms/step - loss: 4.9840 - accuracy: 0.1552
Epoch 4/100
377/377 [==============================] - 7s 18ms/step - loss: 4.0857 - accuracy: 0.2246
Epoch 5/100
377/377 [==============================] - 7s 19ms/step - loss: 3.2010 - accuracy: 0.3244
Epoch 6/100
377/377 [==============================] - 7s 18ms/step - loss: 2.5045 - accuracy: 0.4352
Epoch 7/100
377/377 [==============================] - 7s 19ms/step - loss: 1.9817 - accuracy: 0.5349
Epoch 8/100
377/377 [==============================] - 7s 18ms/step - loss: 1.5584 - accuracy: 0.6306
Epoch 9/100
377/377 [==============================] - 7s 19ms/step - loss: 1.3190 - accuracy: 0.6781
Epoch 10/100
377/377 [==============================] - 7s 18ms/step - loss: 1.1752 - accuracy: 0.7176
Epoch 11/100
377/377 [==============================] - 7s 19ms/step - loss: 1.0530 - accuracy: 0.7441
Epoch 12/100
377/377 [==============================] - 7s 18ms/step - loss: 1.0170 - accuracy: 0.7489
Epoch 13/100
377/377 [==============================] - 7s 19ms/step - loss: 1.0103 - accuracy: 0.7464
Epoch 14/100
377/377 [==============================] - 7s 18ms/step - loss: 1.0417 - accuracy: 0.7392
Epoch 15/100
377/377 [==============================] - 7s 18ms/step - loss: 1.1171 - accuracy: 0.7130
Epoch 16/100
377/377 [==============================] - 7s 18ms/step - loss: 1.0951 - accuracy: 0.7187
Epoch 17/100
377/377 [==============================] - 7s 18ms/step - loss: 1.0530 - accuracy: 0.7303
Epoch 18/100
377/377 [==============================] - 7s 18ms/step - loss: 1.0874 - accuracy: 0.7250
Epoch 19/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9209 - accuracy: 0.7654
Epoch 20/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8744 - accuracy: 0.7759
Epoch 21/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9015 - accuracy: 0.7668
Epoch 22/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9520 - accuracy: 0.7554
Epoch 23/100
377/377 [==============================] - 7s 19ms/step - loss: 1.0149 - accuracy: 0.7366
Epoch 24/100
377/377 [==============================] - 7s 19ms/step - loss: 1.0029 - accuracy: 0.7382
Epoch 25/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9719 - accuracy: 0.7446
Epoch 26/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9022 - accuracy: 0.7625
Epoch 27/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8892 - accuracy: 0.7643
Epoch 28/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8651 - accuracy: 0.7704
Epoch 29/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8485 - accuracy: 0.7795
Epoch 30/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9545 - accuracy: 0.7559
Epoch 31/100
377/377 [==============================] - 7s 18ms/step - loss: 1.0020 - accuracy: 0.7391
Epoch 32/100
377/377 [==============================] - 7s 18ms/step - loss: 1.0551 - accuracy: 0.7275
Epoch 33/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9589 - accuracy: 0.7487
Epoch 34/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9417 - accuracy: 0.7500
Epoch 35/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9047 - accuracy: 0.7653
Epoch 36/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8531 - accuracy: 0.7748
Epoch 37/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8800 - accuracy: 0.7731
Epoch 38/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8848 - accuracy: 0.7692
Epoch 39/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9185 - accuracy: 0.7633
Epoch 40/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9373 - accuracy: 0.7545
Epoch 41/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9689 - accuracy: 0.7451
Epoch 42/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9442 - accuracy: 0.7539
Epoch 43/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9024 - accuracy: 0.7626
Epoch 44/100
377/377 [==============================] - 7s 19ms/step - loss: 0.8592 - accuracy: 0.7727
Epoch 45/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8303 - accuracy: 0.7815
Epoch 46/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8815 - accuracy: 0.7629
Epoch 47/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9327 - accuracy: 0.7539
Epoch 48/100
377/377 [==============================] - 7s 18ms/step - loss: 1.0105 - accuracy: 0.7436
Epoch 49/100
377/377 [==============================] - 7s 18ms/step - loss: 1.0363 - accuracy: 0.7346
Epoch 50/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9406 - accuracy: 0.7551
Epoch 51/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9124 - accuracy: 0.7635
Epoch 52/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8798 - accuracy: 0.7711
Epoch 53/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8597 - accuracy: 0.7733
Epoch 54/100
377/377 [==============================] - 7s 19ms/step - loss: 0.8656 - accuracy: 0.7702
Epoch 55/100
377/377 [==============================] - 7s 19ms/step - loss: 0.8935 - accuracy: 0.7676
Epoch 56/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8799 - accuracy: 0.7695
Epoch 57/100
377/377 [==============================] - 7s 19ms/step - loss: 0.8488 - accuracy: 0.7732
Epoch 58/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8697 - accuracy: 0.7721
Epoch 59/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8617 - accuracy: 0.7745
Epoch 60/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8399 - accuracy: 0.7801
Epoch 61/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8768 - accuracy: 0.7693
Epoch 62/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9151 - accuracy: 0.7632
Epoch 63/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9551 - accuracy: 0.7528
Epoch 64/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9713 - accuracy: 0.7460
Epoch 65/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9848 - accuracy: 0.7435
Epoch 66/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9467 - accuracy: 0.7508
Epoch 67/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9209 - accuracy: 0.7603
Epoch 68/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9629 - accuracy: 0.7521
Epoch 69/100
377/377 [==============================] - 7s 19ms/step - loss: 0.8990 - accuracy: 0.7644
Epoch 70/100
377/377 [==============================] - 7s 19ms/step - loss: 0.8669 - accuracy: 0.7743
Epoch 71/100
377/377 [==============================] - 7s 19ms/step - loss: 0.8290 - accuracy: 0.7839
Epoch 72/100
377/377 [==============================] - 7s 19ms/step - loss: 0.8401 - accuracy: 0.7786
Epoch 73/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8137 - accuracy: 0.7874
Epoch 74/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8523 - accuracy: 0.7785
Epoch 75/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8450 - accuracy: 0.7774
Epoch 76/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8348 - accuracy: 0.7792
Epoch 77/100
377/377 [==============================] - 7s 19ms/step - loss: 0.8892 - accuracy: 0.7629
Epoch 78/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9445 - accuracy: 0.7544
Epoch 79/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9629 - accuracy: 0.7480
Epoch 80/100
377/377 [==============================] - 7s 18ms/step - loss: 1.0003 - accuracy: 0.7420
Epoch 81/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9887 - accuracy: 0.7459
Epoch 82/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9547 - accuracy: 0.7517
Epoch 83/100
377/377 [==============================] - 7s 19ms/step - loss: 0.8965 - accuracy: 0.7669
Epoch 84/100
377/377 [==============================] - 7s 19ms/step - loss: 0.8121 - accuracy: 0.7863
Epoch 85/100
377/377 [==============================] - 7s 19ms/step - loss: 0.7976 - accuracy: 0.7917
Epoch 86/100
377/377 [==============================] - 7s 19ms/step - loss: 0.7811 - accuracy: 0.7964
Epoch 87/100
377/377 [==============================] - 7s 18ms/step - loss: 0.7863 - accuracy: 0.7963
Epoch 88/100
377/377 [==============================] - 7s 19ms/step - loss: 0.8458 - accuracy: 0.7788
Epoch 89/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9238 - accuracy: 0.7650
Epoch 90/100
377/377 [==============================] - 7s 19ms/step - loss: 1.1304 - accuracy: 0.7234
Epoch 91/100
377/377 [==============================] - 7s 19ms/step - loss: 1.0912 - accuracy: 0.7244
Epoch 92/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9905 - accuracy: 0.7487
Epoch 93/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9555 - accuracy: 0.7579
Epoch 94/100
377/377 [==============================] - 7s 19ms/step - loss: 0.9210 - accuracy: 0.7717
Epoch 95/100
377/377 [==============================] - 7s 19ms/step - loss: 0.8493 - accuracy: 0.7797
Epoch 96/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8310 - accuracy: 0.7856
Epoch 97/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8132 - accuracy: 0.7921
Epoch 98/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8359 - accuracy: 0.7856
Epoch 99/100
377/377 [==============================] - 7s 18ms/step - loss: 0.8959 - accuracy: 0.7799
Epoch 100/100
377/377 [==============================] - 7s 18ms/step - loss: 0.9406 - accuracy: 0.7640
###Markdown
You can visualize the accuracy below to see how it fluctuates as the training progresses.
###Code
import matplotlib.pyplot as plt
# Plot utility
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.show()
# Visualize the accuracy
plot_graphs(history, 'accuracy')
###Output
_____no_output_____
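###Markdown
The same utility can be reused to inspect the training loss, which makes the fluctuations in the later epochs easier to spot. This is just a usage example of the `plot_graphs` helper defined above.
###Code
# Visualize the loss with the same plotting utility
plot_graphs(history, 'loss')
###Output
_____no_output_____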
###Markdown
Generating Text

Now you can let the model make its own songs or poetry! Because it is trained on a much larger corpus, the results below should contain fewer repetitions than before. The code below picks the next word based on the highest probability output.
###Code
# Define seed text
seed_text = "help me obi-wan kinobi youre my only hope"
# Define total words to predict
next_words = 100
# Loop until desired length is reached
for _ in range(next_words):
# Convert the seed text to a token sequence
token_list = tokenizer.texts_to_sequences([seed_text])[0]
# Pad the sequence
token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
# Feed to the model and get the probabilities for each index
probabilities = model.predict(token_list)
# Get the index with the highest probability
predicted = np.argmax(probabilities, axis=-1)[0]
# Ignore if index is 0 because that is just the padding.
if predicted != 0:
# Look up the word associated with the index.
output_word = tokenizer.index_word[predicted]
# Combine with the seed text
seed_text += " " + output_word
# Print the result
print(seed_text)
###Output
help me obi-wan kinobi youre my only hope one he said once young and pretty my love my love and gone i ended your eyes opened appear my love is gone by love grow cure gone gone free gone gone by your waters eyes gone for now since gone gone gone gone calling gone gone gone free whack law agin had gone frivolity gone shine elation had had gone gone frivolity now right view took calls me shaken gone gone gone gone gone by night under went over gilgarra mountain side gone gone gone gone gone gone gone live gone i returnd my pistol shed town on beneath
###Markdown
Here again is the code that gets the top 3 predictions and picks one at random.
###Code
# Define seed text
seed_text = "help me obi-wan kinobi youre my only hope"
# Define total words to predict
next_words = 100
# Loop until desired length is reached
for _ in range(next_words):
# Convert the seed text to a token sequence
token_list = tokenizer.texts_to_sequences([seed_text])[0]
# Pad the sequence
token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
# Feed to the model and get the probabilities for each index
probabilities = model.predict(token_list)
# Pick a random number from [1,2,3]
choice = np.random.choice([1,2,3])
# Sort the probabilities in ascending order
# and get the random choice from the end of the array
predicted = np.argsort(probabilities)[0][-choice]
# Ignore if index is 0 because that is just the padding.
if predicted != 0:
# Look up the word associated with the index.
output_word = tokenizer.index_word[predicted]
# Combine with the seed text
seed_text += " " + output_word
# Print the result
print(seed_text)
###Output
help me obi-wan kinobi youre my only hope who gave me on the man the name i eyes verdantly me mothers morn gone thing love grow cure eyes gone grey more your eyes eyes gone i gone gone or now now told its eyes eyes eyes eyes agin my eyes fearless my encumbered gone love thing my love my love my still blind sit fuel gone your gone i shoving sad here letters your slip gone sweep on your waters so sweet sounds gone your eyes lies bees peek eyes agin merry late too eyes climb much squall keep lanigans had i gone your eyes gone your gone
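###Markdown
The two cells above differ only in how the next word index is chosen. As a sketch (not part of the original lab), a small hypothetical helper like `generate_text` below folds both strategies into one function through a `top_k` parameter: `top_k=1` reproduces the greedy argmax loop, while larger values sample uniformly from the top-k predictions as in the previous cell. It relies on the `tokenizer`, `model`, and `max_sequence_len` objects already defined in this notebook.
###Code
# Minimal sketch (hypothetical helper): unify the two decoding strategies above
def generate_text(seed_text, next_words=100, top_k=1):
    for _ in range(next_words):
        # Convert the running text to a padded token sequence
        token_list = tokenizer.texts_to_sequences([seed_text])[0]
        token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
        # Get the probabilities for each word index
        probabilities = model.predict(token_list)
        # Take one of the top_k most likely indices (top_k=1 is plain argmax)
        choice = np.random.choice(np.arange(1, top_k + 1))
        predicted = np.argsort(probabilities)[0][-choice]
        # Skip the padding index
        if predicted != 0:
            seed_text += " " + tokenizer.index_word[predicted]
    return seed_text

# Example: greedy decoding vs. top-3 sampling
print(generate_text("help me obi-wan kinobi youre my only hope", next_words=20, top_k=1))
print(generate_text("help me obi-wan kinobi youre my only hope", next_words=20, top_k=3))
###Output
_____no_output_____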
###Markdown
Ungraded Lab: Generating Text from Irish Lyrics

In the previous lab, you trained a model on just a single song. You might have found that the output text can quickly become gibberish or repetitive. Even if you tweak the hyperparameters, the model will still be limited by its vocabulary of only 263 words. The model will be more flexible if you train it on a much larger corpus, and that's what you'll be doing in this lab. You will use lyrics from more Irish songs and then see what the generated text looks like. You will also see how this impacts the process from data preparation to model training. Let's get started!

Imports
###Code
import tensorflow as tf
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
###Output
_____no_output_____
###Markdown
Building the Word Vocabulary

You will first download the lyrics dataset. These will be from a compilation of traditional Irish songs and you can see them [here](https://github.com/https-deeplearning-ai/tensorflow-1-public/blob/main/C3/W4/misc/Laurences_generated_poetry.txt).
###Code
# Download the dataset
!gdown --id 15UqmiIm0xwh9mt0IYq2z3jHaauxQSTQT
###Output
C:\Users\devas\AppData\Local\pypoetry\Cache\virtualenvs\tensorflow-1-public-cSeBuBzT-py3.7\lib\site-packages\gdown\cli.py:131: FutureWarning: Option `--id` was deprecated in version 4.3.1 and will be removed in 5.0. You don't need to pass it anymore to use a file ID.
category=FutureWarning,
Downloading...
From: https://drive.google.com/uc?id=15UqmiIm0xwh9mt0IYq2z3jHaauxQSTQT
To: D:\Source\Sandoxes\tensorflow-1-public\C3\W4\ungraded_labs\irish-lyrics-eof.txt
0%| | 0.00/69.0k [00:00<?, ?B/s]
100%|##########| 69.0k/69.0k [00:00<00:00, 68.8MB/s]
###Markdown
Next, you will lowercase and split the plain text into a list of sentences:
###Code
# Load the dataset
data = open('./irish-lyrics-eof.txt').read()
# Lowercase and split the text
corpus = data.lower().split("\n")
# Preview the result
print(corpus)
###Output
['come all ye maidens young and fair', 'and you that are blooming in your prime', 'always beware and keep your garden fair', 'let no man steal away your thyme', 'for thyme it is a precious thing', 'and thyme brings all things to my mind', 'nlyme with all its flavours, along with all its joys', 'thyme, brings all things to my mind', 'once i and a bunch of thyme', 'i thought it never would decay', 'then came a lusty sailor', 'who chanced to pass my way', 'and stole my bunch of thyme away', 'the sailor gave to me a rose', 'a rose that never would decay', 'he gave it to me to keep me reminded', 'of when he stole my thyme away', 'sleep, my child, and peace attend thee', 'all through the night', 'guardian angels god will send thee', 'soft the drowsy hours are creeping', 'hill and dale in slumber sleeping', 'i my loving vigil keeping', 'while the moon her watch is keeping', 'while the weary world is sleeping', 'oer thy spirit gently stealing', 'visions of delight revealing', 'breathes a pure and holy feeling', 'though i roam a minstrel lonely', 'my true harp shall praise sing only', 'loves young dream, alas, is over', 'yet my strains of love shall hover', 'near the presence of my lover', 'hark, a solemn bell is ringing', 'clear through the night', 'thou, my love, art heavenward winging', 'home through the night', 'earthly dust from off thee shaken', 'soul immortal shalt thou awaken', 'with thy last dim journey taken', 'oh please neer forget me though waves now lie oer me', 'i was once young and pretty and my spirit ran free', 'but destiny tore me from country and loved ones', 'and from the new land i was never to see.', 'a poor emigrants daughter too frightened to know', 'i was leaving forever the land of my soul', 'amid struggle and fear my parents did pray', 'to place courage to leave oer the longing to stay.', 'they spoke of a new land far away cross the sea', 'and of peace and good fortune for my brothers and me', 'so we parted from townland with much weeping and pain', 'kissed the loved ones and the friends we would neer see again.', 'the vessel was crowded with disquieted folk', 'the escape from past hardship sustaining their hope', 'but as the last glimpse of ireland faded into the mist', 'each one fought back tears and felt strangely alone.', 'the seas roared in anger, making desperate our plight', 'and a fever came oer me that worsened next night', 'then delirium possessed me and clouded my mind', 'and i for a moment saw that land left behind.', 'i could hear in the distance my dear mothers wailing', 'and the prayers of three brothers that id see no more', 'and i felt fathers tears as he begged for forgiveness', 'for seeking a new life on the still distant shore.', 'over in killarney', 'many years ago,', 'me mither sang a song to me', 'in tones so sweet and low.', 'just a simple little ditty,', 'in her good ould irish way,', 'and ld give the world if she could sing', 'that song to me this day.', 'too-ra-loo-ra-loo-ral, too-ra-loo-ra-li,', 'too-ra-loo-ra-loo-ral, hush now, dont you cry!', 'too-ra-loo-ra-loo-ral, thats an irish lullaby.', 'oft in dreams i wander', 'to that cot again,', 'i feel her arms a-huggin me', 'as when she held me then.', 'and i hear her voice a -hummin', 'to me as in days of yore,', 'when she used to rock me fast asleep', 'outside the cabin door.', 'and who are you, me pretty fair maid', 'and who are you, me honey?', 'she answered me quite modestly:', 'i am me mothers darling.', 'with me too-ry-ay', 'fol-de-diddle-day', 'di-re fol-de-diddle', 'dai-rie oh.', 'and 
will you come to me mothers house,', 'when the sun is shining clearly', 'ill open the door and ill let you in', 'and divil o one would hear us.', 'so i went to her house in the middle of the night', 'when the moon was shining clearly', 'shc opened the door and she let me in', 'and divil the one did hear us.', 'she took me horse by the bridle and the bit', 'and she led him to the stable', 'saying theres plenty of oats for a soldiers horse,', 'to eat it if hes able.', 'then she took me by the lily-white hand', 'and she led me to the table', 'saying: theres plenty of wine for a soldier boy,', 'to drink it if youre able.', 'then i got up and made the bed', 'and i made it nice and aisy', 'then i got up and laid her down', 'saying: lassie, are you able?', 'and there we lay till the break of day', 'and divil a one did hear us', 'then i arose and put on me clothes', 'saying: lassie, i must leave you.', 'and when will you return again', 'and when will we get married', 'when broken shells make christmas bells', 'we might well get married', 'in 1803 we sailed out to sea', 'out from the sweet town of derry', 'for australia bound if we didnt all drown', 'and the marks of our fetters we carried.', 'in the rusty iron chains we sighed for our wains', 'as our good wives we left in sorrow.', 'as the mainsails unfurled our curses we hurled', 'on the english and thoughts of tomorrow.', 'oh oh oh oh i wish i was back home in derry.', 'i cursed them to hell as our bow fought the swell.', 'our ship danced like a moth in the firelights.', 'white horses rode high as the devil passed by', 'taking souls to hades by twilight.', 'five weeks out to sea we were now forty-three', 'our comrades we buried each morning.', 'in our own slime we were lost in a time.', 'endless night without dawning.', 'van diemans land is a hell for a man', 'to live out his life in slavery.', 'when the climate is raw and the gun makes the law.', 'neither wind nor rain cares for bravery.', 'twenty years have gone by and ive ended me bond', 'and comrades ghosts are behind me.', 'a rebel i came and iii die the same.', 'on the cold winds of night you will find me', 'on the banks of the roses, my love and i sat down', 'and i took out my violin to play my love a tune', 'in the middle of the tune, o she sighed and she said', 'o johnny, lovely johnny, would you leave me', 'o when i was a young man, i heard my father say', 'that hed rather see me dead and buried in the clay', 'sooner than be married to any runaway', 'by the lovely sweet banks of the roses', 'o then i am no runaway and soon ill let them know', 'i can take a good glass or leave it alone', 'and the man that doesnt like me, he can keep', 'his daughter home', 'and young johnny will go roving with another', 'and if ever i get married, twill be in the month of may', 'when the leaves they are green and the meadows', 'they are gay', 'and i and my true love can sit and sport and play', 'on the lovely sweet banks of the roses', 'but black is the colour of my true loves hair.', 'his face is like some rosy fair,', 'the prettiest face and the neatest hands,', 'i love the ground whereon he stands.', 'i love my love and well he knows', 'i love the ground whereon he goes', 'if you no more on earth i see,', 'i cant serve you as you have me.', 'the winters passed and the leaves are green', 'the time is passed that we have seen,', 'but still i hope the time will come', 'when you and i shall be as one.', 'i go to the clyde for to mourn and weep,', 'but satisfied i never could sleep,', 'ill write to you a 
few short lines', 'ill suffer death ten thousand times.', 'so fare you well, my own true love', 'the time has passed, but i wish you well.', 'when you and i will be as one.', 'i love the ground whereon he goes,', 'the prettiest face, the neatest hands', 'her eyes they shone like the diamonds', 'youd think she was queen of the land', 'and her hair hung over her shoulder', 'tied up with a black velvet band.', 'in a neat little town they call belfast', 'apprenticed to trade i was bound', 'and many an hours sweet happiness', 'i spent in that neat little town.', 'till bad misfortune came oer me', 'that caused me to stray from the land', 'far away from my friends and relations', 'to follow the black velvet band.', 'well, i was out strolling one evening', 'not meaning to go very far', 'when i met with a pretty young damsel', 'who was selling her trade in the bar.', 'when i watched, she took from a customer', 'and slipped it right into my hand', 'then the watch came and put me in prison', 'bad luck to the black velvet band.', 'next morning before judge and jury', 'for a trial i had to appear', 'and the judge, he said, you young fellows...', 'the case against you is quite clear', 'and seven long years is your sentence', 'youre going to van diemans land', 'far away from your friends and relations', 'so come all you jolly young fellows', 'id have you take warning by me', 'whenever youre out on the liquor, me lads,', 'beware of the pretty colleen.', 'shell fill you with whiskey and porter', 'until youre not able to stand', 'and the very next thing that youll know, me lads,', 'youre landed in van diemans land.', 'heres a health to you, bonnie kellswater', 'for its there youll find the pleasures of life', 'and its there youll find a fishing and farming', 'and a bonnie wee girl for your wife', 'on the hills and the glens and the valleys', 'grows the softest of women so fine', 'and the flowers are all dripping with honey', 'there lives martha, a true love of mine', 'bonnie martha, youre the first girl i courted', 'youre the one put my heart in a snare', 'and if ever i should lose you to another', 'i will leave my kellswater so fair', 'for this one and that one may court her', 'but no other can take her from me', 'for i love her as i love my kellswater', 'like the primrose is loved by the bee', 'oh bridgit omalley, you left my heart shaken', 'with a hopeless desolation, id have you to know', 'its the wonders of admiration your quiet face has taken', 'and your beauty will haunt me wherever i go.', 'the white moon above the pale sands, the pale stars above the thorn tree', 'are cold beside my darling, but no purer than she', 'i gaze upon the cold moon till the stars drown in the warm sea', 'and the bright eyes of my darling are never on me.', 'my sunday it is weary, my sunday it is grey now', 'my heart is a cold thing, my heart is a stone', 'all joy is dead within me, my life has gone away now', 'for another has taken my love for his own.', 'the day it is approaching when we were to be married', 'and its rather i would die than live only to grieve', 'oh meet me, my darling, eer the sun sets oer the barley', 'and ill meet you there on the road to drumslieve.', 'oh bridgit omalley, youve left my heart shaken', 'i wish i was in carrigfergus', 'only for nights in ballygrant', 'i would swim over the deepest ocean', 'for my love to find', 'but the sea is wide and i cannot cross over', 'and neither have i the wings to fly', 'i wish i could meet a handsome boatsman', 'to ferry me over, to my love and die.', 'my 
childhood days bring back sad reflections', 'of happy times i spent so long ago', 'my boyhood friends and my own relations', 'have all passed on now like melting snow.', 'but ill spend my days in endless roaming', 'soft is the grass, my bed is free.', 'ah, to be back now in carrigfergus', 'on that long road down to the sea.', 'but in kilkenny, it is reported', 'on marble stones there as black as ink', 'with gold and silver i would support her', 'but ill sing no more till i get a drink.', 'for im drunk today, and im seldom sober', 'a handsome rover from town to town', 'ah, but im sick now, my days are numbered', 'you may travel far far from your own native land', 'far away oer the mountains, far away oer the foam', 'but of all the fine places that ive ever been', 'sure theres none can compare with the cliffs of doneen.', 'take a view oer the mountains, fine sights youll see there', 'youll see the high rocky mountains oer the west coast of clare', 'oh the town of kilkee and kilrush can be seen', 'from the high rocky slopes round the cliffs of doneen.', 'its a nice place to be on a fine summers day', 'watching all the wild flowers that neer do decay', 'oh the hares and lofty pheasants are plain to be seen', 'making homes for their young round the cliffs of doneen.', 'fare thee well to doneen, fare thee well for a while', 'and to all the kind people im leaving behind', 'to the streams and the meadows where late i have been', 'and the high rocky slopes round the cliffs of doneen.', 'in dublins fair city, where the girls are so pretty', 'i first set my eyes on sweet molly malone', 'as she wheeled her wheel-barrow', 'through streets broad and narrow', 'crying cockles and mussels, alive, alive-o!', 'alive, alive-o! alive, alive-o!', 'she was a fish-monger, but sure twas no wonder', 'for so were her father and mother before', 'and they each wheeled their barrow', 'she died of a fever, and no one could save her', 'and that was the end of sweet molly malone', 'but her ghost wheels her barrow', 'the garden of eden has vanished, they say', 'but i know the lie of it still;', 'just turn to the left at the bridge of finea', 'and stop when halfway to cootehill.', 'tis there i will find it,', 'i know sure enough', 'when fortune has come to me call,', 'oh the grass it is green around ballyjamesduff', 'and the blue sky is over it all.', 'and tones that are tender and tones that are gruff', 'are whispering over the sea,', 'come back, paddy reilly to ballyjamesduff', 'come home, paddy reilly, to me.', 'my mother once told me that when i was born', 'the day that i first saw the light,', 'i looked down the street on that very first morn', 'and gave a great crow of delight.', 'now most newborn babies appear in a huff,', 'and start with a sorrowful squall,', 'but i knew i was born in ballyjamesduff', 'and thats why i smiled on them all.', 'the babys a man, now hes toil-worn and tough', 'still, whispers come over the sea,', 'the night that we danced by the light of the moon,', 'wid phil to the fore wid his flute,', 'when phil threw his lip over come again soon,', 'hes dance the foot out o yer boot!', 'the day that i took long magee by the scruff', 'for slanderin rosie kilrain,', 'then, marchin him straight out of ballyjamesduff,', 'assisted him into a drain.', 'oh, sweet are the dreams, as the dudeen i puff,', 'of whisperings over the sea,', 'ive loved the young women of every land,', 'that always came easy to me;', 'just barrin the belles of the black-a-moor brand', 'and the chocolate shapes of feegee.', 'but that 
sort of love is a moonshiny stuff,', 'and never will addle me brain,', 'for the bells will be ringin in ballyjamesduff', 'for me and me rosie kilrain!', 'and through all their glamour, their gas and their guff', 'a whisper comes over the sea,', 'ive struck oil at last!', 'ive struck work, and i vow', 'ive struck some remarkable clothes,', 'ive struck a policeman for sayin that now,', 'id go back to my beautiful rose.', 'the belles they may blarney,', 'the boys they may bluff', 'but this i will always maintain,', 'no place in the world like ballyjamesduff', 'no guril (sic) like rosie kilrain.', 'ive paid for my passage, the sea may be rough', 'but borne on each breeze there will be,', 'will you come to the bower oer the free boundless ocean', 'where the stupendous waves roll in thundering motion,', 'where the mermaids are seen and the fierce tempest gathers,', 'to loved erin the green, the dear land of our fathers.', 'will you come, will you, will you, will you come to the bower?', 'will you come to the land of oneill and odonnell', 'of lord lucan of old and immortal oconnell.', 'where brian drove the danes and saint patrick the vermin', 'and whose valleys remain still most beautiful and charming?', 'you can visit benburb and the storied blackwater,', 'where owen roe met munroe and his chieftains did slaughter', 'where the lambs skip and play on the mossy all over,', 'from those bright golden views to enchanting rostrevor.', 'you can see dublin city, and the fine groves of blarney', 'the bann, boyne, and liffey and the lakes of killarney,', 'you may ride on the tide on the broad majestic shannon', 'you may sail round loch neagh and see storied dungannon.', 'you can visit new ross, gallant wexford, and gorey,', 'where the green was last seen by proud saxon and tory,', 'where the soil is sanctified by the blood of each true man', 'where they died satisfied that their enemies they would not run from.', 'will you come and awake our lost land from its slumber', 'and her fetters well break, links that long are encumbered.', 'and the air will resound with hosannahs to greet you', 'on the shore will be found gallant irishmen to greet you.', 'oh danny boy, the pipes, the pipes are calling', 'from glen to glen, and down the mountain side', 'the summers gone, and all the flowers are dying', 'tis you, tis you must go and i must bide.', 'but come ye back when summers in the meadow', 'or when the valleys hushed and white with snow', 'tis ill be here in sunshine or in shadow', 'oh danny boy, oh danny boy, i love you so.', 'and if you come, when all the flowers are dying', 'and i am dead, as dead i well may be', 'youll come and find the place where i am lying', 'and kneel and say an ave there for me.', 'and i shall hear, tho soft you tread above me', 'and all my dreams will warm and sweeter be', 'if youll not fail to tell me that you love me', 'ill simply sleep in peace until you come to me.', 'i found my love by the gasworks croft', 'dreamed a dream by the old canal', 'kissed my girl by the factory wall', 'dirty old town, dirty old town.', 'clouds are drifting across the moon', 'cats are prowling on their beat', 'springs a girl in the street at night', 'i heard a siren from the docks', 'saw a train set the night on fire', 'smelled the spring in the smokey wind', 'im going to make a good sharp axe', 'shining steel tempered in the fire', 'well chop you down like an old dead tree', 't was down by the salley gardens, my love and i did meet.', 'she crossed the salley gardens with little snow-white feet.', 'she 
bid me take love easy, as the leaves grow on the tree,', 'but i was young and foolish, and with her did not agree.', 'in a field down by the river, my love and i did stand', 'and on my leaning shoulder, she laid her snow-white hand.', 'she bid me take life easy , as the grass grows on the weirs', 'but i was young and foolish, and now am full of tears.', 'down by the salley gardens, my love and i did meet.', 'when, like the dawning day', 'eileen aroon', 'love sends his early ray', 'eileen aroon.', 'what makes his dawning glow', 'changeless through joy and woe', 'only the constant know', 'were she no longer true', 'what would her lover do', 'fly with a broken chain', 'far oer the bounding main', 'never to love again', 'youth must in time decay', 'beauty must fade away', 'castles are sacked in war', 'chieftains are scattered far', 'truth is a fixed star', 'believe me, if all those endearing young charms', 'which i gaze on so fondly today', 'were to change by tomorrow and fleet in my arms', 'like fairy gifts fading away.', 'thou wouldst still be adored as this moment thou art', 'let thy loveliness fade as it will', 'and around the dear ruin each wish of my heart', 'would entwine itself verdantly still.', 'it is not while beauty and youth are thine own', 'and thy cheeks unprofaned by a tear', 'that the fervor and faith of a soul can be known', 'to which time will but make thee more dear.', 'no, the heart that has truly loved never forgets', 'but as truly loves on to the close', 'as the sunflower turns to her god when he sets', 'the same look which she turned when she rose.', 'ill tell you a story of a row in the town,', 'when the green flag went up and the crown rag came down,', 'twas the neatest and sweetest thing ever you saw,', 'and they played the best games played in erin go bragh.', 'one of our comrades was down at rings end,', 'for the honor of ireland to hold and defend,', 'he had no veteran soldiers but volunteers raw,', 'playing sweet mauser music for erin go bragh.', 'now heres to pat pearse and our comrades who died', 'tom clark, macdonagh, macdiarmada, mcbryde,', 'and heres to james connolly who gave one hurrah,', 'and placed the machine guns for erin go bragh.', 'one brave english captain was ranting that day,', 'saying, give me one hour and ill blow you away,', 'but a big mauser bullet got stuck in his craw,', 'and he died of lead poisoning in erin go bragh.', 'old ceannt and his comrades like lions at bay,', 'from the south dublin union poured death and dismay,', 'and what was their horror when the englishmen saw', 'all the dead khaki soldiers in erin go bragh.', 'now heres to old dublin, and heres her renown,', 'in the long generation her fame will go down,', 'and our children will tell how their forefathers saw,', 'the red blaze of freedom in erin go bragh.', 'of priests we can offer a charmin variety,', 'far renownd for learnin and piety;', 'still, id advance ye widout impropriety,', 'father oflynn as the flowr of them all.', 'cho: heres a health to you, father oflynn,', 'slainte and slainte and slainte agin;', 'powrfulest preacher, and tenderest teacher,', 'and kindliest creature in ould donegal.', 'dont talk of your provost and fellows of trinity,', 'famous forever at greek and latinity,', 'dad and the divils and all at divinity', 'father oflynn d make hares of them all!', 'come, i venture to give ye my word,', 'never the likes of his logic was heard,', 'down from mythology into thayology,', 'truth! 
and conchology if hed the call.', 'och father oflynn, youve a wonderful way wid you,', 'all ould sinners are wishful to pray wid you,', 'all the young childer are wild for to play wid you,', 'youve such a way wid you, father avick.', 'still for all youve so gentle a soul,', 'gad, youve your flock in the grandest control,', 'checking the crazy ones, coaxin onaisy ones,', 'lifting the lazy ones on wid the stick.', 'and tho quite avoidin all foolish frivolity;', 'still at all seasons of innocent jollity,', 'where was the playboy could claim an equality,', 'at comicality, father, wid you?', 'once the bishop looked grave at your jest,', 'till this remark set him off wid the rest:', 'is it lave gaiety all to the laity?', 'cannot the clergy be irishmen, too?', 'what did i have, said the fine old woman', 'what did i have, this proud old woman did say', 'i had four green fields, each one was a jewel', 'but strangers came and tried to take them from me', 'i had fine strong sons, who fought to save my jewels', 'they fought and they died, and that was my grief said she', 'long time ago, said the fine old woman', 'long time ago, this proud old woman did say', 'there was war and death, plundering and pillage', 'my children starved, by mountain, valley and sea', 'and their wailing cries, they shook the very heavens', 'my four green fields ran red with their blood, said she', 'what have i now, said the fine old woman', 'what have i now, this proud old woman did say', 'i have four green fields, one of thems in bondage', 'in strangers hands, that tried to take it from me', 'but my sons had sons, as brave as were their fathers', 'my fourth green field will bloom once again said she', 'just give me your hand,', 'tabhair dom do lã¡mh.', 'just give me your hand', 'and ill walk with you,', 'through the streets of our land,', 'through the mountains so grand.', 'if you give me your hand.', 'and come along with me.', 'will you give me your hand,', 'and the world it can see,', 'that we can be free,', 'in peace and harmony?', 'from the north to the south.', 'from the east to the west.', 'every mountain, every valley,', 'every bush and birds nest!', 'for the world it is ours.', 'all the sea and the land,', 'to destroy or command,', 'in a gesture of peace.', 'will you give me your hand', 'and all troubles will cease,', 'for the strong and the weak,', 'for the rich and the poor?', 'all peoples and creeds,', 'lets meet their needs.', 'with a passion, we can fashion,', 'a new world of love!', 'by day and night,', 'through all struggle and strife,', 'and beside you, to guide you,', 'forever, my love.', 'for loves not for one,', 'but for both of us to share.', 'for our country so fair,', 'for our world and whats there.', 'green grow the lilacs, all sparkling with dew', 'im lonely, my darling, since parting with you;', 'but by our next meeting iull hope to prove true', 'and change the green lilacs to the red, white and blue.', 'i once had a sweetheart, but now i have none', 'shes gone and shes left me, i care not for one', 'since shes gone and left me, contented ill be,', 'for she loves another one better than me.', 'i passed my loves window, both early and late', 'the look that she gave me, it makes my heart ache;', 'oh, the look that she gave me was painful to see,', 'i wrote my love letters in rosy red lines,', 'she sent me an answer all twisted and twined;', 'saying,keep your love letters and i will keep mine', 'just you write to your love and ill write to mine.', 'oh haste to the wedding, the pipes, the pipes are 
calling', 'oh haste to the wedding, oh haste to the wedding, i love you so.', 'ill take you home again, kathleen', 'across the ocean wild and wide', 'to where your heart has ever been', 'since you were first my bonnie bride.', 'the roses all have left your cheek.', 'ive watched them fade away and die', 'your voice is sad when eer you speak', 'and tears bedim your loving eyes.', 'oh! i will take you back, kathleen', 'to where your heart will feel no pain', 'and when the fields are fresh and green', 'ill take you to your home again!', 'i know you love me, kathleen, dear', 'your heart was ever fond and true.', 'i always feel when you are near', 'that life holds nothing, dear, but you.', 'the smiles that once you gave to me', 'i scarcely ever see them now', 'though many, many times i see', 'a darkning shadow on your brow.', 'to that dear home beyond the sea', 'my kathleen shall again return.', 'and when thy old friends welcome thee', 'thy loving heart will cease to yearn.', 'where laughs the little silver stream', 'beside your mothers humble cot', 'and brightest rays of sunshine gleam', 'there all your grief will be forgot.', 'ill tell my ma when i go home', 'the boys wont leave the girls alone', 'they pulled my hair, they stole my comb', 'but thats all right till i go home.', 'she is handsome, she is pretty', 'she is the bell of belfast city', 'she is counting one, two, three', 'please wont you tell me who is she.', 'albert mooney says he loves her', 'all the boys are fighting for her', 'they knock at the door and they ring at the bell', 'sayin oh my true love, are you well?', 'out she comes as white as snow', 'rings on her fingers and bells on her toes', 'old john murray says shell die', 'if she doesnt get the fellow with the roving eye.', 'let the wind and rain and the hail blow high', 'and the snow come tumblin from the sky', 'shes as nice as apple pie', 'shell get her own lad by and by.', 'when she gets a lad of her own', 'she wont tell her ma when she goes home', 'let them all come as they will', 'for its albert mooney she loves still.', 'while goin the road to sweet athy, ', 'hurroo, hurroo', 'while goin the road to sweet athy', 'a stick in me hand and a drop in me eye', 'a doleful damsel i heard cry,', 'johnny i hardly knew ye.', 'with your drums and guns and drums and guns', 'the enemy nearly slew ye', 'oh my darling dear, ye look so queer', 'where are your eyes that were so mild', 'when my heart you so beguiled', 'why did ye run from me and the child', 'oh johnny, i hardly knew ye.', 'where are your legs that used to run', 'when you went for to carry a gun', 'indeed your dancing days are done', 'im happy for to see ye home', 'all from the island of sulloon', 'so low in flesh, so high in bone', 'oh johnny i hardly knew ye.', 'ye havent an arm, ye havent a leg', 'yere an armless, boneless, chickenless egg', 'yell have to put with a bowl out to beg', 'theyre rolling out the guns again', 'but they never will take our sons again', 'no they never will take our sons again', 'johnny im swearing to ye.', 'as i was a-walkin round kilgary mountain', 'i met with captain pepper as his money he was countin', 'i rattled my pistols and i drew forth my saber', 'sayin, stand and deliver, for i am the bold deceiver.', 'musha rig um du rum da', 'whack fol the daddy o', 'theres whiskey in the jar.', 'the shinin golden coins did look so bright and jolly', 'i took em with me home and i gave em to my molly', 'she promised and she vowed that she never would deceive me', 'but the devils in the women and they 
never can be easy.', 'when i was awakened between six and seven', 'the guards were all around me in numbers odd and even', 'i flew to my pistols, but alas i was mistaken', 'for mollys drawn my pistols and a prisoner i was taken.', 'they put me into jail without judge or writin', 'for robbing colonel pepper on kilgary mountain', 'but they didnt take my fists so i knocked the sentry down', 'and bid a fond farewell to the jail in sligo town.', 'now some take delight in fishin and in bowlin', 'and others take delight in carriages a-rollin', 'but i take delight in the juice of the barley', 'and courtin pretty girls in the morning so early.', 'oer railroad ties and crossings', 'i made my weary way,', 'through swamps and elevations', 'my tired feet did stray', 'until i resolved at sunset', 'some higher ground to win.', 'twas there i met with a creole girl', 'by the lake of ponchartrain.', 'good evening, fair maiden,', 'my money does me no good.', 'if it want for the allegators', 'id stay out in the wood.', 'youre welcome, welcome, stranger.', 'at home it is quite plain', 'for we never turn a stranger', 'from the lake of ponchartrain.', 'she took me to her mothers home', 'and she treated me quite well;', 'her long black hair in ringlets', 'upon her shoulders fell.', 'i tried to paint her picture', 'but, alas, it was in vain', 'so handsome was that creole girl', 'i asked her if shed marry me', 'she said that neer could be;', 'she said she had a lover,', 'and he was on the sea,', 'she said she had a lover', 'it was true she would remain,', 'until he returned for the creole girl', 'adieu, adieu, fair maiden,', 'you neer shall see me more', 'and when you are thinking of the old times', 'and the cottage by the shore', 'and when i meet a sociable', 'with a glass of the foaming main', 'ill drink good health to the creole girl', 'n the town of athy one jeremy lanigan', 'battered away til he hadnt a pound.', 'his father died and made him a man again', 'left him a farm and ten acres of ground.', 'he gave a grand party for friends and relations', 'who didnt forget him when come to the wall,', 'and if youll but listen ill make your eyes glisten', 'of the rows and the ructions of lanigans ball.', 'myself to be sure got free invitation,', 'for all the nice girls and boys i might ask,', 'and just in a minute both friends and relations', 'were dancing round merry as bees round a cask.', 'judy odaly, that nice little milliner,', 'she tipped me a wink for to give her a call,', 'and i soon arrived with peggy mcgilligan', 'just in time for lanigans ball.', 'there were lashings of punch and wine for the ladies,', 'potatoes and cakes; there was bacon and tea,', 'there were the nolans, dolans, ogradys', 'courting the girls and dancing away.', 'songs they went round as plenty as water,', 'the harp that once sounded in taras old hall,', 'sweet nelly gray and the rat catchers daughter,', 'all singing together at lanigans ball.', 'they were doing all kinds of nonsensical polkas', 'all round the room in a whirligig.', 'julia and i, we banished their nonsense', 'and tipped them the twist of a reel and a jig.', '&och mavrone, how the girls got all mad at me', 'danced til youd think the ceiling would fall.', 'for i spent three weeks at brooks academy', 'learning new steps for lanigans ball.', 'three long weeks i spent up in dublin,', 'three long weeks to learn nothing at all,', 'she stepped out and i stepped in again,', 'i stepped out and she stepped in again,', 'boys were all merry and the girls they were hearty', 'and danced 
all around in couples and groups,', 'til an accident happened, young terrance mccarthy', 'put his right leg through miss finnertys hoops.', 'poor creature fainted and cried: meelia murther,', 'called for her brothers and gathered them all.', 'carmody swore that hed go no further', 'til he had satisfaction at lanigans ball.', 'in the midst of the row miss kerrigan fainted,', 'her cheeks at the same time as red as a rose.', 'some of the lads declared she was painted,', 'she took a small drop too much, i suppose.', 'her sweetheart, ned morgan, so powerful and able,', 'when he saw his fair colleen stretched out by the wall,', 'tore the left leg from under the table', 'and smashed all the chaneys at lanigans ball.', 'boys, oh boys, twas then there were runctions.', 'myself got a lick from big phelim mchugh.', 'i soon replied to his introduction', 'and kicked up a terrible hullabaloo.', 'old casey, the piper, was near being strangled.', 'they squeezed up his pipes, bellows, chanters and all.', 'the girls, in their ribbons, they got all entangled', 'and that put an end to lanigans ball.', 'step we gaily, on we go', 'heel for heel and toe for toe,', 'arm in arm and row on row', 'all for mairis wedding.', 'over hillways up and down', 'myrtle green and bracken brown,', 'past the sheilings through the town', 'all for sake of mairi.', 'red her cheeks as rowans are', 'bright her eyes as any star,', 'fairest o them all by far', 'is our darlin mairi.', 'plenty herring, plenty meal', 'plenty peat to fill her creel,', 'plenty bonny bairns as weel', 'thats the toast for mairi.', 'i have seen the lark soar high at morn', 'heard his song up in the blue', 'i have heard the blackbird pipe his note', 'the thrush and the linnet too', 'but theres none of them can sing so sweet', 'my singing bird as you.', 'if i could lure my singing bird', 'from his own cozy nest', 'if i could catch my singing bird', 'i would warm him on my breast', 'for theres none of them can sing so sweet', 'of all the money that eer i spent', 'ive spent it in good company', 'and all the harm that ever i did', 'alas it was to none but me', 'and all ive done for want of wit', 'to memory now i cant recall', 'so fill to me the parting glass', 'good night and joy be with you all', 'if i had money enough to spend', 'and leisure to sit awhile', 'there is a fair maid in the town', 'that sorely has my heart beguiled', 'her rosy cheeks and ruby lips', 'i own she has my heart enthralled', 'oh, all the comrades that eer i had', 'theyre sorry for my going away', 'and all the sweethearts that eer i had', 'theyd wish me one more day to stay', 'but since it falls unto my lot', 'that i should rise and you should not', 'ill gently rise and softly call', 'it was on a fine summers morning,', 'when the birds sweetly tuned on each bough;', 'i heard a fair maid sing most charming', 'as she sat a-milking her cow;', 'her voice, it was chanting melodious,', 'she left me scarce able to go;', 'my heart it is soothed in solace,', 'my cailã\xadn deas crãºite na mbã³.', 'with courtesy i did salute her,', 'good-morrow, most amiable maid,', 'im your captive slave for the future.', 'kind sir, do not banter, she said,', 'im not such a precious rare jewel,', 'that i should enamour you so;', 'i am but a plain country girl,', 'says cailã\xadn deas crãºite na mbã³.', 'the indies afford no such jewel,', 'so precious and transparently fair,', 'oh! 
do not to my flame add fuel,', 'but consent for to love me, my dear;', 'take pity and grant my desire,', 'and leave me no longer in woe;', 'oh! love me or else ill expire,', 'sweet cailã\xadn deas crãºite na mbã³.', 'or had i the wealth of great damer,', 'or all on the african shore,', 'or had i great devonshire treasure,', 'or had i ten thousand times more,', 'or had i the lamp of alladin,', 'or had i his genie also,', 'id rather live poor on a mountain,', 'with cailã\xadn deas crãºite na mbã³.', 'i beg youll withdraw and dont tease me;', 'i cannot consent unto thee.', 'i like to live single and airy,', 'till more of the world i do see.', 'new cares they would me embarrass,', 'besides, sir, my fortune is low,', 'until i get rich ill not marry,', 'an old maid is like an old almanack,', 'quite useless when once out of date;', 'if her ware is not sold in the morning', 'at noon it must fall to low rate.', 'the fragrance of may is soon over,', 'the rose loses its beauty, you know;', 'all bloom is consumed in october,', 'a young maid is like a ship sailing,', 'theres no knowing how long she may steer,', 'for with every blast shes in danger;', 'oh! consent, love, and banish all care.', 'for riches i care not a farthing,', 'your affection i want and no more;', 'in comfort id wish to enjoy you,', 'red is the rose that in yonder garden grows', 'fair is the lily of the valley', 'clear is the water that flows from the boyne', 'but my love is fairer than any.', 'come over the hills, my bonnie irish lass', 'come over the hills to your darling', 'you choose the rose, love, and ill make the vow', 'and ill be your true love forever.', 'twas down by killarneys green woods that we strayed', 'when the moon and the stars they were shining', 'the moon shone its rays on her locks of golden hair', 'and she swore shed be my love forever.', 'its not for the parting that my sister pains', 'its not for the grief of my mother', 'tis all for the loss of my bonny irish lass', 'that my heart is breaking forever.', 'in the merry month of june from me home i started,', 'left the girls of tuam so sad and broken hearted,', 'saluted father dear, kissed me darling mother,', 'drank a pint of beer, me grief and tears to smother,', 'then off to reap the corn, leave where i was born,', 'cut a stout black thorn to banish ghosts and goblins;', 'bought a pair of brogues rattling oer the bogs', 'and frightning all the dogs on the rocky road to dublin.', 'one, two, three four, five, hunt the hare and turn her down the rocky', 'road and all the way to dublin, whack follol de rah !', 'in mullingar that night i rested limbs so weary, started by daylight', 'next morning blithe and early, took a drop of pure to keep me heartfrom sinking;', 'thats a paddys cure whenever hes on drinking. 
see the lassies smile, laughing', 'all the while at me curious style, twould set your heart a bubblin', 'asked me was i hired, wages i required, i was almost tired of the', 'rocky road to dublin.', 'in dublin next arrived, i thought it be a pity', 'to be soon deprived a view of that fine city.', 'so then i took a stroll, all among the quality;', 'me bundle it was stole, all in a neat locality.', 'something crossed me mind, when i looked behind,', 'no bundle could i find upon me stick a wobblin', 'enquiring for the rogue, they said me connaught brogue', 'wasnt much in vogue on the rocky road to dublin.', 'from there i got away, me spirits never falling,', 'landed on the quay, just as the ship was sailing.', 'the captain at me roared, said that no room had he;', 'when i jumped aboard, a cabin found for paddy.', 'down among the pigs, played some hearty rigs,', 'danced some hearty jigs, the water round me bubbling;', 'when off holyhead wished meself was dead,', 'or better for instead on the rocky road to dublin.', 'well the bouys of liverpool, when we safely landed,', 'called meself a fool, i could no longer stand it.', 'blood began to boil, temper i was losing;', 'poor old erins isle they began abusing.', 'hurrah me soul says i, me shillelagh i let fly.', 'some galway boys were nigh and saw i was a hobble in,', 'with a load hurray ! joined in the affray.', 'we quitely cleared the way for the rocky road to dublin.', 'road and all the way to dublin, whack fol all the ra !', 'o see the fleet-foot host of men, who march with faces drawn,', 'from farmstead and from fishers cot, along the banks of ban;', 'they come with vengeance in their eyes. too late! too late are they,', 'for young roddy mccorley goes to die on the bridge of toome today.', 'oh ireland, mother ireland, you love them still the best', 'the fearless brave who fighting fall upon your hapless breast,', 'but never a one of all your dead more bravely fell in fray,', 'than he who marches to his fate on the bridge of toome today.', 'up the narrow street he stepped, so smiling, proud and young.', 'about the hemp-rope on his neck, the golden ringlets clung;', 'theres neer a tear in his blue eyes, fearless and brave are they,', 'as young roddy mccorley goes to die on the bridge of toome today.', 'when last this narrow street he trod, his shining pike in hand', 'behind him marched, in grim array, a earnest stalwart band.', 'to antrim town! to antrim town, he led them to the fray,', 'but young roddy mccorley goes to die on the bridge of toome today.', 'the grey coat and its sash of green were brave and stainless then,', 'a banner flashed beneath the sun over the marching men;', 'the coat hath many a rent this noon, the sash is torn away,', 'and roddy mccorley goes to die on the bridge of toome today.', 'oh, how his pike flashed in the sun! then found a foemans heart,', 'through furious fight, and heavy odds he bore a true mans part', 'and many a red-coat bit the dust before his keen pike-play,', 'but roddy mccorley goes to die on the bridge of toome today.', 'theres never a one of all your dead more bravely died in fray', 'than he who marches to his fate in toomebridge town today;', 'true to the last! 
true to the last, he treads the upwards way,', 'and young roddy mccorley goes to die on the bridge of toome today.', 'ive traveled all over this world', 'and now to another i go', 'and i know that good quarters are waiting', 'to welcome old rosin the bow', 'to welcome old rosin the bow.', 'when im dead and laid out on the counter', 'a voice you will hear from below', 'saying send down a hogshead of whiskey', 'to drink with old rosin the bow', 'to drink with old rosin the bow.', 'then get a half dozen stout fellows', 'and stack them all up in a row', 'let them drink out of half gallon bottles', 'to the memory of rosin the bow', 'to the memory of rosin the bow.', 'then get this half dozen stout fellows', 'and let them all stagger and go', 'and dig a great hole in the meadow', 'and in it put rosin the bow', 'and in it put rosin the bow.', 'then get ye a couple of bottles', 'put one at me head and me toe', 'with a diamond ring scratch upon them', 'the name of old rosin the bow', 'the name of old rosin the bow.', 'ive only this one consolation', 'as out of this world i go', 'i know that the next generation', 'will resemble old rosin the bow', 'will resemble old rosin the bow.', 'i fear that old tyrant approaching', 'that cruel remorseless old foe', 'and i lift up me glass in his honor', 'take a drink with old rosin the bow', 'take a drink with old rosin the bow.', 'he was stranded in a tiny town on fair prince edward isle', 'waiting for a ship to come and find him', 'a one horse place, a friendly face, some coffee and a tiny trace', 'of fiddlin in the distance far behind him', 'a dime across the counter then, a shy hello, a brand new friend', 'a walk along the street in the wintry weather', 'a yellow light, an open door, and a welcome friend, theres room for more', 'and then theyre standing there inside together', 'he said, ive heard that tune before somewhere but i cant remember when,', 'was it on some other friendly shore, did i hear it on the wind', 'was it written on the sky above, i think i heard it from someone i love', 'but i never heard a sound so sweet since then', 'and now his feet begin to tap, a little boy says, ill take your hat.', 'hes caught up in the magic of her smile', 'leap, the heart inside him went, and off across the floor he sent', 'his clumsy body, graceful as a child', 'he said, theres magic in the fiddlers arms and theres magic in this town', 'theres magic in the dancers feet and the way they put them down', 'people smiling everywhere, boots and ribbons, locks of hair', 'laughtcr, old blue suits and easter gowns', 'the sailors gone, the room is bare, the old pianos setting there', 'someones hats left hanging on the rack', 'the empty chair, the wooden floor that feels the touch of shoes no more', 'awaitin for the dancers to come back', 'and thc fiddles in the closet of some daughter of the town', 'the strings are broke, tbe bow is gone and the covers buttoned down', 'but sometimes on december nights, when the air is cold and the wind is right', 'theres a melody that passes through the town.', 'my young love said to me, my mother wont mind', 'and my father wont slight you for your lack of kind.', 'and she stepped away from me and this she did say', 'it will not be long, love, till our wedding day.', 'as she stepped away from me and she moved through the fair', 'and fondly i watched her move here and move there', 'and then she turned homeward with one star awake', 'like the swan in the evening moves over the lake.', 'the people were saying, no two eer were wed', 'but one 
had a sorrow that never was said', 'and i smiled as she passed with her goods and her gear', 'and that was the last that i saw of my dear.', 'last night she came to me, my dead love came in', 'so softly she came that her feet made no din', 'as she laid her hand on me and this she did say:', 'it will not be long, love, til our wedding day.', 'oh father dear, i oft-times hear you speak of erins isle', 'her lofty hills, her valleys green, her mountains rude and wild', 'they say she is a lovely land wherein a saint might dwell', 'so why did you abandon her, the reason to me tell.', 'oh son, i loved my native land with energy and pride', 'till a blight came oer the praties; my sheep, my cattle died', 'my rent and taxes went unpaid, i could not them redeem', 'and thats the cruel reason why i left old skibbereen.', 'oh well do i remember that bleak december day', 'the landlord and the sheriff came to take us all away', 'they set my roof on fire with their cursed english spleen', 'i heaved a sigh and bade goodbye to dear old skibbereen.', 'your mother too, god rest her soul, fell on the stony ground', 'she fainted in her anguish seeing desolation round', 'she never rose but passed away from life to immortal dream', 'she found a quiet grave, me boy, in dear old skibbereen.', 'and you were only two years old and feeble was your frame', 'i could not leave you with my friends for you bore your fathers name', 'i wrapped you in my c�ta m�r in the dead of night unseen', 'oh father dear, the day will come when in answer to the call', 'all irish men of freedom stern will rally one and all', 'ill be the man to lead the band beneath the flag of green', 'and loud and clear well raise the cheer, revenge for skibbereen!', 'be thou my vision, o lord of my heart', 'naught be all else to me save that thou art', 'thou my best thought by day or by night', 'waking or sleeping thy presence my light.', 'be thou my wisdom, thou my true word', 'i ever with thee, thou with me, lord', 'thou my great father, i thy true son', 'thou in me dwelling, and i with thee one.', 'be thou my battleshield, sword for the fight', 'be thou my dignity, thou my delight', 'thou my souls shelter, thou my high tower', 'raise thou me heavenward, o power of my power.', 'riches i heed not, nor mans empty praise', 'thou mine inheritance, now and always', 'thou and thou only, first in my heart', 'high king of heavem, my treasure thou art.', 'high king of heaven, after victory won', 'may i reach heavens joys, o bright heavens sun', 'heart of my own heart, whatever befall', 'still be my vision, o ruler of all.', 'last night as i lay dreaming of pleasant days gone by', 'my mind being bent on rambling to ireland i did fly', 'i stepped on board a vision and i followed with the wind', 'and i shortly came to anchor at the cross of spancil hill', 'it being the 23rd june the day before the fair', 'when lrelands sons and daughters in crowds assembled there', 'the young and the old, the brave and the bold their journey to fulfill', 'there were jovial conversations at the fair of spancil hill', 'i went to see my neighbors to hear what they might say', 'the old ones were all dead and gone and the young ones turning grey', 'i met with the tailor quigley, hes a bould as ever still', 'sure he used to make my britches when i lived in spancil hill', 'i paid a flying visit to my first and only love', 'shes as white as any lily and as gentle as a dove', 'she threw her arms around me saying johnny i love you still', 'oh shes ned the farmers daughter and the flower of 
spancil hiii', 'i dreamt i held and kissed her as in the days of yore', 'she said, johnny youre only joking like manys the time before', 'the cock he crew in the morning he crew both loud and shrill', 'and i awoke in california, many miles from spancil hill.', 'near banbridge town, in the county down', 'one morning in july', 'down a boreen green came a sweet colleen', 'and she smiled as she passed me by.', 'she looked so sweet from her two white feet', 'to the sheen of her nut-brown hair', 'such a coaxing elf, id to shake myself', 'to make sure i was standing there.', 'from bantry bay up to derry quay', 'and from galway to dublin town', 'no maid ive seen like the sweet colleen', 'that i met in the county down.', 'as she onward sped i shook my head', 'and i gazed with a feeling rare', 'and i said, says i, to a passerby', 'whos the maid with the nut-brown hair?', 'he smiled at me, and with pride says he,', 'thats the gem of irelands crown.', 'shes young rosie mccann from the banks of the bann', 'shes the star of the county down.', 'ive travelled a bit, but never was hit', 'since my roving career began', 'but fair and square i surrendered there', 'to the charms of young rose mccann.', 'id a heart to let and no tenant yet', 'did i meet with in shawl or gown', 'but in she went and i asked no rent', 'from the star of the county down.', 'at the crossroads fair ill be surely there', 'and ill dress in my sunday clothes', 'and ill try sheeps eyes, and deludhering lies', 'on the heart of the nut-brown rose.', 'no pipe ill smoke, no horse ill yoke', 'though with rust my plow turns brown', 'till a smiling bride by my own fireside', 'sits the star of the county down.', 'it was early, early in the spring', 'the birds did whistle and sweetly sing', 'changing their notes from tree to tree', 'and the song they sang was old ireland free.', 'it was early early in the night,', 'the yeoman cavalry gave me a fright', 'the yeoman cavalry was my downfall', 'and i was taken by lord cornwall.', 'twas in the guard-house where i was laid,', 'and in a parlour where i was tried', 'my sentence passed and my courage low', 'when to dungannon i was forced to go.', 'as i was passing my fathers door', 'my brother william stood at the door', 'my aged father stood at the door', 'and my tender mother her hair she tore.', 'as i was going up wexford street', 'my own first cousin i chanced to meet;', 'my own first cousin did me betray', 'and for one bare guinea swore my life away.', 'as i was walking up wexford hill', 'who could blame me to cry my fill?', 'i looked behind, and i looked before', 'but my aged mother i shall see no more.', 'and as i mounted the platform high', 'my aged father was standing by;', 'my aged father did me deny', 'and the name he gave me was the croppy boy.', 'it was in dungannon this young man died', 'and in dungannon his body lies.', 'and you good people that do pass by', 'oh shed a tear for the croppy boy.', 'one morning early i walked forth', 'by the margin of lough leane', 'the sunshine dressed the trees in green', 'and summer bloomed again', 'i left the town and wandered on', 'through fields all green and gay', 'and whom should i meet but a colleen sweet', 'at the dawning of the day.', 'no cap or cloak this maiden wore', 'her neck and feet were bare', 'down to the grass in ringlets fell', 'her glossy golden hair', 'a milking pail was in her hand', 'she was lovely, young and gay', 'she wore the palm from venus bright', 'by the dawning of the day.', 'on a mossy bank i sat me down', 'with the maiden by 
my side', 'with gentle words i courted her', 'and asked her to be my bride', 'she said, young man dont bring me blame', 'and swiftly turned away', 'and the morning light was shining bright', 'by a lonely prison wall', 'i heard a sweet voice calling,', 'danny, they have taken you away.', 'for you stole travelians corn,', 'that your babes might see the morn,', 'now a prison ship lies waiting in the bay.', 'fair lie the fields of athenry', 'where once we watched the small freebirds fly.', 'our love grew with the spring,', 'we had dreams and songs to sing', 'as we wandered through the fields of athenry.', 'i heard a young man calling', 'nothing matters, jenny, when youre free', 'against the famine and the crown,', 'i rebelled, they ran me down,', 'now you must raise our children without me.', 'on the windswept harbour wall,', 'she watched the last star rising', 'as the prison ship sailed out across the sky', 'but shell watch and hope and pray,', 'for her love in botany bay', 'whilst she is lonely in the fields of athenry.', 'oh, a wan cloud was drawn oer the dim weeping dawn', 'as to shannons side i returnd at last', 'and the heart in my breast for the girl i lovd best', 'was beating, ah, beating, loud and fast!', 'while the doubts and the fears of the long aching years', 'seemd mingling their voices with the moaning flood', 'till full in my path, like a wild water wrath', 'my true loves shadow lamenting stood.', 'but the sudden sun kissd the cold, cruel mist', 'into dancing showrs of diamond dew', 'and the dark flowing stream laughd back to his beam', 'and the lark soared aloft in the blue', 'while no phantom of night but a form of delight', 'ran with arms outspread to her darling boy', 'and the girl i love best on my wild throbbing breast', 'hid her thousand treasures with cry of joy.', 'gather up the pots and the old tin cans', 'the mash, the corn, the barley and the bran.', 'run like the devil from the excise man', 'keep the smoke from rising, barney.', 'keep your eyes well peeled today', 'the excise men are on their way', 'searching for the mountain tay', 'in the hills of connemara.', 'swinging to the left, swinging to the right', 'the excise men will dance all night', 'drinkin up the tay till the broad daylight', 'a gallon for the butcher and a quart for john', 'and a bottle for poor old father tom', 'just to help the poor old dear along', 'stand your ground, for its too late', 'the excise men are at the gate.', 'glory be to paddy, but theyre drinkin it straight', 'im sitting on the stile, mary, where we once sat side by side', 'on a bright may morning long ago, when first you were my bride', 'the corn was springing fresh and green, and the lark sang loud and high', 'and the red was on your lips, mary, and the love light in your eyes.', 'tis but a step down yonder lane, the village church stands near', 'the place where we were wed, mary, i can see the spire from here', 'but the graveyard lies between, mary, and my step might break your rest', 'where i laid you darling down to sleep with a baby on your breast.', 'im very lonely now, mary, for the poor make no new friends', 'but oh they love the better still the few our father sends', 'for you were all i had, mary, my blessing and my pride', 'and ive nothing left to care for now since my poor mary died.', 'yours was the good brave heart, mary, that still kept hoping on', 'when the trust in god had left my soul and my arms young strength had gone', 'there was comfort ever on your lip and a kind look on your brow', 'and i thank you mary for 
the same though you cannot hear me now.', 'im bidding you a long farewell, my mary kind and true', 'but ill not forget you, darling, in the land im going to', 'they say theres bread and work for all, and the sun shines always there', 'but ill neer forget old ireland, were it fifty times as fair.', 'and often in those grand old woods ill sit and shut my eyes', 'and my heart will wander back again to the place where mary lies', 'and i think ill see that little stile where we sat side by side', 'in the springing corn and the bright may morn when first you were my bride.', 'when i was at home i was merry and frisky,', 'my dad kept a pig and my mother sold whisky,', 'my uncle was rich, but never would by aisey', 'till i was enlisted by corporal casey.', 'och! rub a dub, row de dow, corporal casey,', 'my dear little shelah, i thought would run crazy,', 'when i trudged away with tough corporal casey.', 'i marched from kilkenny, and, as i was thinking', 'on shelah, my heart in my bosom was sinking,', 'but soon i was forced to look fresh as a daisy,', 'for fear of a drubbing from corporal casey.', 'och! rub a dub, row de dow, corporal casey!', 'the devil go with him, i neer could be lazy,', 'he struck my shirts so, ould corporal casey.', 'we went into battle, i took the blows fairly', 'that fell on my pate, but they bothered me rarely,', 'and who should the first be that dropped, why, and please ye,', 'it was my good friend, honest corporal casey.', 'thinks i you are quiet, and i shall be aisey,', 'so eight years i fought without corporal casey.', 'i am a little beggarman, a begging i have been', 'for three score years in this little isle of green', 'im known along the liffey from the basin to the zoo', 'and everybody calls me by the name of johnny dhu.', 'of all the trades a going, sure the begging is the best', 'for when a man is tired he can sit him down and rest', 'he can beg for his dinner, he has nothing else to do', 'but to slip around the corner with his old rigadoo.', 'i slept in a barn one night in currabawn', 'a shocking wet night it was, but i slept until the dawn', 'there was holes in the roof and the raindrops coming thru', 'and the rats and the cats were a playing peek a boo.', 'who did i waken but the woman of the house', 'with her white spotted apron and her calico blouse', 'she began to frighten and i said boo', 'sure, dont be afraid at all, its only johnny dhu.', 'i met a little girl while a walkin out one day', 'good morrow little flaxen haired girl, i did say', 'good morrow little beggarman and how do you do', 'with your rags and your tags and your auld rigadoo.', 'ill buy a pair of leggins and a collar and a tie', 'and a nice young lady ill go courting by and by', 'ill buy a pair of goggles and ill color them with blue', 'and an old fashioned lady i will make her too.', 'so all along the high road with my bag upon my back', 'over the fields with my bulging heavy sack', 'with holes in my shoes and my toes a peeping thru', 'singing, skin a ma rink a doodle with my auld rigadoo.', 'o i must be going to bed for its getting late at night', 'the fire is all raked and now tis out of light', 'for now youve heard the story of my auld rigadoo', 'so good and god be with you, from auld johnny dhu.', 'oh, the days of the kerry dancing', 'oh, the ring of the pipers tune', 'oh, for one of those hours of gladness', 'gone, alas, like our youth, too soon!', 'when the boys began to gather', 'in the glen of a summers night', 'and the kerry pipers tuning', 'made us long with wild delight!', 'oh, 
to think of it', 'oh, to dream of it', 'fills my heart with tears!', 'was there ever a sweeter colleen', 'in the dance than eily more', 'or a prouder lad than thady', 'as he boldly took the floor.', 'lads and lasses to your places', 'up the middle and down again', 'ah, the merry hearted laughter', 'ringing through the happy glen!', 'time goes on, and the happy years are dead', 'and one by one the merry hearts are fled', 'silent now is the wild and lonely glen', 'where the bright glad laugh will echo neer again', 'only dreaming of days gone by in my heart i hear.', 'loving voices of old companions', 'stealing out of the past once more', 'and the sound of the dear old music', 'soft and sweet as in days of yore.', 'dear thoughts are in my mind', 'and my soul soars enchanted,', 'as i hear the sweet lark sing', 'in the clear air of the day.', 'for a tender beaming smile', 'to my hope has been granted,', 'and tomorrow she shall hear', 'all my fond heart would say.', 'i shall tell her all my love,', 'all my souls adoration,', 'and i think she will hear', 'and will not say me nay.', 'it is this that gives my soul', 'all its joyous elation,', 'its cold and raw, the north winds blow', 'black in the morning early', 'when all the hills were covered with snow', 'oh then it was winter fairly.', 'as i was riding oer the moor', 'i met a farmers daughter', 'her cherry cheeks and coal-black hair', 'they caused my heart to falter.', 'i bowed my bonnet very low', 'to let her know my meaning.', 'she answered with a courteous smile', 'her looks they were engaging.', 'where are you bound my pretty maid', 'its now in the morning early?', 'the answer that she gave to me', 'kind sir, to sell my barley.', 'now twenty guineas ive in my purse', 'and twenty more thats yearly.', 'you need not go to the market town', 'for ill buy all your barley.', 'if twenty guineas would gain the heart', 'of the maid i love so dearly', 'all for to tarry with me one night', 'and go home in the morning early.', 'the very evening after', 'it was my fortune for to meet', 'the farmers only daughter.', 'although the weather being cold and raw', 'with her i thought to parlay', 'the answer that she gave to me:', 'kind sir, ive sold my barley.', 'the minstrel boy to the war is gone', 'in the ranks of death you will find him', 'his fathers sword he hath girded on', 'and his wild harp slung behind him', 'land of song! said the warrior bard', 'tho all the world betrays thee', 'one sword, at least, thy rights shall guard', 'one faithful harp shall praise thee!', 'the minstrel fell! 
but the foemans chain', 'could not bring that proud soul under', 'the harp he lovd neer spoke again', 'for he tore its chords asunder', 'and said no chains shall sully thee', 'thou soul of love and bravry!', 'thy songs were made for the pure and free,', 'they shall never sound in slavery!', 'oh mary this londons a wonderful sight', 'with people here workin by day and by night', 'they dont sow potatoes, nor barley, nor wheat', 'but theres gangs of them diggin for gold in the street', 'at least when i asked them thats what i was told', 'so i just took a hand at this diggin for gold', 'but for all that i found there i might as well be', 'where the mountains of mourne sweep down to the sea.', 'i believe that when writin a wish you expressed', 'as to how the fine ladies in london were dressed', 'well if youll believe me, when asked to a ball', 'they dont wear no top to their dresses at all', 'oh ive seen them meself and you could not in truth', 'say that if they were bound for a ball or a bath', 'dont be startin them fashions, now mary mccree', 'theres beautiful girls here, oh never you mind', 'with beautiful shapes nature never designed', 'and lovely complexions all roses and cream', 'but let me remark with regard to the same', 'that if that those roses you venture to sip', 'the colors might all come away on your lip', 'so ill wait for the wild rose thats waitin for me', 'in the place where the dark mourne sweeps down to the sea.', 'beauing, belling, dancing, drinking,', 'breaking windows, cursing, sinking', 'every raking, never thinking,', 'live the rakes of mallow,', 'spending faster than it comes,', 'beating waiters bailiffs, duns,', 'bacchus true begotten sons,', 'live the rakes of mallow.', 'one time naught but claret drinking,', 'then like politicians, thinking', 'to raise the sinking funds when sinking.', 'when at home, with da-da dying,', 'still for mellow water crying,', 'but, where theres good claret plying', 'live the rakes of mallow. ', 'when at home with dadda dying,', 'still for mallow-water crying,', 'but where there is good claret plying', 'living short but merry lives,', 'going where the devil drives,', 'having sweethearts, but no wives,', 'racking tenants stewards teasing,', 'swiftly spending, slowly raising,', 'wishing to spend all their days in', 'raking as at mallow.', 'then to end this raking life,', 'they get sober, take a wife,', 'ever after live in strife,', 'and wish again for mallow.', 'how sweet is to roam by the sunny shure stream', 'and hear the doves coo neath the morning sunbeam', 'where the thrush and the robin their sweet notes entwine', 'on the banks of the shure that flows down by mooncoin.', 'flow on, lovely river, flow gently along', 'by your waters so sweet sounds the larks merry song', 'on your green banks i wander where first i did join', 'with you, lovely molly, the rose of mooncoin.', 'oh molly, dear molly, it breaks my fond heart', 'to know that we two forever must part', 'ill think of you molly while sun and moon shine', 'then heres to the shure with its valley so fair', 'as oftimes we wandered in the cool morning air', 'where the roses are blooming and lilies entwine', 'the pale moon was rising above the green mountain', 'the sun was declining beneath the blue sea', 'when i strayed with my love to the pure crystal fountain', 'that stands in beautiful vale of tralee.', 'she was lovely and fair as the rose of the summer', 'yet, twas not her beauty alone that won me', 'oh no! 
twas the the truth in her eye ever beaming', 'that made me love mary, the rose of tralee.', 'the cool shades of evening their mantle were spreading', 'and mary all smiling was listening to me', 'the moon through the valley her pale rays was shedding', 'when i won the heart of the rose of tralee.', 'though lovely and fair as the rose of the summer', 'mellow the moonlight to shine is beginning', 'close by the window young eileen is spinning', 'bent oer the fire her blind grandmother sitting', 'crooning and moaning and drowsily knitting.', 'merrily cheerily noiselessly whirring', 'spins the wheel, rings the wheel while the foots stirring', 'sprightly and lightly and merrily ringing', 'sounds the sweet voice of the young maiden singing.', 'eileen, a chara, i hear someone tapping', 'tis the ivy dear mother against the glass flapping', 'eileen, i surely hear somebody sighing', 'tis the sound mother dear of the autumn winds dying.', 'whats the noise i hear at the window i wonder?', 'tis the little birds chirping, the holly-bush under', 'what makes you shoving and moving your stool on', 'and singing all wrong the old song of the coolin?', 'theres a form at the casement, the form of her true love', 'and he whispers with face bent, im waiting for you love', 'get up from the stool, through the lattice step lightly', 'and well rove in the grove while the moons shining brightly.', 'the maid shakes her head, on her lips lays her fingers', 'steps up from the stool, longs to go and yet lingers', 'a frightened glance turns to her drowsy grandmother', 'puts her foot on the stool spins the wheel with the other', 'lazily, easily, now swings the wheel round', 'slowly and lowly is heard now the reels sound', 'noiseless and light to the lattice above her', 'the maid steps, then leaps to the arms of her lover.', 'slower... and slower... and slower the wheel swings', 'lower... and lower... 
and lower the reel rings', 'ere the reel and the wheel stop their ringing and moving', 'through the grove the young lovers by moonlight are roving.', 'as i roved out one morning', 'near the verdant braes of skreen', 'i put my back to the mossy tree', 'to view the dew on the west countrie', 'the dew on the foreign strand.', 'o sit ye down on the grass, he said', 'on the dewy grass so green', 'for the wee birds all have come and gone', 'since i my true love seen, he said', 'since i my true love seen.', 'o ill not sit on the grass, she said', 'no lover ill be of thine', 'for i hear you love a connaught maid', 'and your hearts no longer mine, she said', 'and your hearts no longer mine.', 'o i will climb a high high tree', 'and ill rob a wild birds nest', 'and back ill bring what i find there', 'to the arms that i love best, he said', 'to the arms that i love best.', 'the water is wide, i cannot get oer', 'neither have i wings to fly', 'give me a boat that can carry two', 'and both shall row, my love and i', 'a ship there is and she sails the sea', 'shes loaded deep as deep can be', 'but not so deep as the love im in', 'i know not if i sink or swim', 'i leaned my back against an oak', 'thinking it was a trusty tree', 'but first it bent and then it broke', 'so did my love prove false to me', 'i reached my finger into some soft bush', 'thinking the fairest flower to find', 'i pricked my finger to the bone', 'and left the fairest flower behind', 'oh love be handsome and love be kind', 'gay as a jewel when first it is new', 'but love grows old and waxes cold', 'and fades away like the morning dew', 'must i go bound while you go free', 'must i love a man who doesnt love me', 'must i be born with so little art', 'as to love a man wholl break my heart', 'when cockle shells turn silver bells', 'then will my love come back to me', 'when roses bloom in winters gloom', 'then will my love return to me', 'o paddy dear, and did ye hear the news thats goin round?', 'the shamrock is by law forbid to grow on irish ground!', 'no more saint patricks day well keep, his color cant be seen', 'for theres a cruel law agin the wearin o the green.', 'i met with napper tandy, and he took me by the hand', 'and he said, hows poor old ireland, and how does she stand?', 'shes the most distressful country that ever yet was seen', 'for theyre hanging men and women there for the wearin o the green.', 'so if the color we must wear be englands cruel red', 'let it remind us of the blood that irishmen have shed', 'and pull the shamrock from your hat, and throw it on the sod', 'but never fear, twill take root there, though underfoot tis trod.', 'when laws can stop the blades of grass from growin as they grow', 'and when the leaves in summer-time their color dare not show', 'then i will change the color too i wear in my caubeen', 'but till that day, please god, ill stick to the wearin o the green.', 'ive been a wild rover for many a year', 'and i spent all my money on whiskey and beer,', 'and now im returning with gold in great store', 'and i never will play the wild rover no more.', 'and its no, nay, never,', 'no nay never no more,', 'will i play the wild rover', 'no never no more.', 'i went to an ale-house i used to frequent', 'and i told the landlady my money was spent.', 'i asked her for credit, she answered me nay', 'such a custom as yours i could have any day.', 'i took from my pocket ten sovereigns bright', 'and the landladys eyes opened wide with delight.', 'she said i have whiskey and wines of the best', 'and the words that 
i spoke sure were only in jest.', 'ill go home to my parents, confess what ive done', 'and ill ask them to pardon their prodigal son.', 'and if they caress (forgive) me as ofttimes before', 'sure i never will play the wild rover no more.', 'theres a tear in your eye,', 'and im wondering why,', 'for it never should be there at all.', 'with such powr in your smile,', 'sure a stone youd beguile,', 'so theres never a teardrop should fall.', 'when your sweet lilting laughters', 'like some fairy song,', 'and your eyes twinkle bright as can be;', 'you should laugh all the while', 'and all other times smile,', 'and now, smile a smile for me.', 'when irish eyes are smiling,', 'sure, tis like the morn in spring.', 'in the lilt of irish laughter', 'you can hear the angels sing.', 'when irish hearts are happy,', 'all the world seems bright and gay.', 'and when irish eyes are smiling,', 'sure, they steal your heart away.', 'for your smile is a part', 'of the love in your heart,', 'and it makes even sunshine more bright.', 'like the linnets sweet song,', 'crooning all the day long,', 'comes your laughter and light.', 'for the springtime of life', 'is the sweetest of all', 'there is neer a real care or regret;', 'and while springtime is ours', 'throughout all of youths hours,', 'let us smile each chance we get.', 'as i was a-goin over gilgarra mountain', 'i spied colonel farrell, and his money he was countin.', 'first i drew my pistols and then i drew my rapier,', 'sayin stand and deliver, for i am your bold receiver.', 'musha ringum duram da,', 'whack fol the daddy-o,', 'he counted out his money and it made a pretty penny;', 'i put it in my pocket to take home to darlin jenny.', 'she sighed and swore she loved me and never would deceive me,', 'bu the devil take the women, for they always lie so easy!', 'musha rungum duram da', 'i went into me chamber all for to take a slumber,', 'to dream of gold and girls, and of course it was no wonder:', 'me jenny took me charges and she filled them up with water,', 'called on colonel farrell to get ready for the slaughter.', 'next mornin early, before i rose for travel,', 'a-came a band of footmen and likewise colonel farrell.', 'i goes to draw my pistol, for shed stole away my rapier,', 'but a prisoner i was taken, i couldnt shoot the water.', 'they put me into jail with a judge all a-writin:', 'for robbin colonel farrell on gilgarra mountain.', 'but they didnt take me fists and i knocked the jailer down', 'and bid a farewell to this tight-fisted town.', 'musha ringum duram da', 'id like to find me brother, the one whos in the army;', 'i dont know where hes stationed, be it cork or in killarney.', 'together wed go roamin oer the mountains of kilkenny,', 'and i swear hed treat me fairer than my darlin sportin jenny!', 'theres some takes delight in the carriages and rollin,', 'some takes delight in the hurley or the bollin,', 'but i takes delight in the juice of the barley,', 'courtin pretty maids in the mornin, o so early!', 'oh the summertime is coming', 'and the trees are sweetly blooming', 'and the wild mountain thyme', 'grows around the blooming heather', 'will ye go, lassie go?', 'and well all go together', 'to pluck wild mountain thyme', 'all around the blooming heather', 'i will build my love a tower', 'near yon pure crystal fountain', 'and on it i will build', 'all the flowers of the mountain', 'if my true love she were gone', 'i would surely find another', 'where wild mountain thyme', '']
###Markdown
From here, you can initialize the `Tokenizer` class and generate the word index dictionary:
###Code
# Initialize the Tokenizer class
tokenizer = Tokenizer()
# Generate the word index dictionary
tokenizer.fit_on_texts(corpus)
# Define the total number of words. Add 1 to account for index `0`, which is reserved for the padding token.
total_words = len(tokenizer.word_index) + 1
print(f'word index dictionary: {tokenizer.word_index}')
print(f'total words: {total_words}')
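# Quick sketch: the fitted tokenizer can now map a lyric line to its token IDs.
# `sample_line` is simply one line taken from the corpus above; nothing here
# prints, so the cell output below is unchanged.
sample_line = 'come over the hills, my bonnie irish lass'
sample_sequence = tokenizer.texts_to_sequences([sample_line])[0]
# Each ID corresponds to an entry in `tokenizer.word_index`
# (e.g. 'the' -> 1, 'my' -> 7 in the dictionary printed below).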
###Output
word index dictionary: {'the': 1, 'and': 2, 'i': 3, 'to': 4, 'a': 5, 'of': 6, 'my': 7, 'in': 8, 'me': 9, 'for': 10, 'you': 11, 'all': 12, 'was': 13, 'she': 14, 'that': 15, 'on': 16, 'with': 17, 'her': 18, 'but': 19, 'as': 20, 'when': 21, 'love': 22, 'is': 23, 'your': 24, 'it': 25, 'will': 26, 'from': 27, 'by': 28, 'they': 29, 'be': 30, 'are': 31, 'so': 32, 'he': 33, 'old': 34, 'no': 35, 'oh': 36, 'ill': 37, 'at': 38, 'one': 39, 'his': 40, 'there': 41, 'were': 42, 'heart': 43, 'down': 44, 'now': 45, 'we': 46, 'where': 47, 'young': 48, 'never': 49, 'go': 50, 'come': 51, 'then': 52, 'did': 53, 'not': 54, 'said': 55, 'away': 56, 'their': 57, 'sweet': 58, 'them': 59, 'green': 60, 'if': 61, 'take': 62, 'our': 63, 'like': 64, 'night': 65, 'day': 66, 'o': 67, 'out': 68, 'fair': 69, 'this': 70, 'town': 71, 'have': 72, 'can': 73, 'true': 74, 'its': 75, 'thou': 76, 'see': 77, 'dear': 78, 'more': 79, 'theres': 80, 'or': 81, 'had': 82, 'would': 83, 'over': 84, 'hear': 85, 'up': 86, 'ive': 87, 'through': 88, 'home': 89, 'again': 90, 'well': 91, 'oer': 92, 'land': 93, 'good': 94, 'im': 95, 'ye': 96, 'sea': 97, 'left': 98, 'still': 99, 'father': 100, 'long': 101, 'rose': 102, 'could': 103, 'morning': 104, 'wild': 105, 'who': 106, 'eyes': 107, 'came': 108, 'while': 109, 'too': 110, 'back': 111, 'little': 112, 'an': 113, 'took': 114, 'him': 115, 'bow': 116, 'first': 117, 'let': 118, 'man': 119, 'shall': 120, 'know': 121, 'get': 122, 'high': 123, 'gone': 124, 'say': 125, 'ever': 126, 'some': 127, 'mary': 128, 'hand': 129, 'till': 130, 'put': 131, 'own': 132, 'time': 133, 'heard': 134, 'dead': 135, 'may': 136, 'bright': 137, 'mountain': 138, 'early': 139, 'rosin': 140, 'gave': 141, 'thee': 142, 'only': 143, 'far': 144, 'maid': 145, 'must': 146, 'find': 147, 'girl': 148, 'sure': 149, 'round': 150, 'dublin': 151, 'once': 152, 'world': 153, 'delight': 154, 'last': 155, 'johnny': 156, 'seen': 157, 'has': 158, 'fine': 159, 'road': 160, 'mother': 161, 'tis': 162, 'what': 163, 'way': 164, 'moon': 165, 'soul': 166, 'neer': 167, 'id': 168, 'just': 169, 'thats': 170, 'days': 171, 'darling': 172, 'went': 173, 'white': 174, 'die': 175, 'than': 176, 'hair': 177, 'goes': 178, 'meet': 179, 'today': 180, 'do': 181, 'girls': 182, 'shes': 183, 'thyme': 184, 'thy': 185, 'sing': 186, 'pretty': 187, 'new': 188, 'poor': 189, 'into': 190, 'life': 191, 'irish': 192, 'give': 193, 'boy': 194, 'youre': 195, 'make': 196, 'passed': 197, 'lovely': 198, 'black': 199, 'youll': 200, 'died': 201, 'red': 202, 'smile': 203, 'keep': 204, 'loves': 205, 'free': 206, 'leave': 207, 'friends': 208, 'each': 209, 'saw': 210, 'behind': 211, 'song': 212, 'ra': 213, 'dont': 214, 'arms': 215, 'am': 216, 'sun': 217, 'saying': 218, 'made': 219, 'wish': 220, 'cold': 221, 'met': 222, 'before': 223, 'should': 224, 'rocky': 225, 'light': 226, 'wid': 227, 'boys': 228, 'best': 229, 'fields': 230, 'since': 231, 'ball': 232, 'water': 233, 'casey': 234, 'mind': 235, 'along': 236, 'loved': 237, 'place': 238, 'ireland': 239, 'next': 240, 'three': 241, 'many': 242, 'years': 243, 'door': 244, 'us': 245, 'drink': 246, 'got': 247, 'might': 248, 'live': 249, 'roses': 250, 'play': 251, 'soon': 252, 'ground': 253, 'times': 254, 'spent': 255, 'going': 256, 'tree': 257, 'barley': 258, 'grass': 259, 'kind': 260, 'twas': 261, 'bridge': 262, 'around': 263, 'blue': 264, 'tell': 265, 'row': 266, 'how': 267, 'money': 268, 'merry': 269, 'stepped': 270, 'corporal': 271, 'always': 272, 'though': 273, 'near': 274, 'taken': 275, 'ones': 276, 'daughter': 277, 'forever': 278, 'loo': 279, 
'shining': 280, 'plenty': 281, 'hes': 282, 'ship': 283, 'banks': 284, 'think': 285, 'very': 286, 'stand': 287, 'heres': 288, 'snow': 289, 'mountains': 290, 'molly': 291, 'wheel': 292, 'street': 293, 'erin': 294, 'side': 295, 'feet': 296, 'star': 297, 'look': 298, 'brave': 299, 'woman': 300, 'sons': 301, 'two': 302, 'says': 303, 'asked': 304, 'lanigans': 305, 'singing': 306, 'men': 307, 'toome': 308, 'stole': 309, 'god': 310, 'hill': 311, 'lonely': 312, 'lover': 313, 'tears': 314, 'fathers': 315, 'low': 316, 'voice': 317, 'quite': 318, 'able': 319, 'nice': 320, 'laid': 321, 'comrades': 322, 'wind': 323, 'another': 324, 'sit': 325, 'face': 326, 'band': 327, 'call': 328, 'colleen': 329, 'until': 330, 'hills': 331, 'mine': 332, 'above': 333, 'upon': 334, 'eer': 335, 'youve': 336, 'fly': 337, 'been': 338, 'late': 339, 'alive': 340, 'ballyjamesduff': 341, 'looked': 342, 'great': 343, 'why': 344, 'every': 345, 'proud': 346, 'found': 347, 'bragh': 348, 'such': 349, 'birds': 350, 'wedding': 351, 'welcome': 352, 'dancing': 353, 'da': 354, 'fell': 355, 'thinking': 356, 'roddy': 357, 'mccorley': 358, 'smiling': 359, 'mallow': 360, 'blooming': 361, 'thought': 362, 'peace': 363, 'soft': 364, 'pure': 365, 'harp': 366, 'dream': 367, 'alas': 368, 'yet': 369, 'clear': 370, 'art': 371, 'off': 372, 'hope': 373, 'fought': 374, 'mothers': 375, 'shore': 376, 'ago': 377, 'fol': 378, 'de': 379, 'house': 380, 'married': 381, 'bound': 382, 'danced': 383, 'devil': 384, 'dawning': 385, 'makes': 386, 'same': 387, 'sat': 388, 'any': 389, 'glass': 390, 'gay': 391, 'relations': 392, 'evening': 393, 'watched': 394, 'right': 395, 'fellows': 396, 'whiskey': 397, 'bonnie': 398, 'grows': 399, 'women': 400, 'flowers': 401, 'beauty': 402, 'cannot': 403, 'handsome': 404, 'happy': 405, 'gold': 406, 'rover': 407, 'none': 408, 'doneen': 409, 'summers': 410, 'people': 411, 'set': 412, 'paddy': 413, 'morn': 414, 'most': 415, 'easy': 416, 'struck': 417, 'beautiful': 418, 'those': 419, 'golden': 420, 'run': 421, 'pipes': 422, 'glen': 423, 'dying': 424, 'here': 425, 'wall': 426, 'across': 427, 'fire': 428, 'eileen': 429, 'longer': 430, 'cheeks': 431, 'valley': 432, 'both': 433, 'dew': 434, 'care': 435, 'bride': 436, 'nothing': 437, 'wont': 438, 'theyre': 439, 'colonel': 440, 'maiden': 441, 'shed': 442, 'til': 443, 'brown': 444, 'breast': 445, 'corn': 446, 'sinking': 447, 'began': 448, 'name': 449, 'cruel': 450, 'sound': 451, 'spancil': 452, 'county': 453, 'lies': 454, 'color': 455, 'thing': 456, 'decay': 457, 'sleep': 458, 'hours': 459, 'loving': 460, 'weary': 461, 'ringing': 462, 'please': 463, 'forget': 464, 'lie': 465, 'ran': 466, 'tore': 467, 'country': 468, 'fear': 469, 'fortune': 470, 'kissed': 471, 'alone': 472, 'ould': 473, 'cry': 474, 'dreams': 475, 'used': 476, 'horse': 477, 'break': 478, 'bells': 479, 'didnt': 480, 'weeks': 481, 'without': 482, 'raw': 483, 'nor': 484, 'twenty': 485, 'tune': 486, 'hed': 487, 'roving': 488, 'leaves': 489, 'cant': 490, 'death': 491, 'ten': 492, 'prison': 493, 'judge': 494, 'against': 495, 'lads': 496, 'shell': 497, 'fill': 498, 'valleys': 499, 'other': 500, 'pale': 501, 'joy': 502, 'wide': 503, 'bring': 504, 'ah': 505, 'cliffs': 506, 'city': 507, 'end': 508, 'turn': 509, 'sky': 510, 'born': 511, 'knew': 512, 'smiled': 513, 'rosie': 514, 'comes': 515, 'sayin': 516, 'lord': 517, 'dungannon': 518, 'blood': 519, 'air': 520, 'danny': 521, 'calling': 522, 'sunshine': 523, 'spring': 524, 'bid': 525, 'grow': 526, 'truth': 527, 'tear': 528, 'rings': 529, 'guns': 530, 'bay': 531, 'oflynn': 532, 'och': 533, 
'stick': 534, 'rest': 535, 'four': 536, 'jewel': 537, 'tried': 538, 'grief': 539, 'answer': 540, 'kathleen': 541, 'fond': 542, 'eye': 543, 'goin': 544, 'pistols': 545, 'musha': 546, 'whack': 547, 'creole': 548, 'together': 549, 'room': 550, 'fall': 551, 'swore': 552, 'being': 553, 'step': 554, 'lark': 555, 'cailã\xadn': 556, 'deas': 557, 'crãºite': 558, 'na': 559, 'mbã³': 560, 'sir': 561, 'isle': 562, 'waiting': 563, 'magic': 564, 'skibbereen': 565, 'loud': 566, 'raise': 567, 'bent': 568, 'aged': 569, 'summer': 570, 'jenny': 571, 'excise': 572, 'rigadoo': 573, 'auld': 574, 'hearts': 575, 'nay': 576, 'stool': 577, 'farrell': 578, 'garden': 579, 'precious': 580, 'child': 581, 'slumber': 582, 'sleeping': 583, 'watch': 584, 'gently': 585, 'minstrel': 586, 'praise': 587, 'bell': 588, 'shaken': 589, 'immortal': 590, 'pray': 591, 'stay': 592, 'spoke': 593, 'cross': 594, 'brothers': 595, 'much': 596, 'past': 597, 'killarney': 598, 'sang': 599, 'tones': 600, 'ral': 601, 'wander': 602, 'cot': 603, 'feel': 604, 'yore': 605, 'answered': 606, 'divil': 607, 'middle': 608, 'bit': 609, 'led': 610, 'soldiers': 611, 'lily': 612, 'bed': 613, 'lassie': 614, 'clothes': 615, 'return': 616, 'broken': 617, 'derry': 618, 'sighed': 619, 'english': 620, 'tomorrow': 621, 'souls': 622, 'van': 623, 'diemans': 624, 'law': 625, 'neither': 626, 'winds': 627, 'rather': 628, 'doesnt': 629, 'rosy': 630, 'neatest': 631, 'hands': 632, 'whereon': 633, 'stands': 634, 'write': 635, 'thousand': 636, 'fare': 637, 'youd': 638, 'velvet': 639, 'neat': 640, 'landed': 641, 'health': 642, 'kellswater': 643, 'quiet': 644, 'stars': 645, 'beside': 646, 'warm': 647, 'sunday': 648, 'grey': 649, 'ocean': 650, 'sad': 651, 'spend': 652, 'kilkenny': 653, 'silver': 654, 'view': 655, 'west': 656, 'plain': 657, 'barrow': 658, 'broad': 659, 'narrow': 660, 'crying': 661, 'wonder': 662, 'save': 663, 'stop': 664, 'tender': 665, 'told': 666, 'lip': 667, 'dance': 668, 'foot': 669, 'kilrain': 670, 'saint': 671, 'visit': 672, 'mossy': 673, 'wexford': 674, 'irishmen': 675, 'shadow': 676, 'tho': 677, 'salley': 678, 'gardens': 679, 'foolish': 680, 'youth': 681, 'fade': 682, 'war': 683, 'believe': 684, 'which': 685, 'change': 686, 'entwine': 687, 'turns': 688, 'turned': 689, 'crown': 690, 'played': 691, 'captain': 692, 'blow': 693, 'children': 694, 'slainte': 695, 'gentle': 696, 'heavens': 697, 'bloom': 698, 'grand': 699, 'bush': 700, 'nest': 701, 'rich': 702, 'parting': 703, 'better': 704, 'window': 705, 'haste': 706, 'fresh': 707, 'stream': 708, 'rays': 709, 'ma': 710, 'ring': 711, 'lad': 712, 'athy': 713, 'drop': 714, 'hardly': 715, 'done': 716, 'arm': 717, 'leg': 718, 'beg': 719, 'drew': 720, 'bold': 721, 'drawn': 722, 'jail': 723, 'writin': 724, 'farewell': 725, 'tired': 726, 'lake': 727, 'want': 728, 'ringlets': 729, 'myself': 730, 'songs': 731, 'reel': 732, 'steps': 733, 'hearty': 734, 'fainted': 735, 'called': 736, 'under': 737, 'toe': 738, 'mairi': 739, 'fairest': 740, 'darlin': 741, 'bird': 742, 'memory': 743, 'lips': 744, 'sweetly': 745, 'morrow': 746, 'consent': 747, 'else': 748, 'sold': 749, 'stout': 750, 'pair': 751, 'drinking': 752, 'meself': 753, 'fray': 754, 'pike': 755, 'coat': 756, 'beneath': 757, 'rent': 758, 'part': 759, 'half': 760, 'head': 761, 'friend': 762, 'standing': 763, 'floor': 764, 'bare': 765, 'wed': 766, 'son': 767, 'pride': 768, 'vision': 769, 'sword': 770, 'after': 771, 'won': 772, 'farmers': 773, 'flower': 774, 'nut': 775, 'surely': 776, 'stood': 777, 'wandered': 778, 'athenry': 779, 'rising': 780, 'beating': 781, 'form': 
782, 'dhu': 783, 'buy': 784, 'laughter': 785, 'wear': 786, 'raking': 787, 'rakes': 788, 'claret': 789, 'shure': 790, 'tralee': 791, 'slower': 792, 'lower': 793, 'deep': 794, 'wearin': 795, 'duram': 796, 'takes': 797, 'beware': 798, 'steal': 799, 'brings': 800, 'things': 801, 'joys': 802, 'bunch': 803, 'sailor': 804, 'chanced': 805, 'pass': 806, 'angels': 807, 'send': 808, 'drowsy': 809, 'keeping': 810, 'spirit': 811, 'stealing': 812, 'feeling': 813, 'roam': 814, 'presence': 815, 'heavenward': 816, 'dust': 817, 'dim': 818, 'journey': 819, 'waves': 820, 'frightened': 821, 'leaving': 822, 'struggle': 823, 'parents': 824, 'courage': 825, 'weeping': 826, 'pain': 827, 'mist': 828, 'felt': 829, 'roared': 830, 'making': 831, 'fever': 832, 'moment': 833, 'distance': 834, 'wailing': 835, 'oft': 836, 'held': 837, 'fast': 838, 'cabin': 839, 'honey': 840, 'diddle': 841, 'clearly': 842, 'open': 843, 'opened': 844, 'table': 845, 'wine': 846, 'lay': 847, 'shells': 848, 'sailed': 849, 'drown': 850, 'fetters': 851, 'chains': 852, 'wives': 853, 'sorrow': 854, 'thoughts': 855, 'cursed': 856, 'hell': 857, 'five': 858, 'buried': 859, 'lost': 860, 'endless': 861, 'slavery': 862, 'gun': 863, 'rain': 864, 'cares': 865, 'ghosts': 866, 'runaway': 867, 'twill': 868, 'month': 869, 'meadows': 870, 'prettiest': 871, 'winters': 872, 'satisfied': 873, 'few': 874, 'short': 875, 'lines': 876, 'shone': 877, 'shoulder': 878, 'belfast': 879, 'trade': 880, 'bad': 881, 'caused': 882, 'stray': 883, 'meaning': 884, 'damsel': 885, 'appear': 886, 'seven': 887, 'sentence': 888, 'jolly': 889, 'whenever': 890, 'wee': 891, 'wife': 892, 'lives': 893, 'martha': 894, 'courted': 895, 'bridgit': 896, 'omalley': 897, 'desolation': 898, 'thorn': 899, 'gaze': 900, 'stone': 901, 'approaching': 902, 'sets': 903, 'carrigfergus': 904, 'nights': 905, 'swim': 906, 'wings': 907, 'sober': 908, 'travel': 909, 'native': 910, 'places': 911, 'slopes': 912, 'hares': 913, 'lofty': 914, 'malone': 915, 'wheeled': 916, 'streets': 917, 'enough': 918, 'reilly': 919, 'tough': 920, 'whispers': 921, 'phil': 922, 'threw': 923, 'straight': 924, 'belles': 925, 'moor': 926, 'brand': 927, 'shapes': 928, 'work': 929, 'vow': 930, 'blarney': 931, 'paid': 932, 'bower': 933, 'remain': 934, 'charming': 935, 'storied': 936, 'chieftains': 937, 'slaughter': 938, 'bann': 939, 'boyne': 940, 'liffey': 941, 'gallant': 942, 'awake': 943, 'greet': 944, 'meadow': 945, 'sweeter': 946, 'dirty': 947, 'cats': 948, 'crossed': 949, 'field': 950, 'river': 951, 'full': 952, 'aroon': 953, 'sends': 954, 'woe': 955, 'chain': 956, 'main': 957, 'charms': 958, 'fondly': 959, 'fleet': 960, 'fairy': 961, 'thine': 962, 'known': 963, 'truly': 964, 'close': 965, 'story': 966, 'flag': 967, 'sweetest': 968, 'honor': 969, 'playing': 970, 'mauser': 971, 'music': 972, 'tom': 973, 'hurrah': 974, 'big': 975, 'lead': 976, 'south': 977, 'generation': 978, 'freedom': 979, 'agin': 980, 'creature': 981, 'dad': 982, 'venture': 983, 'word': 984, 'wonderful': 985, 'crazy': 986, 'lazy': 987, 'grave': 988, 'jest': 989, 'remark': 990, 'strangers': 991, 'strong': 992, 'shook': 993, 'walk': 994, 'north': 995, 'ours': 996, 'cease': 997, 'strife': 998, 'whats': 999, 'lilacs': 1000, 'prove': 1001, 'sweetheart': 1002, 'letters': 1003, 'sent': 1004, 'speak': 1005, 'brow': 1006, 'albert': 1007, 'mooney': 1008, 'fighting': 1009, 'fingers': 1010, 'toes': 1011, 'john': 1012, 'hurroo': 1013, 'drums': 1014, 'beguiled': 1015, 'carry': 1016, 'bone': 1017, 'havent': 1018, 'walkin': 1019, 'kilgary': 1020, 'pepper': 1021, 'countin': 1022, 
'forth': 1023, 'deliver': 1024, 'daddy': 1025, 'em': 1026, 'deceive': 1027, 'between': 1028, 'even': 1029, 'prisoner': 1030, 'fists': 1031, 'knocked': 1032, 'carriages': 1033, 'rollin': 1034, 'juice': 1035, 'courtin': 1036, 'ponchartrain': 1037, 'does': 1038, 'stranger': 1039, 'marry': 1040, 'adieu': 1041, 'ask': 1042, 'tipped': 1043, 'arrived': 1044, 'ladies': 1045, 'potatoes': 1046, 'courting': 1047, 'miss': 1048, 'small': 1049, 'ned': 1050, 'ribbons': 1051, 'heel': 1052, 'bonny': 1053, 'pipe': 1054, 'thrush': 1055, 'sweethearts': 1056, 'unto': 1057, 'rise': 1058, 'softly': 1059, 'milking': 1060, 'rare': 1061, 'pity': 1062, 'treasure': 1063, 'noon': 1064, 'sailing': 1065, 'banish': 1066, 'riches': 1067, 'comfort': 1068, 'yonder': 1069, 'flows': 1070, 'fairer': 1071, 'lass': 1072, 'woods': 1073, 'strayed': 1074, 'locks': 1075, 'breaking': 1076, 'june': 1077, 'started': 1078, 'hearted': 1079, 'beer': 1080, 'daylight': 1081, 'among': 1082, 'bundle': 1083, 'connaught': 1084, 'quay': 1085, 'erins': 1086, 'galway': 1087, 'fearless': 1088, 'bravely': 1089, 'marches': 1090, 'fate': 1091, 'neck': 1092, 'trod': 1093, 'marched': 1094, 'antrim': 1095, 'sash': 1096, 'flashed': 1097, 'hath': 1098, 'foemans': 1099, 'fight': 1100, 'heavy': 1101, 'bore': 1102, 'mans': 1103, 'counter': 1104, 'dozen': 1105, 'gallon': 1106, 'bottles': 1107, 'diamond': 1108, 'resemble': 1109, 'tiny': 1110, 'friendly': 1111, 'weather': 1112, 'inside': 1113, 'remember': 1114, 'someone': 1115, 'hat': 1116, 'body': 1117, 'dancers': 1118, 'hanging': 1119, 'empty': 1120, 'shoes': 1121, 'broke': 1122, 'december': 1123, 'move': 1124, 'reason': 1125, 'roof': 1126, 'naught': 1127, 'tower': 1128, 'power': 1129, 'king': 1130, 'dreaming': 1131, 'crew': 1132, 'whos': 1133, 'mccann': 1134, 'smoke': 1135, 'notes': 1136, 'yeoman': 1137, 'cavalry': 1138, 'guard': 1139, 'forced': 1140, 'brother': 1141, 'cousin': 1142, 'blame': 1143, 'croppy': 1144, 'dressed': 1145, 'trees': 1146, 'wore': 1147, 'words': 1148, 'swiftly': 1149, 'dawn': 1150, 'lovd': 1151, 'voices': 1152, 'moaning': 1153, 'dark': 1154, 'gather': 1155, 'tay': 1156, 'swinging': 1157, 'drinkin': 1158, 'sitting': 1159, 'stile': 1160, 'springing': 1161, 'yours': 1162, 'kept': 1163, 'aisey': 1164, 'rub': 1165, 'dub': 1166, 'dow': 1167, 'shelah': 1168, 'fairly': 1169, 'beggarman': 1170, 'begging': 1171, 'slept': 1172, 'holes': 1173, 'coming': 1174, 'thru': 1175, 'boo': 1176, 'lady': 1177, 'kerry': 1178, 'pipers': 1179, 'laugh': 1180, 'beaming': 1181, 'guineas': 1182, 'least': 1183, 'diggin': 1184, 'mourne': 1185, 'spending': 1186, 'mellow': 1187, 'plying': 1188, 'slowly': 1189, 'mooncoin': 1190, 'flow': 1191, 'sounds': 1192, 'shine': 1193, 'cool': 1194, 'crystal': 1195, 'fountain': 1196, 'moonlight': 1197, 'grandmother': 1198, 'crooning': 1199, 'merrily': 1200, 'spins': 1201, 'lightly': 1202, 'moving': 1203, 'lattice': 1204, 'grove': 1205, 'swings': 1206, 'finger': 1207, 'shamrock': 1208, 'pocket': 1209, 'springtime': 1210, 'gilgarra': 1211, 'rapier': 1212, 'ringum': 1213, 'mornin': 1214, 'heather': 1215, 'build': 1216, 'maidens': 1217, 'prime': 1218, 'nlyme': 1219, 'flavours': 1220, 'lusty': 1221, 'reminded': 1222, 'attend': 1223, 'guardian': 1224, 'creeping': 1225, 'dale': 1226, 'vigil': 1227, 'visions': 1228, 'revealing': 1229, 'breathes': 1230, 'holy': 1231, 'strains': 1232, 'hover': 1233, 'hark': 1234, 'solemn': 1235, 'winging': 1236, 'earthly': 1237, 'shalt': 1238, 'awaken': 1239, 'destiny': 1240, 'emigrants': 1241, 'amid': 1242, 'longing': 1243, 'parted': 1244, 'townland': 1245, 
'vessel': 1246, 'crowded': 1247, 'disquieted': 1248, 'folk': 1249, 'escape': 1250, 'hardship': 1251, 'sustaining': 1252, 'glimpse': 1253, 'faded': 1254, 'strangely': 1255, 'seas': 1256, 'anger': 1257, 'desperate': 1258, 'plight': 1259, 'worsened': 1260, 'delirium': 1261, 'possessed': 1262, 'clouded': 1263, 'prayers': 1264, 'begged': 1265, 'forgiveness': 1266, 'seeking': 1267, 'distant': 1268, 'mither': 1269, 'simple': 1270, 'ditty': 1271, 'ld': 1272, 'li': 1273, 'hush': 1274, 'lullaby': 1275, 'huggin': 1276, 'hummin': 1277, 'rock': 1278, 'asleep': 1279, 'outside': 1280, 'modestly': 1281, 'ry': 1282, 'ay': 1283, 'di': 1284, 're': 1285, 'dai': 1286, 'rie': 1287, 'shc': 1288, 'bridle': 1289, 'stable': 1290, 'oats': 1291, 'eat': 1292, 'soldier': 1293, 'aisy': 1294, 'arose': 1295, 'christmas': 1296, '1803': 1297, 'australia': 1298, 'marks': 1299, 'carried': 1300, 'rusty': 1301, 'iron': 1302, 'wains': 1303, 'mainsails': 1304, 'unfurled': 1305, 'curses': 1306, 'hurled': 1307, 'swell': 1308, 'moth': 1309, 'firelights': 1310, 'horses': 1311, 'rode': 1312, 'taking': 1313, 'hades': 1314, 'twilight': 1315, 'forty': 1316, 'slime': 1317, 'climate': 1318, 'bravery': 1319, 'ended': 1320, 'bond': 1321, 'rebel': 1322, 'iii': 1323, 'violin': 1324, 'clay': 1325, 'sooner': 1326, 'sport': 1327, 'colour': 1328, 'knows': 1329, 'earth': 1330, 'serve': 1331, 'clyde': 1332, 'mourn': 1333, 'weep': 1334, 'suffer': 1335, 'diamonds': 1336, 'queen': 1337, 'hung': 1338, 'tied': 1339, 'apprenticed': 1340, 'happiness': 1341, 'misfortune': 1342, 'follow': 1343, 'strolling': 1344, 'selling': 1345, 'bar': 1346, 'customer': 1347, 'slipped': 1348, 'luck': 1349, 'jury': 1350, 'trial': 1351, 'case': 1352, 'warning': 1353, 'liquor': 1354, 'porter': 1355, 'pleasures': 1356, 'fishing': 1357, 'farming': 1358, 'glens': 1359, 'softest': 1360, 'dripping': 1361, 'snare': 1362, 'lose': 1363, 'court': 1364, 'primrose': 1365, 'bee': 1366, 'hopeless': 1367, 'wonders': 1368, 'admiration': 1369, 'haunt': 1370, 'wherever': 1371, 'sands': 1372, 'purer': 1373, 'within': 1374, 'grieve': 1375, 'drumslieve': 1376, 'ballygrant': 1377, 'deepest': 1378, 'boatsman': 1379, 'ferry': 1380, 'childhood': 1381, 'reflections': 1382, 'boyhood': 1383, 'melting': 1384, 'roaming': 1385, 'reported': 1386, 'marble': 1387, 'stones': 1388, 'ink': 1389, 'support': 1390, 'drunk': 1391, 'seldom': 1392, 'sick': 1393, 'numbered': 1394, 'foam': 1395, 'compare': 1396, 'sights': 1397, 'coast': 1398, 'clare': 1399, 'kilkee': 1400, 'kilrush': 1401, 'watching': 1402, 'pheasants': 1403, 'homes': 1404, 'streams': 1405, 'dublins': 1406, 'cockles': 1407, 'mussels': 1408, 'fish': 1409, 'monger': 1410, 'ghost': 1411, 'wheels': 1412, 'eden': 1413, 'vanished': 1414, 'finea': 1415, 'halfway': 1416, 'cootehill': 1417, 'gruff': 1418, 'whispering': 1419, 'crow': 1420, 'newborn': 1421, 'babies': 1422, 'huff': 1423, 'start': 1424, 'sorrowful': 1425, 'squall': 1426, 'babys': 1427, 'toil': 1428, 'worn': 1429, 'fore': 1430, 'flute': 1431, 'yer': 1432, 'boot': 1433, 'magee': 1434, 'scruff': 1435, 'slanderin': 1436, 'marchin': 1437, 'assisted': 1438, 'drain': 1439, 'dudeen': 1440, 'puff': 1441, 'whisperings': 1442, 'barrin': 1443, 'chocolate': 1444, 'feegee': 1445, 'sort': 1446, 'moonshiny': 1447, 'stuff': 1448, 'addle': 1449, 'brain': 1450, 'ringin': 1451, 'glamour': 1452, 'gas': 1453, 'guff': 1454, 'whisper': 1455, 'oil': 1456, 'remarkable': 1457, 'policeman': 1458, 'bluff': 1459, 'maintain': 1460, 'guril': 1461, 'sic': 1462, 'passage': 1463, 'rough': 1464, 'borne': 1465, 'breeze': 1466, 
'boundless': 1467, 'stupendous': 1468, 'roll': 1469, 'thundering': 1470, 'motion': 1471, 'mermaids': 1472, 'fierce': 1473, 'tempest': 1474, 'gathers': 1475, 'oneill': 1476, 'odonnell': 1477, 'lucan': 1478, 'oconnell': 1479, 'brian': 1480, 'drove': 1481, 'danes': 1482, 'patrick': 1483, 'vermin': 1484, 'whose': 1485, 'benburb': 1486, 'blackwater': 1487, 'owen': 1488, 'roe': 1489, 'munroe': 1490, 'lambs': 1491, 'skip': 1492, 'views': 1493, 'enchanting': 1494, 'rostrevor': 1495, 'groves': 1496, 'lakes': 1497, 'ride': 1498, 'tide': 1499, 'majestic': 1500, 'shannon': 1501, 'sail': 1502, 'loch': 1503, 'neagh': 1504, 'ross': 1505, 'gorey': 1506, 'saxon': 1507, 'tory': 1508, 'soil': 1509, 'sanctified': 1510, 'enemies': 1511, 'links': 1512, 'encumbered': 1513, 'resound': 1514, 'hosannahs': 1515, 'bide': 1516, 'hushed': 1517, 'lying': 1518, 'kneel': 1519, 'ave': 1520, 'tread': 1521, 'fail': 1522, 'simply': 1523, 'gasworks': 1524, 'croft': 1525, 'dreamed': 1526, 'canal': 1527, 'factory': 1528, 'clouds': 1529, 'drifting': 1530, 'prowling': 1531, 'beat': 1532, 'springs': 1533, 'siren': 1534, 'docks': 1535, 'train': 1536, 'smelled': 1537, 'smokey': 1538, 'sharp': 1539, 'axe': 1540, 'steel': 1541, 'tempered': 1542, 'chop': 1543, 't': 1544, 'agree': 1545, 'leaning': 1546, 'weirs': 1547, 'ray': 1548, 'glow': 1549, 'changeless': 1550, 'constant': 1551, 'bounding': 1552, 'castles': 1553, 'sacked': 1554, 'scattered': 1555, 'fixed': 1556, 'endearing': 1557, 'gifts': 1558, 'fading': 1559, 'wouldst': 1560, 'adored': 1561, 'loveliness': 1562, 'ruin': 1563, 'itself': 1564, 'verdantly': 1565, 'unprofaned': 1566, 'fervor': 1567, 'faith': 1568, 'forgets': 1569, 'sunflower': 1570, 'rag': 1571, 'games': 1572, 'hold': 1573, 'defend': 1574, 'veteran': 1575, 'volunteers': 1576, 'pat': 1577, 'pearse': 1578, 'clark': 1579, 'macdonagh': 1580, 'macdiarmada': 1581, 'mcbryde': 1582, 'james': 1583, 'connolly': 1584, 'placed': 1585, 'machine': 1586, 'ranting': 1587, 'hour': 1588, 'bullet': 1589, 'stuck': 1590, 'craw': 1591, 'poisoning': 1592, 'ceannt': 1593, 'lions': 1594, 'union': 1595, 'poured': 1596, 'dismay': 1597, 'horror': 1598, 'englishmen': 1599, 'khaki': 1600, 'renown': 1601, 'fame': 1602, 'forefathers': 1603, 'blaze': 1604, 'priests': 1605, 'offer': 1606, 'charmin': 1607, 'variety': 1608, 'renownd': 1609, 'learnin': 1610, 'piety': 1611, 'advance': 1612, 'widout': 1613, 'impropriety': 1614, 'flowr': 1615, 'cho': 1616, 'powrfulest': 1617, 'preacher': 1618, 'tenderest': 1619, 'teacher': 1620, 'kindliest': 1621, 'donegal': 1622, 'talk': 1623, 'provost': 1624, 'trinity': 1625, 'famous': 1626, 'greek': 1627, 'latinity': 1628, 'divils': 1629, 'divinity': 1630, 'd': 1631, 'likes': 1632, 'logic': 1633, 'mythology': 1634, 'thayology': 1635, 'conchology': 1636, 'sinners': 1637, 'wishful': 1638, 'childer': 1639, 'avick': 1640, 'gad': 1641, 'flock': 1642, 'grandest': 1643, 'control': 1644, 'checking': 1645, 'coaxin': 1646, 'onaisy': 1647, 'lifting': 1648, 'avoidin': 1649, 'frivolity': 1650, 'seasons': 1651, 'innocent': 1652, 'jollity': 1653, 'playboy': 1654, 'claim': 1655, 'equality': 1656, 'comicality': 1657, 'bishop': 1658, 'lave': 1659, 'gaiety': 1660, 'laity': 1661, 'clergy': 1662, 'jewels': 1663, 'plundering': 1664, 'pillage': 1665, 'starved': 1666, 'cries': 1667, 'thems': 1668, 'bondage': 1669, 'fourth': 1670, 'tabhair': 1671, 'dom': 1672, 'lã¡mh': 1673, 'harmony': 1674, 'east': 1675, 'destroy': 1676, 'command': 1677, 'gesture': 1678, 'troubles': 1679, 'weak': 1680, 'peoples': 1681, 'creeds': 1682, 'lets': 1683, 'needs': 1684, 
'passion': 1685, 'fashion': 1686, 'guide': 1687, 'share': 1688, 'sparkling': 1689, 'meeting': 1690, 'iull': 1691, 'contented': 1692, 'ache': 1693, 'painful': 1694, 'wrote': 1695, 'twisted': 1696, 'twined': 1697, 'cheek': 1698, 'bedim': 1699, 'holds': 1700, 'smiles': 1701, 'scarcely': 1702, 'darkning': 1703, 'beyond': 1704, 'yearn': 1705, 'laughs': 1706, 'humble': 1707, 'brightest': 1708, 'gleam': 1709, 'forgot': 1710, 'pulled': 1711, 'comb': 1712, 'counting': 1713, 'knock': 1714, 'murray': 1715, 'fellow': 1716, 'hail': 1717, 'tumblin': 1718, 'apple': 1719, 'pie': 1720, 'gets': 1721, 'doleful': 1722, 'enemy': 1723, 'nearly': 1724, 'slew': 1725, 'queer': 1726, 'mild': 1727, 'legs': 1728, 'indeed': 1729, 'island': 1730, 'sulloon': 1731, 'flesh': 1732, 'yere': 1733, 'armless': 1734, 'boneless': 1735, 'chickenless': 1736, 'egg': 1737, 'yell': 1738, 'bowl': 1739, 'rolling': 1740, 'swearing': 1741, 'rattled': 1742, 'saber': 1743, 'deceiver': 1744, 'rig': 1745, 'um': 1746, 'du': 1747, 'rum': 1748, 'jar': 1749, 'shinin': 1750, 'coins': 1751, 'promised': 1752, 'vowed': 1753, 'devils': 1754, 'awakened': 1755, 'six': 1756, 'guards': 1757, 'numbers': 1758, 'odd': 1759, 'flew': 1760, 'mistaken': 1761, 'mollys': 1762, 'robbing': 1763, 'sentry': 1764, 'sligo': 1765, 'fishin': 1766, 'bowlin': 1767, 'others': 1768, 'railroad': 1769, 'ties': 1770, 'crossings': 1771, 'swamps': 1772, 'elevations': 1773, 'resolved': 1774, 'sunset': 1775, 'higher': 1776, 'win': 1777, 'allegators': 1778, 'wood': 1779, 'treated': 1780, 'shoulders': 1781, 'paint': 1782, 'picture': 1783, 'vain': 1784, 'returned': 1785, 'cottage': 1786, 'sociable': 1787, 'foaming': 1788, 'n': 1789, 'jeremy': 1790, 'lanigan': 1791, 'battered': 1792, 'hadnt': 1793, 'pound': 1794, 'farm': 1795, 'acres': 1796, 'party': 1797, 'listen': 1798, 'glisten': 1799, 'rows': 1800, 'ructions': 1801, 'invitation': 1802, 'minute': 1803, 'bees': 1804, 'cask': 1805, 'judy': 1806, 'odaly': 1807, 'milliner': 1808, 'wink': 1809, 'peggy': 1810, 'mcgilligan': 1811, 'lashings': 1812, 'punch': 1813, 'cakes': 1814, 'bacon': 1815, 'tea': 1816, 'nolans': 1817, 'dolans': 1818, 'ogradys': 1819, 'sounded': 1820, 'taras': 1821, 'hall': 1822, 'nelly': 1823, 'gray': 1824, 'rat': 1825, 'catchers': 1826, 'doing': 1827, 'kinds': 1828, 'nonsensical': 1829, 'polkas': 1830, 'whirligig': 1831, 'julia': 1832, 'banished': 1833, 'nonsense': 1834, 'twist': 1835, 'jig': 1836, 'mavrone': 1837, 'mad': 1838, 'ceiling': 1839, 'brooks': 1840, 'academy': 1841, 'learning': 1842, 'learn': 1843, 'couples': 1844, 'groups': 1845, 'accident': 1846, 'happened': 1847, 'terrance': 1848, 'mccarthy': 1849, 'finnertys': 1850, 'hoops': 1851, 'cried': 1852, 'meelia': 1853, 'murther': 1854, 'gathered': 1855, 'carmody': 1856, 'further': 1857, 'satisfaction': 1858, 'midst': 1859, 'kerrigan': 1860, 'declared': 1861, 'painted': 1862, 'suppose': 1863, 'morgan': 1864, 'powerful': 1865, 'stretched': 1866, 'smashed': 1867, 'chaneys': 1868, 'runctions': 1869, 'lick': 1870, 'phelim': 1871, 'mchugh': 1872, 'replied': 1873, 'introduction': 1874, 'kicked': 1875, 'terrible': 1876, 'hullabaloo': 1877, 'piper': 1878, 'strangled': 1879, 'squeezed': 1880, 'bellows': 1881, 'chanters': 1882, 'entangled': 1883, 'gaily': 1884, 'mairis': 1885, 'hillways': 1886, 'myrtle': 1887, 'bracken': 1888, 'sheilings': 1889, 'sake': 1890, 'rowans': 1891, 'herring': 1892, 'meal': 1893, 'peat': 1894, 'creel': 1895, 'bairns': 1896, 'weel': 1897, 'toast': 1898, 'soar': 1899, 'blackbird': 1900, 'note': 1901, 'linnet': 1902, 'lure': 1903, 'cozy': 1904, 
'catch': 1905, 'company': 1906, 'harm': 1907, 'wit': 1908, 'recall': 1909, 'leisure': 1910, 'awhile': 1911, 'sorely': 1912, 'ruby': 1913, 'enthralled': 1914, 'sorry': 1915, 'theyd': 1916, 'falls': 1917, 'lot': 1918, 'tuned': 1919, 'bough': 1920, 'cow': 1921, 'chanting': 1922, 'melodious': 1923, 'scarce': 1924, 'soothed': 1925, 'solace': 1926, 'courtesy': 1927, 'salute': 1928, 'amiable': 1929, 'captive': 1930, 'slave': 1931, 'future': 1932, 'banter': 1933, 'enamour': 1934, 'indies': 1935, 'afford': 1936, 'transparently': 1937, 'flame': 1938, 'add': 1939, 'fuel': 1940, 'grant': 1941, 'desire': 1942, 'expire': 1943, 'wealth': 1944, 'damer': 1945, 'african': 1946, 'devonshire': 1947, 'lamp': 1948, 'alladin': 1949, 'genie': 1950, 'also': 1951, 'withdraw': 1952, 'tease': 1953, 'single': 1954, 'airy': 1955, 'embarrass': 1956, 'besides': 1957, 'almanack': 1958, 'useless': 1959, 'date': 1960, 'ware': 1961, 'rate': 1962, 'fragrance': 1963, 'loses': 1964, 'consumed': 1965, 'october': 1966, 'knowing': 1967, 'steer': 1968, 'blast': 1969, 'danger': 1970, 'farthing': 1971, 'affection': 1972, 'enjoy': 1973, 'choose': 1974, 'killarneys': 1975, 'sister': 1976, 'pains': 1977, 'loss': 1978, 'tuam': 1979, 'saluted': 1980, 'drank': 1981, 'pint': 1982, 'smother': 1983, 'reap': 1984, 'cut': 1985, 'goblins': 1986, 'bought': 1987, 'brogues': 1988, 'rattling': 1989, 'bogs': 1990, 'frightning': 1991, 'dogs': 1992, 'hunt': 1993, 'hare': 1994, 'follol': 1995, 'rah': 1996, 'mullingar': 1997, 'rested': 1998, 'limbs': 1999, 'blithe': 2000, 'heartfrom': 2001, 'paddys': 2002, 'cure': 2003, 'lassies': 2004, 'laughing': 2005, 'curious': 2006, 'style': 2007, 'twould': 2008, 'bubblin': 2009, 'hired': 2010, 'wages': 2011, 'required': 2012, 'almost': 2013, 'deprived': 2014, 'stroll': 2015, 'quality': 2016, 'locality': 2017, 'something': 2018, 'wobblin': 2019, 'enquiring': 2020, 'rogue': 2021, 'brogue': 2022, 'wasnt': 2023, 'vogue': 2024, 'spirits': 2025, 'falling': 2026, 'jumped': 2027, 'aboard': 2028, 'pigs': 2029, 'rigs': 2030, 'jigs': 2031, 'bubbling': 2032, 'holyhead': 2033, 'wished': 2034, 'instead': 2035, 'bouys': 2036, 'liverpool': 2037, 'safely': 2038, 'fool': 2039, 'boil': 2040, 'temper': 2041, 'losing': 2042, 'abusing': 2043, 'shillelagh': 2044, 'nigh': 2045, 'hobble': 2046, 'load': 2047, 'hurray': 2048, 'joined': 2049, 'affray': 2050, 'quitely': 2051, 'cleared': 2052, 'host': 2053, 'march': 2054, 'faces': 2055, 'farmstead': 2056, 'fishers': 2057, 'ban': 2058, 'vengeance': 2059, 'hapless': 2060, 'about': 2061, 'hemp': 2062, 'rope': 2063, 'clung': 2064, 'grim': 2065, 'array': 2066, 'earnest': 2067, 'stalwart': 2068, 'stainless': 2069, 'banner': 2070, 'marching': 2071, 'torn': 2072, 'furious': 2073, 'odds': 2074, 'keen': 2075, 'toomebridge': 2076, 'treads': 2077, 'upwards': 2078, 'traveled': 2079, 'quarters': 2080, 'below': 2081, 'hogshead': 2082, 'stack': 2083, 'stagger': 2084, 'dig': 2085, 'hole': 2086, 'couple': 2087, 'scratch': 2088, 'consolation': 2089, 'tyrant': 2090, 'remorseless': 2091, 'foe': 2092, 'lift': 2093, 'stranded': 2094, 'prince': 2095, 'edward': 2096, 'coffee': 2097, 'trace': 2098, 'fiddlin': 2099, 'dime': 2100, 'shy': 2101, 'hello': 2102, 'wintry': 2103, 'yellow': 2104, 'somewhere': 2105, 'written': 2106, 'begin': 2107, 'tap': 2108, 'caught': 2109, 'leap': 2110, 'clumsy': 2111, 'graceful': 2112, 'fiddlers': 2113, 'everywhere': 2114, 'boots': 2115, 'laughtcr': 2116, 'suits': 2117, 'easter': 2118, 'gowns': 2119, 'sailors': 2120, 'pianos': 2121, 'setting': 2122, 'someones': 2123, 'hats': 2124, 'rack': 
2125, 'chair': 2126, 'wooden': 2127, 'feels': 2128, 'touch': 2129, 'awaitin': 2130, 'thc': 2131, 'fiddles': 2132, 'closet': 2133, 'strings': 2134, 'tbe': 2135, 'covers': 2136, 'buttoned': 2137, 'sometimes': 2138, 'melody': 2139, 'passes': 2140, 'slight': 2141, 'lack': 2142, 'moved': 2143, 'homeward': 2144, 'swan': 2145, 'moves': 2146, 'goods': 2147, 'gear': 2148, 'din': 2149, 'rude': 2150, 'wherein': 2151, 'dwell': 2152, 'abandon': 2153, 'energy': 2154, 'blight': 2155, 'praties': 2156, 'sheep': 2157, 'cattle': 2158, 'taxes': 2159, 'unpaid': 2160, 'redeem': 2161, 'bleak': 2162, 'landlord': 2163, 'sheriff': 2164, 'spleen': 2165, 'heaved': 2166, 'sigh': 2167, 'bade': 2168, 'goodbye': 2169, 'stony': 2170, 'anguish': 2171, 'seeing': 2172, 'feeble': 2173, 'frame': 2174, 'wrapped': 2175, 'c�ta': 2176, 'm�r': 2177, 'unseen': 2178, 'stern': 2179, 'rally': 2180, 'cheer': 2181, 'revenge': 2182, 'waking': 2183, 'wisdom': 2184, 'dwelling': 2185, 'battleshield': 2186, 'dignity': 2187, 'shelter': 2188, 'heed': 2189, 'inheritance': 2190, 'heavem': 2191, 'heaven': 2192, 'victory': 2193, 'reach': 2194, 'whatever': 2195, 'befall': 2196, 'ruler': 2197, 'pleasant': 2198, 'rambling': 2199, 'board': 2200, 'followed': 2201, 'shortly': 2202, 'anchor': 2203, '23rd': 2204, 'lrelands': 2205, 'daughters': 2206, 'crowds': 2207, 'assembled': 2208, 'fulfill': 2209, 'jovial': 2210, 'conversations': 2211, 'neighbors': 2212, 'turning': 2213, 'tailor': 2214, 'quigley': 2215, 'bould': 2216, 'britches': 2217, 'lived': 2218, 'flying': 2219, 'dove': 2220, 'hiii': 2221, 'dreamt': 2222, 'joking': 2223, 'manys': 2224, 'cock': 2225, 'shrill': 2226, 'awoke': 2227, 'california': 2228, 'miles': 2229, 'banbridge': 2230, 'july': 2231, 'boreen': 2232, 'sheen': 2233, 'coaxing': 2234, 'elf': 2235, 'shake': 2236, 'bantry': 2237, 'onward': 2238, 'sped': 2239, 'gazed': 2240, 'passerby': 2241, 'gem': 2242, 'irelands': 2243, 'travelled': 2244, 'hit': 2245, 'career': 2246, 'square': 2247, 'surrendered': 2248, 'tenant': 2249, 'shawl': 2250, 'gown': 2251, 'crossroads': 2252, 'dress': 2253, 'try': 2254, 'sheeps': 2255, 'deludhering': 2256, 'yoke': 2257, 'rust': 2258, 'plow': 2259, 'fireside': 2260, 'sits': 2261, 'whistle': 2262, 'changing': 2263, 'fright': 2264, 'downfall': 2265, 'cornwall': 2266, 'parlour': 2267, 'passing': 2268, 'william': 2269, 'betray': 2270, 'guinea': 2271, 'walking': 2272, 'mounted': 2273, 'platform': 2274, 'deny': 2275, 'walked': 2276, 'margin': 2277, 'lough': 2278, 'leane': 2279, 'bloomed': 2280, 'whom': 2281, 'cap': 2282, 'cloak': 2283, 'glossy': 2284, 'pail': 2285, 'palm': 2286, 'venus': 2287, 'bank': 2288, 'travelians': 2289, 'babes': 2290, 'freebirds': 2291, 'grew': 2292, 'matters': 2293, 'famine': 2294, 'rebelled': 2295, 'windswept': 2296, 'harbour': 2297, 'botany': 2298, 'whilst': 2299, 'wan': 2300, 'cloud': 2301, 'shannons': 2302, 'returnd': 2303, 'doubts': 2304, 'fears': 2305, 'aching': 2306, 'seemd': 2307, 'mingling': 2308, 'flood': 2309, 'path': 2310, 'wrath': 2311, 'lamenting': 2312, 'sudden': 2313, 'kissd': 2314, 'showrs': 2315, 'flowing': 2316, 'laughd': 2317, 'beam': 2318, 'soared': 2319, 'aloft': 2320, 'phantom': 2321, 'outspread': 2322, 'throbbing': 2323, 'hid': 2324, 'treasures': 2325, 'pots': 2326, 'tin': 2327, 'cans': 2328, 'mash': 2329, 'bran': 2330, 'barney': 2331, 'peeled': 2332, 'searching': 2333, 'connemara': 2334, 'butcher': 2335, 'quart': 2336, 'bottle': 2337, 'help': 2338, 'gate': 2339, 'glory': 2340, 'lane': 2341, 'village': 2342, 'church': 2343, 'spire': 2344, 'graveyard': 2345, 'baby': 2346, 
'blessing': 2347, 'hoping': 2348, 'trust': 2349, 'strength': 2350, 'thank': 2351, 'bidding': 2352, 'bread': 2353, 'shines': 2354, 'fifty': 2355, 'often': 2356, 'shut': 2357, 'frisky': 2358, 'pig': 2359, 'whisky': 2360, 'uncle': 2361, 'enlisted': 2362, 'trudged': 2363, 'bosom': 2364, 'daisy': 2365, 'drubbing': 2366, 'shirts': 2367, 'battle': 2368, 'blows': 2369, 'pate': 2370, 'bothered': 2371, 'rarely': 2372, 'dropped': 2373, 'honest': 2374, 'thinks': 2375, 'eight': 2376, 'score': 2377, 'basin': 2378, 'zoo': 2379, 'everybody': 2380, 'calls': 2381, 'trades': 2382, 'dinner': 2383, 'slip': 2384, 'corner': 2385, 'barn': 2386, 'currabawn': 2387, 'shocking': 2388, 'wet': 2389, 'raindrops': 2390, 'rats': 2391, 'peek': 2392, 'waken': 2393, 'spotted': 2394, 'apron': 2395, 'calico': 2396, 'blouse': 2397, 'frighten': 2398, 'afraid': 2399, 'flaxen': 2400, 'haired': 2401, 'rags': 2402, 'tags': 2403, 'leggins': 2404, 'collar': 2405, 'tie': 2406, 'goggles': 2407, 'fashioned': 2408, 'bag': 2409, 'bulging': 2410, 'sack': 2411, 'peeping': 2412, 'skin': 2413, 'rink': 2414, 'doodle': 2415, 'getting': 2416, 'raked': 2417, 'gladness': 2418, 'tuning': 2419, 'fills': 2420, 'eily': 2421, 'prouder': 2422, 'thady': 2423, 'boldly': 2424, 'lasses': 2425, 'fled': 2426, 'silent': 2427, 'glad': 2428, 'echo': 2429, 'companions': 2430, 'soars': 2431, 'enchanted': 2432, 'granted': 2433, 'adoration': 2434, 'gives': 2435, 'joyous': 2436, 'elation': 2437, 'covered': 2438, 'winter': 2439, 'riding': 2440, 'cherry': 2441, 'coal': 2442, 'falter': 2443, 'bowed': 2444, 'bonnet': 2445, 'courteous': 2446, 'looks': 2447, 'engaging': 2448, 'sell': 2449, 'purse': 2450, 'yearly': 2451, 'need': 2452, 'market': 2453, 'gain': 2454, 'dearly': 2455, 'tarry': 2456, 'although': 2457, 'parlay': 2458, 'ranks': 2459, 'girded': 2460, 'slung': 2461, 'warrior': 2462, 'bard': 2463, 'betrays': 2464, 'rights': 2465, 'faithful': 2466, 'chords': 2467, 'asunder': 2468, 'sully': 2469, 'bravry': 2470, 'londons': 2471, 'sight': 2472, 'workin': 2473, 'sow': 2474, 'wheat': 2475, 'gangs': 2476, 'sweep': 2477, 'expressed': 2478, 'london': 2479, 'top': 2480, 'dresses': 2481, 'bath': 2482, 'startin': 2483, 'fashions': 2484, 'mccree': 2485, 'nature': 2486, 'designed': 2487, 'complexions': 2488, 'cream': 2489, 'regard': 2490, 'sip': 2491, 'colors': 2492, 'wait': 2493, 'waitin': 2494, 'sweeps': 2495, 'beauing': 2496, 'belling': 2497, 'windows': 2498, 'cursing': 2499, 'faster': 2500, 'waiters': 2501, 'bailiffs': 2502, 'duns': 2503, 'bacchus': 2504, 'begotten': 2505, 'politicians': 2506, 'funds': 2507, 'dadda': 2508, 'living': 2509, 'drives': 2510, 'having': 2511, 'racking': 2512, 'tenants': 2513, 'stewards': 2514, 'teasing': 2515, 'raising': 2516, 'wishing': 2517, 'sunny': 2518, 'doves': 2519, 'coo': 2520, 'neath': 2521, 'sunbeam': 2522, 'robin': 2523, 'waters': 2524, 'larks': 2525, 'join': 2526, 'breaks': 2527, 'oftimes': 2528, 'lilies': 2529, 'declining': 2530, 'vale': 2531, 'shades': 2532, 'mantle': 2533, 'spreading': 2534, 'listening': 2535, 'shedding': 2536, 'beginning': 2537, 'spinning': 2538, 'blind': 2539, 'drowsily': 2540, 'knitting': 2541, 'cheerily': 2542, 'noiselessly': 2543, 'whirring': 2544, 'foots': 2545, 'stirring': 2546, 'sprightly': 2547, 'chara': 2548, 'tapping': 2549, 'ivy': 2550, 'flapping': 2551, 'somebody': 2552, 'sighing': 2553, 'autumn': 2554, 'noise': 2555, 'chirping': 2556, 'holly': 2557, 'shoving': 2558, 'wrong': 2559, 'coolin': 2560, 'casement': 2561, 'rove': 2562, 'moons': 2563, 'brightly': 2564, 'shakes': 2565, 'lays': 2566, 'longs': 2567, 
'lingers': 2568, 'glance': 2569, 'puts': 2570, 'lazily': 2571, 'easily': 2572, 'lowly': 2573, 'reels': 2574, 'noiseless': 2575, 'leaps': 2576, 'ere': 2577, 'lovers': 2578, 'roved': 2579, 'verdant': 2580, 'braes': 2581, 'skreen': 2582, 'countrie': 2583, 'foreign': 2584, 'strand': 2585, 'dewy': 2586, 'climb': 2587, 'rob': 2588, 'boat': 2589, 'sails': 2590, 'loaded': 2591, 'sink': 2592, 'leaned': 2593, 'oak': 2594, 'trusty': 2595, 'false': 2596, 'reached': 2597, 'pricked': 2598, 'waxes': 2599, 'fades': 2600, 'wholl': 2601, 'cockle': 2602, 'gloom': 2603, 'news': 2604, 'forbid': 2605, 'patricks': 2606, 'napper': 2607, 'tandy': 2608, 'hows': 2609, 'distressful': 2610, 'englands': 2611, 'remind': 2612, 'pull': 2613, 'throw': 2614, 'sod': 2615, 'root': 2616, 'underfoot': 2617, 'laws': 2618, 'blades': 2619, 'growin': 2620, 'dare': 2621, 'show': 2622, 'caubeen': 2623, 'year': 2624, 'returning': 2625, 'store': 2626, 'ale': 2627, 'frequent': 2628, 'landlady': 2629, 'credit': 2630, 'custom': 2631, 'sovereigns': 2632, 'landladys': 2633, 'wines': 2634, 'confess': 2635, 'pardon': 2636, 'prodigal': 2637, 'caress': 2638, 'forgive': 2639, 'ofttimes': 2640, 'wondering': 2641, 'powr': 2642, 'beguile': 2643, 'teardrop': 2644, 'lilting': 2645, 'laughters': 2646, 'twinkle': 2647, 'lilt': 2648, 'seems': 2649, 'linnets': 2650, 'real': 2651, 'regret': 2652, 'throughout': 2653, 'youths': 2654, 'chance': 2655, 'spied': 2656, 'receiver': 2657, 'counted': 2658, 'penny': 2659, 'bu': 2660, 'rungum': 2661, 'chamber': 2662, 'course': 2663, 'charges': 2664, 'filled': 2665, 'ready': 2666, 'footmen': 2667, 'likewise': 2668, 'draw': 2669, 'pistol': 2670, 'couldnt': 2671, 'shoot': 2672, 'robbin': 2673, 'jailer': 2674, 'tight': 2675, 'fisted': 2676, 'army': 2677, 'stationed': 2678, 'cork': 2679, 'roamin': 2680, 'swear': 2681, 'treat': 2682, 'sportin': 2683, 'hurley': 2684, 'bollin': 2685, 'maids': 2686, 'summertime': 2687, 'pluck': 2688, 'yon': 2689}
total words: 2690
###Markdown
Preprocessing the Dataset
Next, you will generate the inputs and labels for your model. The process will be identical to the previous lab. The `xs` or inputs to the model will be padded sequences, while the `ys` or labels are one-hot encoded arrays.
###Code
# Initialize the sequences list
input_sequences = []
# Loop over every line
for line in corpus:
# Tokenize the current line
token_list = tokenizer.texts_to_sequences([line])[0]
# Loop over the line several times to generate the subphrases
for i in range(1, len(token_list)):
# Generate the subphrase
n_gram_sequence = token_list[:i+1]
# Append the subphrase to the sequences list
input_sequences.append(n_gram_sequence)
# Get the length of the longest line
max_sequence_len = max([len(x) for x in input_sequences])
# Pad all sequences
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
# Create inputs and label by splitting the last token in the subphrases
xs, labels = input_sequences[:,:-1],input_sequences[:,-1]
# Convert the label into one-hot arrays
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
###Output
_____no_output_____
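###Markdown
To see what this preprocessing does, here is a minimal sketch on a hypothetical two-line toy corpus (not the Irish lyrics); it only illustrates how each line expands into prefix subphrases that are then pre-padded to a common length.
###Code
# Minimal sketch: how one line becomes several pre-padded subphrases (toy corpus for illustration only)
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
toy_corpus = ['come all ye maidens young', 'maidens young and fair']
toy_tokenizer = Tokenizer()
toy_tokenizer.fit_on_texts(toy_corpus)
toy_sequences = []
for toy_line in toy_corpus:
    toy_tokens = toy_tokenizer.texts_to_sequences([toy_line])[0]
    # Every prefix of length >= 2 becomes one training example
    for i in range(1, len(toy_tokens)):
        toy_sequences.append(toy_tokens[:i + 1])
toy_max_len = max(len(seq) for seq in toy_sequences)
# The last column of each row is the label; the columns before it are the input
print(np.array(pad_sequences(toy_sequences, maxlen=toy_max_len, padding='pre')))
###Output
_____no_output_____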
###Markdown
You can then print some of the examples as a sanity check.
###Code
# Get sample sentence
sentence = corpus[0].split()
print(f'sample sentence: {sentence}')
# Initialize token list
token_list = []
# Look up the indices of each word and append to the list
for word in sentence:
token_list.append(tokenizer.word_index[word])
# Print the token list
print(token_list)
# Pick element
elem_number = 5
# Print token list and phrase
print(f'token list: {xs[elem_number]}')
print(f'decoded to text: {tokenizer.sequences_to_texts([xs[elem_number]])}')
# Print label
print(f'one-hot label: {ys[elem_number]}')
print(f'index of label: {np.argmax(ys[elem_number])}')
# Pick element
elem_number = 4
# Print token list and phrase
print(f'token list: {xs[elem_number]}')
print(f'decoded to text: {tokenizer.sequences_to_texts([xs[elem_number]])}')
# Print label
print(f'one-hot label: {ys[elem_number]}')
print(f'index of label: {np.argmax(ys[elem_number])}')
###Output
token list: [ 0 0 0 0 0 0 0 0 0 0 51 12 96 1217
48]
decoded to text: ['come all ye maidens young']
one-hot label: [0. 0. 1. ... 0. 0. 0.]
index of label: 2
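###Markdown
As an extra sanity check, you can also map the label index back to the actual word with the tokenizer's reverse lookup; this short sketch reuses the same `elem_number` as above.
###Code
# Decode the one-hot label of the example above back to its word (extra sanity check)
label_index = np.argmax(ys[elem_number])
print(f'label word: {tokenizer.index_word[label_index]}')
###Output
_____no_output_____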
###Markdown
Build and compile the Model
Next, you will build and compile the model. We placed some of the hyperparameters at the top of the code cell so you can easily tweak it later if you want.
###Code
# Hyperparameters
embedding_dim = 100
lstm_units = 150
learning_rate = 0.01
# Build the model
model = Sequential([
Embedding(total_words, embedding_dim, input_length=max_sequence_len-1),
Bidirectional(LSTM(lstm_units)),
Dense(total_words, activation='softmax')
])
# Use categorical crossentropy because this is a multi-class problem
model.compile(
loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
metrics=['accuracy']
)
# Print the model summary
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, 15, 100) 269000
bidirectional (Bidirectiona (None, 300) 301200
l)
dense (Dense) (None, 2690) 809690
=================================================================
Total params: 1,379,890
Trainable params: 1,379,890
Non-trainable params: 0
_________________________________________________________________
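###Markdown
If you are curious where the parameter counts in the summary come from, the short sketch below reproduces them from the hyperparameters using the standard Keras formulas (embedding: vocabulary size times embedding dimension; bidirectional LSTM: two directions of four gates each; dense: one weight per input unit plus a bias for every output word).
###Code
# Reproduce the parameter counts shown in the summary above from the hyperparameters
embedding_params = total_words * embedding_dim                                     # 2690 * 100
bilstm_params = 2 * 4 * ((embedding_dim + lstm_units) * lstm_units + lstm_units)   # both directions, 4 gates each
dense_params = (2 * lstm_units + 1) * total_words                                  # 300 inputs plus a bias per output word
print(embedding_params, bilstm_params, dense_params,
      embedding_params + bilstm_params + dense_params)
###Output
_____no_output_____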
###Markdown
Train the model
From the model summary above, you'll notice that the number of trainable params is much larger than the one in the previous lab. Consequently, that usually means a slower training time. It will take roughly 7 seconds per epoch with the GPU enabled in Colab and you'll reach around 76% accuracy after 100 epochs.
###Code
epochs = 100
# Train the model
history = model.fit(xs, ys, epochs=epochs)
###Output
Epoch 1/100
377/377 [==============================] - 12s 13ms/step - loss: 6.6624 - accuracy: 0.0743
Epoch 2/100
377/377 [==============================] - 5s 13ms/step - loss: 5.7789 - accuracy: 0.1134
Epoch 3/100
377/377 [==============================] - 5s 13ms/step - loss: 4.9567 - accuracy: 0.1603
Epoch 4/100
377/377 [==============================] - 9s 23ms/step - loss: 4.0958 - accuracy: 0.2250
Epoch 5/100
377/377 [==============================] - 5s 14ms/step - loss: 3.2908 - accuracy: 0.3183
Epoch 6/100
377/377 [==============================] - 5s 14ms/step - loss: 2.6421 - accuracy: 0.4158
Epoch 7/100
377/377 [==============================] - 5s 13ms/step - loss: 2.1288 - accuracy: 0.5058
Epoch 8/100
377/377 [==============================] - 5s 13ms/step - loss: 1.8651 - accuracy: 0.5619
Epoch 9/100
377/377 [==============================] - 5s 13ms/step - loss: 1.8455 - accuracy: 0.5704
Epoch 10/100
377/377 [==============================] - 5s 12ms/step - loss: 1.9100 - accuracy: 0.5522
Epoch 11/100
377/377 [==============================] - 5s 12ms/step - loss: 1.5292 - accuracy: 0.6266
Epoch 12/100
377/377 [==============================] - 5s 12ms/step - loss: 1.3195 - accuracy: 0.6789
Epoch 13/100
377/377 [==============================] - 4s 12ms/step - loss: 1.1632 - accuracy: 0.7147
Epoch 14/100
377/377 [==============================] - 4s 12ms/step - loss: 1.0842 - accuracy: 0.7359
Epoch 15/100
377/377 [==============================] - 5s 12ms/step - loss: 1.0558 - accuracy: 0.7421
Epoch 16/100
377/377 [==============================] - 5s 12ms/step - loss: 1.0833 - accuracy: 0.7320
Epoch 17/100
377/377 [==============================] - 5s 12ms/step - loss: 1.2672 - accuracy: 0.6916
Epoch 18/100
377/377 [==============================] - 5s 12ms/step - loss: 1.4212 - accuracy: 0.6437
Epoch 19/100
377/377 [==============================] - 5s 12ms/step - loss: 1.3526 - accuracy: 0.6565
Epoch 20/100
377/377 [==============================] - 5s 13ms/step - loss: 1.1529 - accuracy: 0.7073
Epoch 21/100
377/377 [==============================] - 5s 12ms/step - loss: 1.0316 - accuracy: 0.7371
Epoch 22/100
377/377 [==============================] - 4s 12ms/step - loss: 0.9620 - accuracy: 0.7556
Epoch 23/100
377/377 [==============================] - 4s 12ms/step - loss: 0.9342 - accuracy: 0.7654
Epoch 24/100
377/377 [==============================] - 4s 12ms/step - loss: 0.9536 - accuracy: 0.7579
Epoch 25/100
377/377 [==============================] - 4s 12ms/step - loss: 1.0658 - accuracy: 0.7282
Epoch 26/100
377/377 [==============================] - 4s 12ms/step - loss: 1.1425 - accuracy: 0.7076
Epoch 27/100
377/377 [==============================] - 4s 12ms/step - loss: 1.1501 - accuracy: 0.7043
Epoch 28/100
377/377 [==============================] - 5s 13ms/step - loss: 1.1479 - accuracy: 0.7119
Epoch 29/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0611 - accuracy: 0.7293
Epoch 30/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9942 - accuracy: 0.7424
Epoch 31/100
377/377 [==============================] - 5s 12ms/step - loss: 0.9565 - accuracy: 0.7532
Epoch 32/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9157 - accuracy: 0.7687
Epoch 33/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9315 - accuracy: 0.7632
Epoch 34/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0534 - accuracy: 0.7328
Epoch 35/100
377/377 [==============================] - 5s 13ms/step - loss: 1.1424 - accuracy: 0.7089
Epoch 36/100
377/377 [==============================] - 5s 12ms/step - loss: 1.2541 - accuracy: 0.6834
Epoch 37/100
377/377 [==============================] - 5s 13ms/step - loss: 1.1307 - accuracy: 0.7102
Epoch 38/100
377/377 [==============================] - 5s 12ms/step - loss: 1.0008 - accuracy: 0.7480
Epoch 39/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9327 - accuracy: 0.7654
Epoch 40/100
377/377 [==============================] - 5s 13ms/step - loss: 0.8675 - accuracy: 0.7804
Epoch 41/100
377/377 [==============================] - 5s 13ms/step - loss: 0.8628 - accuracy: 0.7791
Epoch 42/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9397 - accuracy: 0.7621
Epoch 43/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9848 - accuracy: 0.7503
Epoch 44/100
377/377 [==============================] - 5s 13ms/step - loss: 1.1162 - accuracy: 0.7186
Epoch 45/100
377/377 [==============================] - 5s 13ms/step - loss: 1.1271 - accuracy: 0.7127
Epoch 46/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0892 - accuracy: 0.7221
Epoch 47/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0234 - accuracy: 0.7363
Epoch 48/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9484 - accuracy: 0.7529
Epoch 49/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9310 - accuracy: 0.7604
Epoch 50/100
377/377 [==============================] - 5s 13ms/step - loss: 0.8657 - accuracy: 0.7760
Epoch 51/100
377/377 [==============================] - 5s 14ms/step - loss: 0.8812 - accuracy: 0.7755
Epoch 52/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9311 - accuracy: 0.7600
Epoch 53/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0388 - accuracy: 0.7336
Epoch 54/100
377/377 [==============================] - 5s 13ms/step - loss: 1.1026 - accuracy: 0.7207
Epoch 55/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0074 - accuracy: 0.7377
Epoch 56/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9333 - accuracy: 0.7578
Epoch 57/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9099 - accuracy: 0.7633
Epoch 58/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9027 - accuracy: 0.7703
Epoch 59/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9188 - accuracy: 0.7628
Epoch 60/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9288 - accuracy: 0.7577
Epoch 61/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9719 - accuracy: 0.7484
Epoch 62/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0426 - accuracy: 0.7305
Epoch 63/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0666 - accuracy: 0.7266
Epoch 64/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9873 - accuracy: 0.7447
Epoch 65/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9243 - accuracy: 0.7640
Epoch 66/100
377/377 [==============================] - 5s 13ms/step - loss: 0.8992 - accuracy: 0.7695
Epoch 67/100
377/377 [==============================] - 5s 12ms/step - loss: 0.9328 - accuracy: 0.7618
Epoch 68/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9412 - accuracy: 0.7632
Epoch 69/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9210 - accuracy: 0.7635
Epoch 70/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9243 - accuracy: 0.7690
Epoch 71/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9138 - accuracy: 0.7706
Epoch 72/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9073 - accuracy: 0.7660
Epoch 73/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9899 - accuracy: 0.7504
Epoch 74/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0726 - accuracy: 0.7345
Epoch 75/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0791 - accuracy: 0.7300
Epoch 76/100
377/377 [==============================] - 5s 13ms/step - loss: 1.1067 - accuracy: 0.7236
Epoch 77/100
377/377 [==============================] - 5s 12ms/step - loss: 1.0087 - accuracy: 0.7402
Epoch 78/100
377/377 [==============================] - 5s 12ms/step - loss: 0.9395 - accuracy: 0.7583
Epoch 79/100
377/377 [==============================] - 5s 13ms/step - loss: 0.8820 - accuracy: 0.7756
Epoch 80/100
377/377 [==============================] - 5s 12ms/step - loss: 0.8302 - accuracy: 0.7907
Epoch 81/100
377/377 [==============================] - 5s 12ms/step - loss: 0.8078 - accuracy: 0.7952
Epoch 82/100
377/377 [==============================] - 5s 12ms/step - loss: 0.8118 - accuracy: 0.7961
Epoch 83/100
377/377 [==============================] - 5s 13ms/step - loss: 0.8963 - accuracy: 0.7740
Epoch 84/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9900 - accuracy: 0.7509
Epoch 85/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0378 - accuracy: 0.7346
Epoch 86/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0878 - accuracy: 0.7278
Epoch 87/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0381 - accuracy: 0.7392
Epoch 88/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9745 - accuracy: 0.7550
Epoch 89/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9230 - accuracy: 0.7650
Epoch 90/100
377/377 [==============================] - 5s 13ms/step - loss: 0.8738 - accuracy: 0.7761
Epoch 91/100
377/377 [==============================] - 5s 13ms/step - loss: 0.8617 - accuracy: 0.7858
Epoch 92/100
377/377 [==============================] - 5s 12ms/step - loss: 0.8541 - accuracy: 0.7838
Epoch 93/100
377/377 [==============================] - 5s 12ms/step - loss: 0.9146 - accuracy: 0.7667
Epoch 94/100
377/377 [==============================] - 5s 12ms/step - loss: 0.9843 - accuracy: 0.7559
Epoch 95/100
377/377 [==============================] - 5s 13ms/step - loss: 0.9723 - accuracy: 0.7536
Epoch 96/100
377/377 [==============================] - 6s 15ms/step - loss: 0.9471 - accuracy: 0.7609
Epoch 97/100
377/377 [==============================] - 6s 16ms/step - loss: 0.9598 - accuracy: 0.7560
Epoch 98/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0502 - accuracy: 0.7408
Epoch 99/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0430 - accuracy: 0.7407
Epoch 100/100
377/377 [==============================] - 5s 13ms/step - loss: 1.0541 - accuracy: 0.7369
###Markdown
You can visualize the accuracy below to see how it fluctuates as the training progresses.
###Code
import matplotlib.pyplot as plt
# Plot utility
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.show()
# Visualize the accuracy
plot_graphs(history, 'accuracy')
###Output
_____no_output_____
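###Markdown
The same utility also works for the loss, which `model.fit()` records in the history alongside the accuracy; plotting it can make the fluctuations easier to interpret.
###Code
# Visualize the training loss with the same plotting utility
plot_graphs(history, 'loss')
###Output
_____no_output_____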
###Markdown
Generating Text
Now you can let the model make its own songs or poetry! Because it is trained on a much larger corpus, the results below should contain fewer repetitions than before. The code below picks the next word based on the highest probability output.
###Code
# Define seed text
seed_text = "help me obi-wan kinobi youre my only hope"
# Define total words to predict
next_words = 100
# Loop until desired length is reached
for _ in range(next_words):
# Convert the seed text to a token sequence
token_list = tokenizer.texts_to_sequences([seed_text])[0]
# Pad the sequence
token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
# Feed to the model and get the probabilities for each index
probabilities = model.predict(token_list)
# Get the index with the highest probability
predicted = np.argmax(probabilities, axis=-1)[0]
# Ignore if index is 0 because that is just the padding.
if predicted != 0:
# Look up the word associated with the index.
output_word = tokenizer.index_word[predicted]
# Combine with the seed text
seed_text += " " + output_word
# Print the result
print(seed_text)
###Output
help me obi-wan kinobi youre my only hope to prove true love seen he seen i lived gone in gone the friends and gone and sing enough in spirit were lost good sea and please neer have ye look so queer and gone and merrily ringing and moving your eyes they were covered with snow white to erin away i venture and gone and left no chains shall her toes her rosy brogue golden than any this drowsy town they got know not back revealing upon fly again barrow proud gone by now as rings as plenty as sing only fellows of course were bare sped so dearly
###Markdown
Here again is the code that gets the top 3 predictions and picks one at random.
###Code
# Define seed text
seed_text = "help me obi-wan kinobi youre my only hope"
# Define total words to predict
next_words = 100
# Loop until desired length is reached
for _ in range(next_words):
# Convert the seed text to a token sequence
token_list = tokenizer.texts_to_sequences([seed_text])[0]
# Pad the sequence
token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
# Feed to the model and get the probabilities for each index
probabilities = model.predict(token_list)
# Pick a random number from [1,2,3]
choice = np.random.choice([1,2,3])
# Sort the probabilities in ascending order
# and get the random choice from the end of the array
predicted = np.argsort(probabilities)[0][-choice]
# Ignore if index is 0 because that is just the padding.
if predicted != 0:
# Look up the word associated with the index.
output_word = tokenizer.index_word[predicted]
# Combine with the seed text
seed_text += " " + output_word
# Print the result
print(seed_text)
###Output
help me obi-wan kinobi youre my only hope paddy on so in july so over wid more thats no longer gone it must ive ended my violin without man glisten well no more till me rarely she no purer rate well rising above is quite wild and wide him gone and green there for the more birds chirping all the sea and the crown rag ran with whiskey and gone by each eyes gone her darling since but gone for their know all drown in the morning before upon knew ye widout hat and all lips mary that i smoke now in prison wall now swings far ones
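###Markdown
A further variant you can try is sampling the next word from the full softmax distribution instead of only the top 3. The sketch below assumes the same `tokenizer`, `model`, and `max_sequence_len` defined earlier, and renormalizes the probabilities to guard against floating-point rounding.
###Code
# Sketch: generate text by sampling from the full probability distribution
seed_text = "help me obi-wan kinobi youre my only hope"
next_words = 100
for _ in range(next_words):
    # Convert the seed text to a padded token sequence
    token_list = tokenizer.texts_to_sequences([seed_text])[0]
    token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
    # Get the probability of every word in the vocabulary
    probabilities = model.predict(token_list)[0]
    # Renormalize in float64 so the probabilities sum to exactly 1 for np.random.choice
    probabilities = probabilities.astype('float64')
    probabilities /= probabilities.sum()
    # Draw one index according to those probabilities
    predicted = np.random.choice(len(probabilities), p=probabilities)
    # Ignore index 0 because that is just the padding
    if predicted != 0:
        seed_text += " " + tokenizer.index_word[predicted]
# Print the result
print(seed_text)
###Output
_____no_output_____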
###Markdown
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
**Note:** This notebook can run using TensorFlow 2.5.0
###Code
#!pip install tensorflow==2.5.0
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
import numpy as np
# irish-lyrics-eof.txt
!gdown --id 15UqmiIm0xwh9mt0IYq2z3jHaauxQSTQT
tokenizer = Tokenizer()
data = open('./irish-lyrics-eof.txt').read()
corpus = data.lower().split("\n")
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
print(tokenizer.word_index)
print(total_words)
input_sequences = []
for line in corpus:
token_list = tokenizer.texts_to_sequences([line])[0]
for i in range(1, len(token_list)):
n_gram_sequence = token_list[:i+1]
input_sequences.append(n_gram_sequence)
# pad sequences
max_sequence_len = max([len(x) for x in input_sequences])
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
# create predictors and label
xs, labels = input_sequences[:,:-1],input_sequences[:,-1]
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
print(tokenizer.word_index['in'])
print(tokenizer.word_index['the'])
print(tokenizer.word_index['town'])
print(tokenizer.word_index['of'])
print(tokenizer.word_index['athy'])
print(tokenizer.word_index['one'])
print(tokenizer.word_index['jeremy'])
print(tokenizer.word_index['lanigan'])
print(xs[6])
print(ys[6])
print(xs[5])
print(ys[5])
print(tokenizer.word_index)
model = Sequential()
model.add(Embedding(total_words, 100, input_length=max_sequence_len-1)) # 100
model.add(Bidirectional(LSTM(150)))
model.add(Dense(total_words, activation='softmax'))
adam = Adam(learning_rate=0.01)  # `lr` is a deprecated alias for `learning_rate`
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
#earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')
history = model.fit(xs, ys, epochs=100, verbose=1)
model.summary()
import matplotlib.pyplot as plt
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.show()
plot_graphs(history, 'accuracy')
seed_text = "I've got a bad feeling about this"
next_words = 100
for _ in range(next_words):
token_list = tokenizer.texts_to_sequences([seed_text])[0]
token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
predicted = model.predict_classes(token_list, verbose=0)
output_word = ""
for word, index in tokenizer.word_index.items():
if index == predicted:
output_word = word
break
seed_text += " " + output_word
print(seed_text)
###Output
_____no_output_____
###Markdown
Ungraded Lab: Generating Text from Irish Lyrics
In the previous lab, you trained a model on just a single song. You might have found that the output text quickly becomes gibberish or repetitive. Even if you tweak the hyperparameters, the model will still be limited by its vocabulary of only 263 words. The model will be more flexible if you train it on a much larger corpus, and that's what you'll be doing in this lab. You will use lyrics from more Irish songs and then see what the generated text looks like. You will also see how this impacts the process from data preparation to model training. Let's get started!
Imports
###Code
import tensorflow as tf
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding, LSTM, Dense, Bidirectional
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
###Output
_____no_output_____
###Markdown
Building the Word Vocabulary
You will first download the lyrics dataset. The lyrics are from a compilation of traditional Irish songs, and you can see them [here](https://github.com/https-deeplearning-ai/tensorflow-1-public/blob/main/C3/W4/misc/Laurences_generated_poetry.txt).
###Code
# Download the dataset
!gdown --id 15UqmiIm0xwh9mt0IYq2z3jHaauxQSTQT
###Output
_____no_output_____
###Markdown
Next, you will lowercase and split the plain text into a list of sentences:
###Code
# Load the dataset
data = open('./irish-lyrics-eof.txt').read()
# Lowercase and split the text
corpus = data.lower().split("\n")
# Preview the result
print(corpus)
###Output
_____no_output_____
###Markdown
From here, you can initialize the `Tokenizer` class and generate the word index dictionary:
###Code
# Initialize the Tokenizer class
tokenizer = Tokenizer()
# Generate the word index dictionary
tokenizer.fit_on_texts(corpus)
# Define the total words. You add 1 for the index `0` which is just the padding token.
total_words = len(tokenizer.word_index) + 1
print(f'word index dictionary: {tokenizer.word_index}')
print(f'total words: {total_words}')
###Output
_____no_output_____
###Markdown
Preprocessing the Dataset
Next, you will generate the inputs and labels for your model. The process will be identical to the previous lab. The `xs` or inputs to the model will be padded sequences, while the `ys` or labels are one-hot encoded arrays.
###Code
# Initialize the sequences list
input_sequences = []
# Loop over every line
for line in corpus:
# Tokenize the current line
token_list = tokenizer.texts_to_sequences([line])[0]
# Loop over the line several times to generate the subphrases
for i in range(1, len(token_list)):
# Generate the subphrase
n_gram_sequence = token_list[:i+1]
# Append the subphrase to the sequences list
input_sequences.append(n_gram_sequence)
# Get the length of the longest line
max_sequence_len = max([len(x) for x in input_sequences])
# Pad all sequences
input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre'))
# Create inputs and label by splitting the last token in the subphrases
xs, labels = input_sequences[:,:-1],input_sequences[:,-1]
# Convert the label into one-hot arrays
ys = tf.keras.utils.to_categorical(labels, num_classes=total_words)
###Output
_____no_output_____
###Markdown
You can then print some of the examples as a sanity check.
###Code
# Get sample sentence
sentence = corpus[0].split()
print(f'sample sentence: {sentence}')
# Initialize token list
token_list = []
# Look up the indices of each word and append to the list
for word in sentence:
token_list.append(tokenizer.word_index[word])
# Print the token list
print(token_list)
# Pick element
elem_number = 5
# Print token list and phrase
print(f'token list: {xs[elem_number]}')
print(f'decoded to text: {tokenizer.sequences_to_texts([xs[elem_number]])}')
# Print label
print(f'one-hot label: {ys[elem_number]}')
print(f'index of label: {np.argmax(ys[elem_number])}')
# Pick element
elem_number = 4
# Print token list and phrase
print(f'token list: {xs[elem_number]}')
print(f'decoded to text: {tokenizer.sequences_to_texts([xs[elem_number]])}')
# Print label
print(f'one-hot label: {ys[elem_number]}')
print(f'index of label: {np.argmax(ys[elem_number])}')
###Output
_____no_output_____
###Markdown
Build and compile the Model
Next, you will build and compile the model. We placed some of the hyperparameters at the top of the code cell so you can easily tweak them later if you want.
###Code
# Hyperparameters
embedding_dim = 100
lstm_units = 150
learning_rate = 0.01
# Build the model
model = Sequential([
Embedding(total_words, embedding_dim, input_length=max_sequence_len-1),
Bidirectional(LSTM(lstm_units)),
Dense(total_words, activation='softmax')
])
# Use categorical crossentropy because this is a multi-class problem
model.compile(
loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
metrics=['accuracy']
)
# Print the model summary
model.summary()
###Output
_____no_output_____
###Markdown
Train the model
From the model summary above, you'll notice that the number of trainable params is much larger than in the previous lab, which usually means a slower training time. It will take roughly 7 seconds per epoch with the GPU enabled in Colab, and you'll reach around 76% accuracy after 100 epochs.
###Code
epochs = 100
# Train the model
history = model.fit(xs, ys, epochs=epochs)
###Output
_____no_output_____
###Markdown
You can visualize the accuracy below to see how it fluctuates as the training progresses.
###Code
import matplotlib.pyplot as plt
# Plot utility
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.show()
# Visualize the accuracy
plot_graphs(history, 'accuracy')
###Output
_____no_output_____
###Markdown
Generating Text
Now you can let the model make its own songs or poetry! Because it is trained on a much larger corpus, the results below should contain fewer repetitions than before. The code below picks the next word based on the highest probability output.
###Code
# Define seed text
seed_text = "help me obi-wan kinobi youre my only hope"
# Define total words to predict
next_words = 100
# Loop until desired length is reached
for _ in range(next_words):
# Convert the seed text to a token sequence
token_list = tokenizer.texts_to_sequences([seed_text])[0]
# Pad the sequence
token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
# Feed to the model and get the probabilities for each index
probabilities = model.predict(token_list)
# Get the index with the highest probability
predicted = np.argmax(probabilities, axis=-1)[0]
# Ignore if index is 0 because that is just the padding.
if predicted != 0:
# Look up the word associated with the index.
output_word = tokenizer.index_word[predicted]
# Combine with the seed text
seed_text += " " + output_word
# Print the result
print(seed_text)
###Output
_____no_output_____
###Markdown
Here again is the code that gets the top 3 predictions and picks one at random.
###Code
# Define seed text
seed_text = "help me obi-wan kinobi youre my only hope"
# Define total words to predict
next_words = 100
# Loop until desired length is reached
for _ in range(next_words):
# Convert the seed text to a token sequence
token_list = tokenizer.texts_to_sequences([seed_text])[0]
# Pad the sequence
token_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')
# Feed to the model and get the probabilities for each index
probabilities = model.predict(token_list)
# Pick a random number from [1,2,3]
choice = np.random.choice([1,2,3])
# Sort the probabilities in ascending order
# and get the random choice from the end of the array
predicted = np.argsort(probabilities)[0][-choice]
# Ignore if index is 0 because that is just the padding.
if predicted != 0:
# Look up the word associated with the index.
output_word = tokenizer.index_word[predicted]
# Combine with the seed text
seed_text += " " + output_word
# Print the result
print(seed_text)
###Output
_____no_output_____ |
tensorflow/LSTM.ipynb | ###Markdown
Data preprocessing
###Code
from string import punctuation
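# The cell that loads the raw text is not included in this notebook dump. A minimal
# sketch of what it presumably looks like; the file names `reviews.txt` and
# `labels.txt` are assumptions, not confirmed by the notebook:
import numpy as np
import tensorflow as tf  # this notebook uses the TF 1.x graph API below

with open('reviews.txt', 'r') as f:
    reviews = f.read()
with open('labels.txt', 'r') as f:
    labels_org = f.read()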
all_text = ''.join([c for c in reviews if c not in punctuation])
reviews = all_text.split('\n')
all_text = ' '.join(reviews)
words = all_text.split()
all_text[:2000]
words[:100]
###Output
_____no_output_____
###Markdown
Encoding the words
###Code
# # Create your dictionary that maps vocab words to integers here
# vocab = set(words)
# vocab_to_int = {w: i for i, w in enumerate(vocab, 1)}
# print(len(vocab_to_int))
# # Convert the reviews to integers, same shape as reviews list, but with integers
# reviews_ints = []
# for r in reviews:
# ri = [vocab_to_int.get(w) for w in r if vocab_to_int.get(w) is not None]
# reviews_ints.append(ri)
# reviews_ints[:10]
from collections import Counter
counts = Counter(words)
vocab = sorted(counts, key=counts.get, reverse=True)
vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}
reviews_ints = []
for each in reviews:
reviews_ints.append([vocab_to_int[word] for word in each.split()])
print(len(reviews_ints))
###Output
25001
###Markdown
Encoding the labels
###Code
# Convert labels to 1s and 0s for 'positive' and 'negative'
# print(labels_org)
labels = np.array([1 if l == "positive" else 0 for l in labels_org.split()])
# print(labels)
print(len(labels))
from collections import Counter
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
# Filter out the zero-length review and truncate each review to its first 200 tokens
reviews_ints = [r[0:200] for r in reviews_ints if len(r) > 0]
from collections import Counter
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
seq_len = 200
features = np.zeros((len(reviews_ints), seq_len), dtype=int)
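# Left-pad each review with zeros so that every row is exactly seq_len tokens long
# (reviews were already truncated to 200 tokens above).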
# print(features[:10,:100])
for i, row in enumerate(reviews_ints):
features[i, -len(row):] = np.array(row)[:seq_len]
features[:10,:100]
# features = []
# for r in reviews_ints:
# features.append(np.pad(r, (0, 200 - len(r)), 'constant').tolist())
# interesting = []
# for i, r in enumerate(reviews_ints):
# if len(r) < 200:
# interesting.append(i)
# features = np.array([np.pad(r, (len(r), 200-len(r)), 'constant').tolist() for r in reviews_ints])
# print(interesting)
print(len(features))
print(type(features))
print(features[41])
print(len(features[41]))
print(reviews_ints[41])
print(len(reviews_ints[41]))
# features[:100]
###Output
25000
<class 'numpy.ndarray'>
[ 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0
1 103 26 6118 2 1 226 6 367 8 13 3
433 4 1 1933 4 20028 2 21497]
200
[1, 103, 26, 6118, 2, 1, 226, 6, 367, 8, 13, 3, 433, 4, 1, 1933, 4, 20028, 2, 21497]
20
###Markdown
Training, Validation, Test
With our data in nice shape, we'll split it into training, validation, and test sets.
###Code
split_frac = 0.8
split_index = int(split_frac * len(features))
train_x, val_x = features[:split_index], features[split_index:]
train_y, val_y = labels[:split_index], labels[split_index:]
split_frac = 0.5
split_index = int(split_frac * len(val_x))
val_x, test_x = val_x[:split_index], val_x[split_index:]
val_y, test_y = val_y[:split_index], val_y[split_index:]
print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
"\nValidation set: \t{}".format(val_x.shape),
"\nTest set: \t\t{}".format(test_x.shape))
print("label set: \t\t{}".format(train_y.shape),
"\nValidation label set: \t{}".format(val_y.shape),
"\nTest label set: \t\t{}".format(test_y.shape))
lstm_size = 256
lstm_layers = 2
batch_size = 1000
learning_rate = 0.01
###Output
_____no_output_____
###Markdown
For the network itself, we'll be passing in our 200-element review vectors. Each batch will contain `batch_size` of these vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability.
###Code
n_words = len(vocab_to_int) + 1 # Add 1 for 0 added to vocab
# Create the graph object
tf.reset_default_graph()
with tf.name_scope('inputs'):
inputs_ = tf.placeholder(tf.int32, [None, None], name="inputs")
labels_ = tf.placeholder(tf.int32, [None, None], name="labels")
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
# Size of the embedding vectors (number of units in the embedding layer)
embed_size = 300
with tf.name_scope("Embeddings"):
embedding = tf.Variable(tf.random_uniform((n_words, embed_size), -1, 1))
embed = tf.nn.embedding_lookup(embedding, inputs_)
###Output
_____no_output_____
###Markdown
LSTM cell
###Code
def lstm_cell():
# Your basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size, reuse=tf.get_variable_scope().reuse)
# Add dropout to the cell
return tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
with tf.name_scope("RNN_layers"):
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([lstm_cell() for _ in range(lstm_layers)])
# Getting an initial state of all zeros
initial_state = cell.zero_state(batch_size, tf.float32)
###Output
_____no_output_____
###Markdown
RNN forward pass
###Code
with tf.name_scope("RNN_forward"):
outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)
###Output
_____no_output_____
###Markdown
Output
###Code
with tf.name_scope('predictions'):
predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
tf.summary.histogram('predictions', predictions)
with tf.name_scope('cost'):
cost = tf.losses.mean_squared_error(labels_, predictions)
tf.summary.scalar('cost', cost)
with tf.name_scope('train'):
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
merged = tf.summary.merge_all()
###Output
_____no_output_____
###Markdown
Validation accuracy
Here we can add a few nodes to calculate the accuracy, which we'll use in the validation pass.
###Code
with tf.name_scope('validation'):
correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
###Output
_____no_output_____
###Markdown
Batching
This is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the `x` and `y` arrays and returns slices out of those arrays with size `[batch_size]`.
###Code
def get_batches(x, y, batch_size=100):
n_batches = len(x)//batch_size
x, y = x[:n_batches*batch_size], y[:n_batches*batch_size]
for ii in range(0, len(x), batch_size):
yield x[ii:ii+batch_size], y[ii:ii+batch_size]
###Output
_____no_output_____
###Markdown
Training
Below is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. Before you run this, make sure the `checkpoints` directory exists.
###Code
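# The markdown cell above assumes a `checkpoints` directory already exists; a minimal
# way to guarantee that before `saver.save()` is called (not part of the original code):
import os
os.makedirs('checkpoints', exist_ok=True)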
epochs = 10
# with graph.as_default():
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_writer = tf.summary.FileWriter('./logs/tb/train', sess.graph)
test_writer = tf.summary.FileWriter('./logs/tb/test', sess.graph)
iteration = 1
for e in range(epochs):
state = sess.run(initial_state)
for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 0.5,
initial_state: state}
summary, loss, state, _ = sess.run([merged, cost, final_state, optimizer], feed_dict=feed)
# loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
train_writer.add_summary(summary, iteration)
if iteration%5==0:
print("Epoch: {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Train loss: {:.3f}".format(loss))
if iteration%25==0:
val_acc = []
val_state = sess.run(cell.zero_state(batch_size, tf.float32))
for x, y in get_batches(val_x, val_y, batch_size):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 1,
initial_state: val_state}
# batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
summary, batch_acc, val_state = sess.run([merged, accuracy, final_state], feed_dict=feed)
val_acc.append(batch_acc)
print("Val acc: {:.3f}".format(np.mean(val_acc)))
iteration +=1
test_writer.add_summary(summary, iteration)
saver.save(sess, "checkpoints/sentiment_manish.ckpt")
saver.save(sess, "checkpoints/sentiment_manish.ckpt")
###Output
Epoch: 0/10 Iteration: 5 Train loss: 0.286
Epoch: 0/10 Iteration: 10 Train loss: 0.260
Epoch: 0/10 Iteration: 15 Train loss: 0.249
Epoch: 0/10 Iteration: 20 Train loss: 0.241
Epoch: 1/10 Iteration: 25 Train loss: 0.212
Val acc: 0.673
Epoch: 1/10 Iteration: 30 Train loss: 0.180
Epoch: 1/10 Iteration: 35 Train loss: 0.181
Epoch: 1/10 Iteration: 40 Train loss: 0.249
Epoch: 2/10 Iteration: 45 Train loss: 0.220
Epoch: 2/10 Iteration: 50 Train loss: 0.168
Val acc: 0.704
Epoch: 2/10 Iteration: 55 Train loss: 0.132
Epoch: 2/10 Iteration: 60 Train loss: 0.103
Epoch: 3/10 Iteration: 65 Train loss: 0.140
Epoch: 3/10 Iteration: 70 Train loss: 0.114
Epoch: 3/10 Iteration: 75 Train loss: 0.097
Val acc: 0.762
Epoch: 3/10 Iteration: 80 Train loss: 0.044
Epoch: 4/10 Iteration: 85 Train loss: 0.026
Epoch: 4/10 Iteration: 90 Train loss: 0.006
Epoch: 4/10 Iteration: 95 Train loss: 0.004
Epoch: 4/10 Iteration: 100 Train loss: 0.000
Val acc: 0.722
Epoch: 5/10 Iteration: 105 Train loss: 0.108
Epoch: 5/10 Iteration: 110 Train loss: 0.043
Epoch: 5/10 Iteration: 115 Train loss: 0.016
Epoch: 5/10 Iteration: 120 Train loss: 0.021
Epoch: 6/10 Iteration: 125 Train loss: 0.026
Val acc: 0.816
Epoch: 6/10 Iteration: 130 Train loss: 0.022
Epoch: 6/10 Iteration: 135 Train loss: 0.002
Epoch: 6/10 Iteration: 140 Train loss: 0.003
Epoch: 7/10 Iteration: 145 Train loss: 0.008
Epoch: 7/10 Iteration: 150 Train loss: 0.020
Val acc: 0.783
Epoch: 7/10 Iteration: 155 Train loss: 0.055
Epoch: 7/10 Iteration: 160 Train loss: 0.049
Epoch: 8/10 Iteration: 165 Train loss: 0.025
Epoch: 8/10 Iteration: 170 Train loss: 0.021
Epoch: 8/10 Iteration: 175 Train loss: 0.030
Val acc: 0.799
Epoch: 8/10 Iteration: 180 Train loss: 0.002
Epoch: 9/10 Iteration: 185 Train loss: 0.016
Epoch: 9/10 Iteration: 190 Train loss: 0.005
Epoch: 9/10 Iteration: 195 Train loss: 0.001
Epoch: 9/10 Iteration: 200 Train loss: 0.000
Val acc: 0.791
###Markdown
Testing
###Code
test_acc = []
with tf.Session() as sess:
saver.restore(sess, "checkpoints/sentiment_manish.ckpt")
test_state = sess.run(cell.zero_state(batch_size, tf.float32))
for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 1,
initial_state: test_state}
batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
test_acc.append(batch_acc)
print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
###Output
INFO:tensorflow:Restoring parameters from checkpoints/sentiment_manish.ckpt
Test accuracy: 0.804
|
src/notebooks/testing_network.ipynb | ###Markdown
Get started by pulling the code
###Code
import os
from getpass import getpass
import urllib
user = input('User name: ')
password = getpass('Password: ')
password = urllib.parse.quote(password) # your password is converted into url format
repo_name = "UNAST.git"
cmd_string = 'git clone https://{0}:{1}@github.com/{0}/{2}'.format(user, password, repo_name)
!{cmd_string}
%cd UNAST
!git checkout model-implementation/lucas
###Output
_____no_output_____
###Markdown
Add parent directory to path if needed
###Code
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
###Output
_____no_output_____
###Markdown
Let's make some dummy data
###Code
import torch
from module import RNNEncoder, RNNDecoder
from network import Discriminator
# [batch_size x seq_len x hidden_dim] expected into the network
hidden = 512
latent = 64
out = 100
network_in_shape = (128, 40, 512)
dummy = torch.randn(network_in_shape)
###Output
_____no_output_____
###Markdown
Let's make a dummy network
###Code
encoder = RNNEncoder(hidden, hidden, latent, num_layers=5, bidirectional=False)
decoder = RNNDecoder(latent, hidden, hidden, out, num_layers=5, attention=True)
discriminator = Discriminator(hidden)
###Output
_____no_output_____
###Markdown
Now, run the network and let's see how we do!
###Code
output, (latent_hidden, latent_cell) = encoder(dummy)
print(output.shape)
print(latent_hidden.shape)
print(latent_cell.shape)
print(latent_hidden.shape)
input = latent_hidden.permute(1, 0, 2)
#mask = torch.zeros(dummy.shape[0:2])
#print("MASK shape", mask.shape)
#output_probs, hidden = decoder(input[:, -1:, :], (latent_hidden, latent_cell), output, mask)
#print("\nDecoder output shapes: ")
#print(output_probs.shape)
discriminator_out = discriminator(latent_hidden[-1])
print("\nDiscriminator output shape:")
print(discriminator_out.shape)
###Output
torch.Size([128, 40, 64])
torch.Size([5, 128, 512])
torch.Size([5, 128, 512])
torch.Size([5, 128, 512])
Discriminator output shape:
torch.Size([128, 2])
###Markdown
Testing Smoothed CE loss
###Code
fake_output = torch.zeros_like(discriminator_out[0])
fake_output[:,] = torch.tensor([1,])
fake_output.shape
empty_target = torch.zeros_like(fake_output)
empty_target[:,] = torch.tensor([1,0])
empty_target.shape
empty_target
import torch.nn.functional as F
import torch.nn as nn
def cross_entropy(input, target, size_average=True):
""" Cross entropy that accepts soft targets
Args:
pred: predictions for neural network
targets: targets, can be soft
size_average: if false, sum is returned instead of mean
Examples::
input = torch.FloatTensor([[1.1, 2.8, 1.3], [1.1, 2.1, 4.8]])
input = torch.autograd.Variable(out, requires_grad=True)
target = torch.FloatTensor([[0.05, 0.9, 0.05], [0.05, 0.05, 0.9]])
target = torch.autograd.Variable(y1)
loss = cross_entropy(input, target)
loss.backward()
"""
logsoftmax = nn.LogSoftmax(1)
if size_average:
return torch.mean(torch.sum(-target * logsoftmax(input), dim=1))
else:
return torch.sum(torch.sum(-target * logsoftmax(input), dim=1))
cross_entropy(discriminator_out[0], empty_target)
cross_entropy(torch.Tensor([[1,0], [2,0]]), torch.Tensor([[1,0], [1,0]]))
F.cross_entropy(torch.FloatTensor([[1,0], [2,0]]), torch.LongTensor([0,0]))
###Output
_____no_output_____ |
variational.ipynb | ###Markdown
[Automatic variational ABC](https://arxiv.org/abs/1606.08549) for exoplanets?
###Code
%matplotlib inline
%config IPython.matplotlib.backend = "retina"
from matplotlib import rcParams
rcParams["savefig.dpi"] = 300
rcParams["figure.dpi"] = 300
from autograd import grad
import autograd.numpy as np
from autograd.scipy.misc import logsumexp
from autograd.optimizers import adam
import matplotlib.pyplot as plt
def completeness(x, y):
snr = y**2 * np.sqrt(2000.0 / x) * 1e3
return 1.0 / (1 + np.exp(-0.3 * (snr - 10.0)))
def power_law(u, n, mn, mx):
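    # Inverse-CDF sampling of a power law x^n on [mn, mx]; the n == -1 case
    # (np1 == 0) reduces to a log-uniform draw.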
np1 = n+1.0
if np.allclose(np1, 0.0):
return mn * np.exp(u * (np.log(mx) - np.log(mn)))
x0n = mn ** np1
return ((mx**np1 - x0n) * u + x0n) ** (1.0 / np1)
N_tot = 1000
K = np.random.poisson(0.5 * N_tot)
XY_true = np.vstack((
10**np.random.uniform(0, 2, K),
power_law(np.random.rand(K), -1.5, 0.01, 0.1),
)).T
Q = completeness(XY_true[:, 0], XY_true[:, 1])
XY_obs = np.array(XY_true[Q > np.random.rand(K)])
class Simulator(object):
def __init__(self, XY, N_tot, x_range, y_range,
mu_lg, sig_lg, mu_nx, sig_nx, mu_ny, sig_ny):
self.N_tot = N_tot
self.x_range = x_range
self.y_range = y_range
self.stats_obs = self.stats(XY[:, 0], XY[:, 1])
# Prior
self.priors = [(mu_lg, sig_lg), (mu_nx, sig_nx), (mu_ny, sig_ny)]
def kld(self, phi):
kld = 0.0
for (mu1, lsig1), (mu2, sig2) in zip((phi[:2], phi[2:4], phi[4:6]), self.priors):
sig1 = np.exp(lsig1)
kld += np.log(sig2/sig1)+0.5*((sig1**2+(mu1-mu2)**2)/sig2**2-1.0)
return kld
def stats(self, x, y):
x, y = np.log(x), np.log(y)
return np.array([10*np.log(len(x)), np.mean(x), np.log(np.var(x)), np.mean(y), np.log(np.var(y))])
def _f(self, theta, u_exist, u_det, u_x, u_y):
q = u_exist < np.exp(theta[0])
x = power_law(u_x[q], theta[1], *(self.x_range))
y = power_law(u_y[q], theta[2], *(self.y_range))
q_det = completeness(x, y) > u_det[q]
return x[q_det], y[q_det]
def _g(self, phi, nu):
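        # Reparameterization: theta = mu + exp(log_sigma) * nu for each of the three
        # variational factors (log of the occurrence probability and the two power-law
        # indices), so gradients with respect to phi flow through the sampled theta.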
return np.array([
phi[0] + np.exp(phi[1]) * nu[0],
phi[2] + np.exp(phi[3]) * nu[1],
phi[4] + np.exp(phi[5]) * nu[2],
])
def simulate(self, theta):
return self._f(theta, *(np.random.rand(4, self.N_tot)))
def sample(self, phi):
theta = self._g(phi, np.random.randn(3))
return self.simulate(theta)
def _log_p_eps(self, phi, nu, u, eps=0.1):
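        # ABC log-likelihood: run one simulation and score its summary statistics
        # against the observed ones under an isotropic Gaussian kernel of bandwidth
        # eps (returns -inf if fewer than two objects are detected).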
theta = self._g(phi, nu)
sim = self._f(theta, *u)
if len(sim[0]) < 2:
return -np.inf
stats = self.stats(*sim)
return -0.5 * np.sum((stats - self.stats_obs)**2 / eps**2 + np.log(2*np.pi*eps**2))
def elbo_abc(self, phi, iteration, S=10, L=8):
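        # Monte Carlo estimate of the ABC ELBO: S draws of the variational noise nu,
        # L simulations per draw combined with logsumexp, minus the KL divergence from
        # the variational distribution to the prior. The negative ELBO is returned
        # because autograd's adam optimizer minimizes its objective.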
print(iteration)
nu = np.random.randn(S, 3)
u = np.random.rand(L, 4, self.N_tot)
lp = 0.0
for s in range(S):
lp += logsumexp(np.array([self._log_p_eps(phi, nu[s], u[l]) for l in range(L)]))
lp /= S
elbo = lp - np.log(L) - self.kld(phi)
print(elbo)
return -elbo
sim = Simulator(XY_obs, N_tot, (1.0, 100.0), (0.01, 0.1),
-1.0, 5.0, 0.0, 3.0, 0.0, 3.0)
objective_grad = grad(sim.elbo_abc)
init_params = np.array([np.log(0.5), np.log(0.001), -1.0, np.log(1.0), -1.5, np.log(1.0)])
batch_size = 256
num_epochs = 5
step_size = 0.01
optimized_params = adam(objective_grad, init_params, step_size=step_size,
num_iters=500) #, callback=print_perf)
optimized_params[::2], np.exp(optimized_params[1::2])
init_params
x, y = sim.sample(optimized_params)
sim.stats(x, y)
sim.stats_obs
plt.plot(XY_obs[:, 0], XY_obs[:, 1], ".")
plt.plot(x, y, ".")
plt.xscale("log")
plt.yscale("log")
_, bins, _ = plt.hist(XY_obs[:, 0], histtype="step")
plt.hist(x, bins, histtype="step");
_, bins, _ = plt.hist(XY_obs[:, 1], histtype="step")
plt.hist(y, bins, histtype="step");
###Output
_____no_output_____ |
survey_cleaning.ipynb | ###Markdown
Innisfil Groceries Survey
###Code
import pandas as pd
df = pd.read_csv('apr17_survey.csv', encoding = 'UTF-8')
df.head()
col_names = ['date', 'Q1', 'Q2', 'Q3', 'Q4', 'Q5', 'Q6', 'Q7', 'Q8', 'Q9', 'Q10', 'Q11', 'email']
df.columns = col_names
df.head()
df.duplicated().sum()
df.isna().sum()
df = df.dropna(subset = ['Q1'])
df.isna().sum()
###Output
_____no_output_____
###Markdown
Question 1
###Code
df['Q1'].unique()
mask_Essa = df['Q1'].str.match('.*[Ee]ssa.*')
mask_Alliston = df['Q1'].str.match('.*[Aa]lliston.*')
mask_Belle = df['Q1'].str.match('.*[Bb]elle [Ee]wart.*')
df.loc[mask_Essa, 'Q1'] = 'Essa'
df.loc[mask_Alliston, 'Q1'] = 'Alliston'
df.loc[mask_Belle, 'Q1'] = 'Belle Ewart'
df['Q1'].value_counts()
mask_other = df['Q1'] == 'Cookstown'
df.loc[~mask_other, 'Q1'] = 'Other'
df['Q1'].value_counts()
###Output
_____no_output_____
###Markdown
Question 2
###Code
df['Q2'].unique()
mask_Cookstown = df['Q1'] == 'Cookstown'
df.loc[mask_Cookstown, 'Q2'].value_counts()
df['Q2'] = df['Q2'].fillna('Foodland')
mask_Zehrs = df['Q2'].str.match('.*[Zzehrs\']{4,5}.*')
mask_Frills = df['Q2'].str.match('.*[Nno Ffrils]{9}.*')
mask_Costco = df['Q2'].str.match('.*[Ccost]{6}.*')
mask_Foodland = df['Q2'].str.match('.*[FfoodlandL ]{8,9}.*')
mask_Walmart = df['Q2'].str.match('.*[Wwalmrt]{7,8}.*')
mask_Sobeys = df['Q2'].str.match('.*[Ssobe\'y’]{6,7}.*')
mask_Basic = df['Q2'].str.match('.*[Ffod Bbasics]{10,11}.*')
df['Zehrs'] = 0
df.loc[mask_Zehrs, 'Zehrs'] = 1
df['Frills'] = 0
df.loc[mask_Frills, 'Frills'] = 1
df['Costco'] = 0
df.loc[mask_Costco, 'Costco'] = 1
df['Foodland'] = 0
df.loc[mask_Foodland, 'Foodland'] = 1
df['Walmart'] = 0
df.loc[mask_Walmart, 'Walmart'] = 1
df['Sobeys'] = 0
df.loc[mask_Sobeys, 'Sobeys'] = 1
df['Basic'] = 0
df.loc[mask_Basic, 'Basic'] = 1
df.head()
df.loc[:, 'Zehrs':].sum()
###Output
_____no_output_____
###Markdown
Question 3
###Code
df['Q3'].unique()
###Output
_____no_output_____
###Markdown
Question 4
###Code
df['Q4'].unique()
###Output
_____no_output_____
###Markdown
Question 5
###Code
df['Q5'].unique()
df['Q5'].value_counts()
df['Q5'] = df['Q5'].fillna('Pick Up')
###Output
_____no_output_____
###Markdown
Question 6
###Code
df['Q6'].unique()
df['Q6'] = df['Q6'].fillna('5')
df.loc[df['Q6'].str.match('.*\$?5\$?.*'), 'Q6']
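# Bucket the free-text delivery-fee answers into ordinal codes (inferred from the
# regexes below): 0 = free / pick up, 1 = up to $5 (and remaining text answers),
# 2 = $6-$10, 3 = more than $10.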
mask_0 = df['Q6'].str.match('^\s*\$?0\$?\s*$')
mask_0_2 = df['Q6'].str.match('No.*')
mask_0_3 = df['Q6'].str.match('Free')
mask_0_4 = df['Q6'].str.match('.*[Pp]ick up.*')
df.loc[mask_0 | mask_0_2 | mask_0_3 | mask_0_4, 'Q6'] = '0'
mask_5 = df['Q6'].str.match('^\s*\$?[1-5](.00)?\$?\s*$')
mask_5_2 = df['Q6'].str.match('.*\D*5(.00)?\$?\s*')
mask_5_3 = df['Q6'].str.match('Depend.*')
df.loc[mask_5 | mask_5_2 | mask_5_3, 'Q6'] = '1'
mask_10 = df['Q6'].str.match('^\s*\$?10(.00)?\$?\s*$')
mask_10_2 = df['Q6'].str.match('.*[6-9].*')
mask_10_3 = df['Q6'].str.match('\D*10(.00)?\D*')
df.loc[mask_10 | mask_10_2 | mask_10_3, 'Q6'] = '2'
mask_high = df['Q6'].str.match('^\$(20)?(30)?$')
df.loc[mask_high, 'Q6'] = '3'
mask_final = df['Q6'].str.match('.*[A-z].*')
df.loc[mask_final, 'Q6'] = '1'
df['Q6'] = df['Q6'].astype('int')
df['Q6'].value_counts()
###Output
_____no_output_____
###Markdown
Question 7
###Code
df['Q7'].unique()
df['Q7'].value_counts()
df['Q7'] = df['Q7'].fillna('Almost never')
###Output
_____no_output_____
###Markdown
Question 8
###Code
df['Q8'].unique()
df['Q8'] = df['Q8'].fillna('Yes')
mask_Yes = df['Q8'].str.match('.*[Yyes].*')
mask_Yes2 = df['Q8'].str.match('.*Bradford.*')
df.loc[(mask_Yes | mask_Yes2), 'Q8'] = 'Yes'
mask_No = df['Q8'].str.match('^[Nno]{2}.*')
mask_No2 = df['Q8'].str.match('.*\Wno\W.*')
mask_No3 = ~(mask_Yes | mask_Yes2 | mask_No | mask_No2)
df.loc[(mask_No | mask_No2 | mask_No3), 'Q8'] = 'No'
df['Q8'].value_counts()
###Output
_____no_output_____
###Markdown
Question 9
###Code
df['Q9'].unique()
df['Q9'] = df['Q9'].astype('int')
df['Q9'].value_counts()
###Output
_____no_output_____
###Markdown
Question 10
###Code
df['Q10'] = df['Q10'].fillna('NR')
###Output
_____no_output_____
###Markdown
Question 11
###Code
df['Q11'] = df['Q11'].fillna('NR')
df.to_csv('survey_clean.csv', index = False, encoding = 'UTF-8')
###Output
_____no_output_____ |